X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=udevd.c;h=5756b46122d0f03037c4b9521679fd53e2b14bab;hp=24cf9c9a7cf8c5a491c63483fff09f4a91455b70;hb=84df02dd63bf53acb5a61e9db1da067760b927e9;hpb=872344c41094f636fd667b9e619f8f219d814605

diff --git a/udevd.c b/udevd.c
index 24cf9c9a7..5756b4612 100644
--- a/udevd.c
+++ b/udevd.c
@@ -2,6 +2,7 @@
  * udevd.c - hotplug event serializer
  *
  * Copyright (C) 2004 Kay Sievers
+ * Copyright (C) 2004 Chris Friesen
  *
  *
  *	This program is free software; you can redistribute it and/or modify it
@@ -19,9 +20,7 @@
  *
  */

-#include
 #include
-#include
 #include
 #include
 #include
@@ -29,47 +28,62 @@
 #include
 #include
 #include
-#include
-#include
+#include
 #include
 #include
 #include
+#include
+#include "klibc_fixups.h"
+#include

 #include "list.h"
 #include "udev.h"
+#include "udev_lib.h"
 #include "udev_version.h"
 #include "udevd.h"
 #include "logging.h"

-unsigned char logname[42];
-static pthread_mutex_t msg_lock;
-static pthread_mutex_t msg_active_lock;
-static pthread_cond_t msg_active;
-static pthread_mutex_t exec_lock;
-static pthread_mutex_t exec_active_lock;
-static pthread_cond_t exec_active;
-static pthread_mutex_t running_lock;
-static pthread_attr_t thr_attr;
+static int pipefds[2];
 static int expected_seqnum = 0;
+volatile static int children_waiting;
+volatile static int run_msg_q;
+volatile static int sig_flag;
+static int run_exec_q;
+
+static LIST_HEAD(msg_list);
+static LIST_HEAD(exec_list);
+static LIST_HEAD(running_list);
+
+static void exec_queue_manager(void);
+static void msg_queue_manager(void);
+static void user_sighandler(void);
+static void reap_kids(void);
+char *udev_bin;
+
+#ifdef LOG
+unsigned char logname[LOGNAME_SIZE];
+void log_message (int level, const char *format, ...)
+{
+	va_list args;

-LIST_HEAD(msg_list);
-LIST_HEAD(exec_list);
-LIST_HEAD(running_list);
+	va_start(args, format);
+	vsyslog(level, format, args);
+	va_end(args);
+}
+#endif

+#define msg_dump(msg) \
+	dbg("msg_dump: sequence %d, '%s', '%s', '%s'", \
+	msg->seqnum, msg->action, msg->devpath, msg->subsystem);

 static void msg_dump_queue(void)
 {
+#ifdef DEBUG
 	struct hotplug_msg *msg;

 	list_for_each_entry(msg, &msg_list, list)
 		dbg("sequence %d in queue", msg->seqnum);
-}
-
-static void msg_dump(struct hotplug_msg *msg)
-{
-	dbg("sequence %d, '%s', '%s', '%s'",
-	    msg->seqnum, msg->action, msg->devpath, msg->subsystem);
+#endif
 }

 static struct hotplug_msg *msg_create(void)
@@ -77,351 +91,423 @@ static struct hotplug_msg *msg_create(void)
 	struct hotplug_msg *new_msg;

 	new_msg = malloc(sizeof(struct hotplug_msg));
-	if (new_msg == NULL) {
+	if (new_msg == NULL)
 		dbg("error malloc");
-		return NULL;
-	}
 	return new_msg;
 }

-static void msg_delete(struct hotplug_msg *msg)
+static void run_queue_delete(struct hotplug_msg *msg)
 {
-	if (msg != NULL)
-		free(msg);
+	list_del(&msg->list);
+	free(msg);
 }

 /* orders the message in the queue by sequence number */
 static void msg_queue_insert(struct hotplug_msg *msg)
 {
 	struct hotplug_msg *loop_msg;
+	struct sysinfo info;

-	/* sort message by sequence number into list*/
-	list_for_each_entry(loop_msg, &msg_list, list)
-		if (loop_msg->seqnum > msg->seqnum)
+	/* sort message by sequence number into list. events
+	 * will tend to come in order, so scan the list backwards
+	 */
+	list_for_each_entry_reverse(loop_msg, &msg_list, list)
+		if (loop_msg->seqnum < msg->seqnum)
 			break;
-	list_add_tail(&msg->list, &loop_msg->list);
-	dbg("queued message seq %d", msg->seqnum);

 	/* store timestamp of queuing */
-	msg->queue_time = time(NULL);
+	sysinfo(&info);
+	msg->queue_time = info.uptime;
+
+	list_add(&msg->list, &loop_msg->list);
+	dbg("queued message seq %d", msg->seqnum);

-	/* signal queue activity to manager */
-	pthread_mutex_lock(&msg_active_lock);
-	pthread_cond_signal(&msg_active);
-	pthread_mutex_unlock(&msg_active_lock);
+	/* run msg queue manager */
+	run_msg_q = 1;

 	return ;
 }

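The insertion above keeps msg_list sorted by sequence number but scans from the tail, because hotplug events usually arrive almost in order, so the backwards walk terminates after one or two steps in the common case. A minimal standalone sketch of the same idea, using a hypothetical hand-rolled doubly linked list instead of udev's list.h macros:

#include <stddef.h>

/* hypothetical stand-ins for struct hotplug_msg and its queue */
struct msg {
	int seqnum;
	struct msg *prev, *next;
};

struct queue {
	struct msg *head, *tail;
};

/* insert m keeping the queue sorted by seqnum; scan backwards
 * because new messages usually belong at or near the tail */
static void queue_insert_sorted(struct queue *q, struct msg *m)
{
	struct msg *pos = q->tail;

	while (pos != NULL && pos->seqnum > m->seqnum)
		pos = pos->prev;

	/* link m in right after pos (or at the head if pos is NULL) */
	m->prev = pos;
	m->next = pos ? pos->next : q->head;
	if (m->next != NULL)
		m->next->prev = m;
	else
		q->tail = m;
	if (pos != NULL)
		pos->next = m;
	else
		q->head = m;
}
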
 /* forks event and removes event from run queue when finished */
-static void *run_threads(void * parm)
+static void udev_run(struct hotplug_msg *msg)
 {
 	pid_t pid;
-	struct hotplug_msg *msg;
+	char action[ACTION_SIZE];
+	char devpath[DEVPATH_SIZE];
+	char *env[] = { action, devpath, NULL };

-	msg = parm;
-	setenv("ACTION", msg->action, 1);
-	setenv("DEVPATH", msg->devpath, 1);
+	strcpy(action, "ACTION=");
+	strfieldcat(action, msg->action);
+	strcpy(devpath, "DEVPATH=");
+	strfieldcat(devpath, msg->devpath);

 	pid = fork();
 	switch (pid) {
 	case 0:
 		/* child */
-		execl(UDEV_BIN, "udev", msg->subsystem, NULL);
+		execle(udev_bin, "udev", msg->subsystem, NULL, env);
 		dbg("exec of child failed");
 		exit(1);
 		break;
 	case -1:
 		dbg("fork of child failed");
-		goto exit;
+		run_queue_delete(msg);
+		/* note: we never managed to run, so we had no impact on
+		 * running_with_devpath(), so don't bother setting run_exec_q
+		 */
+		break;
 	default:
-		/* wait for exit of child */
-		dbg("==> exec seq %d [%d] working at '%s'",
-		    msg->seqnum, pid, msg->devpath);
-		wait(NULL);
-		dbg("<== exec seq %d came back", msg->seqnum);
+		/* get SIGCHLD in main loop */
+		dbg("==> exec seq %d [%d] working at '%s'", msg->seqnum, pid, msg->devpath);
+		msg->pid = pid;
 	}
-
-exit:
-	/* remove event from run list */
-	pthread_mutex_lock(&running_lock);
-	list_del_init(&msg->list);
-	pthread_mutex_unlock(&running_lock);
-
-	msg_delete(msg);
-
-	/* signal queue activity to exec manager */
-	pthread_mutex_lock(&exec_active_lock);
-	pthread_cond_signal(&exec_active);
-	pthread_mutex_unlock(&exec_active_lock);
-
-	pthread_exit(0);
 }

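udev_run() builds the child's environment by hand and hands it to execle(), so the spawned handler sees exactly ACTION and DEVPATH rather than everything the daemon inherited. A self-contained sketch of that pattern; the "/sbin/udev" path and the "block" subsystem argument are made-up placeholders, not values from this diff:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t spawn_handler(const char *action, const char *devpath)
{
	char action_env[512];
	char devpath_env[512];
	char *env[] = { action_env, devpath_env, NULL };
	pid_t pid;

	snprintf(action_env, sizeof(action_env), "ACTION=%s", action);
	snprintf(devpath_env, sizeof(devpath_env), "DEVPATH=%s", devpath);

	pid = fork();
	switch (pid) {
	case 0:
		/* child: the trailing execle() argument becomes the new environment */
		execle("/sbin/udev", "udev", "block", NULL, env);
		_exit(1);	/* only reached if the exec failed */
	case -1:
		perror("fork");
		return -1;
	default:
		/* parent: the child is reaped later via SIGCHLD + waitpid() */
		return pid;
	}
}
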
dbg("delay seq %d, cause seq %d already working on '%s'", - loop_msg->seqnum, msg->seqnum, msg->devpath); - } + list_for_each_entry_safe(loop_msg, tmp_msg, &exec_list, list) { + msg = running_with_devpath(loop_msg); + if (!msg) { + /* move event to run list */ + list_move_tail(&loop_msg->list, &running_list); + udev_run(loop_msg); + dbg("moved seq %d to running list", loop_msg->seqnum); + } else { + dbg("delay seq %d, cause seq %d already working on '%s'", + loop_msg->seqnum, msg->seqnum, msg->devpath); } - pthread_mutex_unlock(&exec_lock); - - /* wait for activation, new events or childs coming back */ - pthread_mutex_lock(&exec_active_lock); - pthread_cond_wait(&exec_active, &exec_active_lock); - pthread_mutex_unlock(&exec_active_lock); } } -static void exec_queue_activate(void) -{ - pthread_mutex_lock(&exec_active_lock); - pthread_cond_signal(&exec_active); - pthread_mutex_unlock(&exec_active_lock); -} - -/* move message from incoming to exec queue */ -static void msg_move_exec(struct list_head *head) +static void msg_move_exec(struct hotplug_msg *msg) { - list_move_tail(head, &exec_list); - exec_queue_activate(); + list_move_tail(&msg->list, &exec_list); + run_exec_q = 1; + expected_seqnum = msg->seqnum+1; + dbg("moved seq %d to exec, next expected is %d", + msg->seqnum, expected_seqnum); } -/* queue management thread handles the timeouts and dispatches the events */ -static void *msg_queue_manager(void * parm) +/* msg queue management routine handles the timeouts and dispatches the events */ +static void msg_queue_manager() { struct hotplug_msg *loop_msg; struct hotplug_msg *tmp_msg; - time_t msg_age = 0; - struct timespec tv; + struct sysinfo info; + long msg_age = 0; - while (1) { - dbg("msg queue manager, next expected is %d", expected_seqnum); - pthread_mutex_lock(&msg_lock); - pthread_mutex_lock(&exec_lock); + dbg("msg queue manager, next expected is %d", expected_seqnum); recheck: - list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) { - /* move event with expected sequence to the exec list */ - if (loop_msg->seqnum == expected_seqnum) { - msg_move_exec(&loop_msg->list); - expected_seqnum++; - dbg("moved seq %d to exec, next expected is %d", - loop_msg->seqnum, expected_seqnum); - continue; - } - - /* move event with expired timeout to the exec list */ - msg_age = time(NULL) - loop_msg->queue_time; - if (msg_age > EVENT_TIMEOUT_SEC-1) { - msg_move_exec(&loop_msg->list); - expected_seqnum = loop_msg->seqnum+1; - dbg("moved seq %d to exec, reset next expected to %d", - loop_msg->seqnum, expected_seqnum); - goto recheck; - } else { - break; - } + list_for_each_entry_safe(loop_msg, tmp_msg, &msg_list, list) { + /* move event with expected sequence to the exec list */ + if (loop_msg->seqnum == expected_seqnum) { + msg_move_exec(loop_msg); + continue; } - msg_dump_queue(); - pthread_mutex_unlock(&exec_lock); - pthread_mutex_unlock(&msg_lock); - - /* wait until queue gets active or next message timeout expires */ - pthread_mutex_lock(&msg_active_lock); - - if (list_empty(&msg_list) == 0) { - tv.tv_sec = time(NULL) + EVENT_TIMEOUT_SEC - msg_age; - tv.tv_nsec = 0; - dbg("next event expires in %li seconds", - EVENT_TIMEOUT_SEC - msg_age); - pthread_cond_timedwait(&msg_active, &msg_active_lock, &tv); + /* move event with expired timeout to the exec list */ + sysinfo(&info); + msg_age = info.uptime - loop_msg->queue_time; + dbg("seq %d is %li seconds old", loop_msg->seqnum, msg_age); + if (msg_age > EVENT_TIMEOUT_SEC-1) { + msg_move_exec(loop_msg); + goto recheck; } else { - 
-/* every connect creates a thread which gets the msg, queues it and exits */
-static void *client_threads(void * parm)
+/* receive the msg, do some basic sanity checks, and queue it */
+static void handle_msg(int sock)
 {
-	int sock;
 	struct hotplug_msg *msg;
 	int retval;
-
-	sock = (int) parm;
+	struct msghdr smsg;
+	struct cmsghdr *cmsg;
+	struct iovec iov;
+	struct ucred *cred;
+	char cred_msg[CMSG_SPACE(sizeof(struct ucred))];

 	msg = msg_create();
 	if (msg == NULL) {
 		dbg("unable to store message");
-		goto exit;
+		return;
 	}

-	retval = recv(sock, msg, sizeof(struct hotplug_msg), 0);
+	iov.iov_base = msg;
+	iov.iov_len = sizeof(struct hotplug_msg);
+
+	memset(&smsg, 0x00, sizeof(struct msghdr));
+	smsg.msg_iov = &iov;
+	smsg.msg_iovlen = 1;
+	smsg.msg_control = cred_msg;
+	smsg.msg_controllen = sizeof(cred_msg);
+
+	retval = recvmsg(sock, &smsg, 0);
 	if (retval < 0) {
-		dbg("unable to receive message");
-		goto exit;
+		if (errno != EINTR)
+			dbg("unable to receive message");
+		return;
+	}
+	cmsg = CMSG_FIRSTHDR(&smsg);
+	cred = (struct ucred *) CMSG_DATA(cmsg);
+
+	if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
+		dbg("no sender credentials received, message ignored");
+		goto skip;
+	}
+
+	if (cred->uid != 0) {
+		dbg("sender uid=%i, message ignored", cred->uid);
+		goto skip;
 	}

 	if (strncmp(msg->magic, UDEV_MAGIC, sizeof(UDEV_MAGIC)) != 0 ) {
 		dbg("message magic '%s' doesn't match, ignore it", msg->magic);
-		msg_delete(msg);
-		goto exit;
+		goto skip;
 	}

 	/* if no seqnum is given, we move straight to exec queue */
-	if (msg->seqnum == 0) {
-		pthread_mutex_lock(&exec_lock);
+	if (msg->seqnum == -1) {
 		list_add(&msg->list, &exec_list);
-		exec_queue_activate();
-		pthread_mutex_unlock(&exec_lock);
+		run_exec_q = 1;
 	} else {
-		pthread_mutex_lock(&msg_lock);
 		msg_queue_insert(msg);
-		pthread_mutex_unlock(&msg_lock);
 	}
+	return;

-exit:
-	close(sock);
-	pthread_exit(0);
+skip:
+	free(msg);
+	return;
 }

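handle_msg() now trusts a datagram only if the kernel-attached SCM_CREDENTIALS ancillary data says the sender is root, which userspace cannot forge. A standalone sketch of that receive path, assuming a Linux AF_LOCAL socket that already has SO_PASSCRED enabled (as main() sets up below):

#define _GNU_SOURCE		/* for struct ucred */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

/* receive one datagram, returning its length only if the
 * sender's kernel-verified uid is 0 */
static ssize_t recv_from_root_only(int sock, void *buf, size_t len)
{
	char cred_buf[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { buf, len };
	struct msghdr mh;
	struct cmsghdr *cmsg;
	struct ucred *cred;
	ssize_t n;

	memset(&mh, 0x00, sizeof(mh));
	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	mh.msg_control = cred_buf;
	mh.msg_controllen = sizeof(cred_buf);

	n = recvmsg(sock, &mh, 0);
	if (n < 0)
		return -1;

	cmsg = CMSG_FIRSTHDR(&mh);
	if (cmsg == NULL || cmsg->cmsg_level != SOL_SOCKET ||
	    cmsg->cmsg_type != SCM_CREDENTIALS)
		return -1;	/* no credentials attached, drop it */

	cred = (struct ucred *) CMSG_DATA(cmsg);
	if (cred->uid != 0)
		return -1;	/* sender is not root, drop it */

	return n;
}

Note that the original code dereferences CMSG_DATA(cmsg) before checking cmsg for NULL; the sketch does the checks first.
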
 static void sig_handler(int signum)
 {
+	int rc;
+
 	switch (signum) {
 		case SIGINT:
 		case SIGTERM:
-			unlink(UDEVD_LOCK);
 			exit(20 + signum);
 			break;
+		case SIGALRM:
+			/* set flag, then write to pipe if needed */
+			run_msg_q = 1;
+			goto do_write;
+			break;
+		case SIGCHLD:
+			/* set flag, then write to pipe if needed */
+			children_waiting = 1;
+			goto do_write;
+			break;
 		default:
 			dbg("unhandled signal");
+			return;
+	}
+
+do_write:
+	/* if pipe is empty, write to pipe to force select to return
+	 * immediately when it gets called
+	 */
+	if (!sig_flag) {
+		rc = write(pipefds[1],&signum,sizeof(signum));
+		if (rc < 0)
+			dbg("unable to write to pipe");
+		else
+			sig_flag = 1;
 	}
 }

-static int one_and_only(void)
+static void udev_done(int pid)
 {
-	char string[50];
-	int lock_file;
-
-	lock_file = open(UDEVD_LOCK, O_RDWR | O_CREAT, 0x640);
-	if (lock_file < 0)
-		return -1;
-
-	/* see if we can lock */
-	if (lockf(lock_file, F_TLOCK, 0) < 0) {
-		dbg("file is already locked, exit");
-		close(lock_file);
-		return -1;
+	/* find msg associated with pid and delete it */
+	struct hotplug_msg *msg;
+
+	list_for_each_entry(msg, &running_list, list) {
+		if (msg->pid == pid) {
+			dbg("<== exec seq %d came back", msg->seqnum);
+			run_queue_delete(msg);
+
+			/* we want to run the exec queue manager since there may
+			 * be events waiting with the devpath of the one that
+			 * just finished
+			 */
+			run_exec_q = 1;
+			return;
+		}
 	}
+}

-	snprintf(string, sizeof(string), "%d\n", getpid());
-	write(lock_file, string, strlen(string));
+static void reap_kids()
+{
+	/* reap all dead children */
+	while(1) {
+		int pid = waitpid(-1, 0, WNOHANG);
+		if ((pid == -1) || (pid == 0))
+			break;
+		udev_done(pid);
+	}
+}
+
+/* just read everything from the pipe and clear the flag,
+ * the useful flags were set in the signal handler
+ */
+static void user_sighandler()
+{
+	int sig;
+	while(1) {
+		int rc = read(pipefds[0],&sig,sizeof(sig));
+		if (rc < 0)
+			break;

-	return 0;
+		sig_flag = 0;
+	}
 }

+
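The pipe written from sig_handler() and drained by user_sighandler() is the self-pipe trick: the handler does only async-signal-safe work (set a flag, write a few bytes), and select() in the main loop wakes up because the pipe's read end is in its fd set. A condensed, self-contained sketch of the wiring; the names here are illustrative, not the ones used above:

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int wakeup_fds[2];
static volatile sig_atomic_t got_signal;

static void wakeup_handler(int signum)
{
	/* only async-signal-safe calls in here; the byte's value is irrelevant */
	got_signal = signum;
	if (write(wakeup_fds[1], "", 1) < 0)
		; /* pipe already full: select() will wake up anyway */
}

static int setup_wakeup_pipe(void)
{
	struct sigaction act;

	if (pipe(wakeup_fds) < 0)
		return -1;

	/* non-blocking ends, so a full pipe can never wedge the handler */
	fcntl(wakeup_fds[0], F_SETFL, O_NONBLOCK);
	fcntl(wakeup_fds[1], F_SETFL, O_NONBLOCK);

	act.sa_handler = wakeup_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_RESTART;
	return sigaction(SIGCHLD, &act, NULL);
}

The main loop then includes wakeup_fds[0] in its select() set and drains it before acting on the flags, exactly as the daemon below does with pipefds[0].
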
 int main(int argc, char *argv[])
 {
-	int ssock;
-	int csock;
+	int ssock, maxsockplus;
 	struct sockaddr_un saddr;
-	struct sockaddr_un caddr;
-	socklen_t clen;
-	pthread_t cli_tid;
-	pthread_t mgr_msg_tid;
-	pthread_t mgr_exec_tid;
+	socklen_t addrlen;
 	int retval;
+	const int on = 1;
+	struct sigaction act;
+	fd_set readfds;

 	init_logging("udevd");
+	dbg("version %s", UDEV_VERSION);

-	/* only let one version of the daemon run at any one time */
-	if (one_and_only() != 0)
-		exit(0);
+	if (getuid() != 0) {
+		dbg("need to be root, exit");
+		exit(1);
+	}

-	signal(SIGINT, sig_handler);
-	signal(SIGTERM, sig_handler);
+	/* setup signal handler pipe */
+	retval = pipe(pipefds);
+	if (retval < 0) {
+		dbg("error getting pipes: %s", strerror(errno));
+		exit(1);
+	}
+
+	retval = fcntl(pipefds[0], F_SETFL, O_NONBLOCK);
+	if (retval < 0) {
+		dbg("error fcntl on read pipe: %s", strerror(errno));
+		exit(1);
+	}
+
+	retval = fcntl(pipefds[1], F_SETFL, O_NONBLOCK);
+	if (retval < 0) {
+		dbg("error fcntl on write pipe: %s", strerror(errno));
+		exit(1);
+	}
+
+	/* set signal handlers */
+	act.sa_handler = sig_handler;
+	sigemptyset(&act.sa_mask);
+	act.sa_flags = SA_RESTART;
+	sigaction(SIGINT, &act, NULL);
+	sigaction(SIGTERM, &act, NULL);
+	sigaction(SIGALRM, &act, NULL);
+	sigaction(SIGCHLD, &act, NULL);

 	memset(&saddr, 0x00, sizeof(saddr));
 	saddr.sun_family = AF_LOCAL;
 	/* use abstract namespace for socket path */
 	strcpy(&saddr.sun_path[1], UDEVD_SOCK_PATH);
+	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;

-	ssock = socket(AF_LOCAL, SOCK_STREAM, 0);
+	ssock = socket(AF_LOCAL, SOCK_DGRAM, 0);
 	if (ssock == -1) {
-		dbg("error getting socket");
+		dbg("error getting socket, exit");
 		exit(1);
 	}

-	retval = bind(ssock, &saddr, sizeof(saddr));
-	if (retval < 0) {
-		dbg("bind failed\n");
-		goto exit;
-	}
-
-	retval = listen(ssock, SOMAXCONN);
+	/* the bind takes care of ensuring only one copy running */
+	retval = bind(ssock, (struct sockaddr *) &saddr, addrlen);
 	if (retval < 0) {
-		dbg("listen failed\n");
+		dbg("bind failed, exit");
 		goto exit;
 	}

-	pthread_mutex_init(&msg_lock, NULL);
-	pthread_mutex_init(&msg_active_lock, NULL);
-	pthread_mutex_init(&exec_lock, NULL);
-	pthread_mutex_init(&exec_active_lock, NULL);
-	pthread_mutex_init(&running_lock, NULL);
+	/* enable receiving of the sender credentials */
+	setsockopt(ssock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

-	/* set default attributes for created threads */
-	pthread_attr_init(&thr_attr);
-	pthread_attr_setdetachstate(&thr_attr, PTHREAD_CREATE_DETACHED);
-	pthread_attr_setstacksize(&thr_attr, 16 * 1024);
+	/* possible override of udev binary, used for testing */
+	udev_bin = getenv("UDEV_BIN");
+	if (udev_bin != NULL)
+		dbg("udev binary is set to '%s'", udev_bin);
+	else
+		udev_bin = UDEV_BIN;

-	/* init queue management */
-	pthread_create(&mgr_msg_tid, &thr_attr, msg_queue_manager, NULL);
-	pthread_create(&mgr_exec_tid, &thr_attr, exec_queue_manager, NULL);
-
-	clen = sizeof(caddr);
-	/* main loop */
+	FD_ZERO(&readfds);
+	FD_SET(ssock, &readfds);
+	FD_SET(pipefds[0], &readfds);
+	maxsockplus = ssock+1;
 	while (1) {
-		csock = accept(ssock, &caddr, &clen);
-		if (csock < 0) {
-			dbg("client accept failed\n");
+		fd_set workreadfds = readfds;
+		retval = select(maxsockplus, &workreadfds, NULL, NULL, NULL);
+
+		if (retval < 0) {
+			if (errno != EINTR)
+				dbg("error in select: %s", strerror(errno));
 			continue;
 		}
-		pthread_create(&cli_tid, &thr_attr, client_threads, (void *) csock);
+
+		if (FD_ISSET(ssock, &workreadfds))
+			handle_msg(ssock);
+
+		if (FD_ISSET(pipefds[0], &workreadfds))
+			user_sighandler();
+
+		if (children_waiting) {
+			children_waiting = 0;
+			reap_kids();
+		}
+
+		if (run_msg_q) {
+			run_msg_q = 0;
+			msg_queue_manager();
+		}
+
+		if (run_exec_q) {
+			/* this is tricky.  exec_queue_manager() loops over exec_list, and
+			 * calls running_with_devpath(), which loops over running_list. This gives
+			 * O(N*M), which can get *nasty*.  Clean up running_list before
+			 * calling exec_queue_manager().
+			 */
+			if (children_waiting) {
+				children_waiting = 0;
+				reap_kids();
+			}
+
+			run_exec_q = 0;
+			exec_queue_manager();
+		}
 	}
 exit:
 	close(ssock);
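For context, the sending side of this protocol (udevsend in the same tree) only needs a connectionless sendto() to the same abstract-namespace datagram socket; with SO_PASSCRED set on the receiver, the kernel attaches the sender's credentials automatically. A hedged sketch that leaves the hotplug_msg layout opaque and takes the socket name as a parameter, since the value of UDEVD_SOCK_PATH is not shown in this diff:

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* send one already-built message blob to the daemon's abstract socket */
static int send_to_udevd(const void *msg, size_t len, const char *sock_path)
{
	struct sockaddr_un saddr;
	socklen_t addrlen;
	int sock, rc;

	sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
	if (sock < 0)
		return -1;

	memset(&saddr, 0x00, sizeof(saddr));
	saddr.sun_family = AF_LOCAL;
	/* the leading '\0' in sun_path selects the abstract namespace */
	strcpy(&saddr.sun_path[1], sock_path);
	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(saddr.sun_path+1) + 1;

	rc = sendto(sock, msg, len, 0, (struct sockaddr *) &saddr, addrlen);
	close(sock);
	return rc;
}

Because the socket is a datagram socket bound to a fixed abstract name, the bind() in main() doubles as the single-instance check that the removed one_and_only() lock file used to provide.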