 * Copyright (C) 2004-2012 Kay Sievers <kay.sievers@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/signalfd.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/inotify.h>
#include <sys/utsname.h>

#include "sd-daemon.h"
#include "cgroup-util.h"
#include "dev-setup.h"
void udev_main_log(struct udev *udev, int priority,
                   const char *file, int line, const char *fn,
                   const char *format, va_list args)
        log_metav(priority, file, line, fn, format, args);
static struct udev_rules *rules;
static struct udev_queue_export *udev_queue_export;
static struct udev_ctrl *udev_ctrl;
static struct udev_monitor *monitor;
static int worker_watch[2] = { -1, -1 };
static int fd_signal = -1;
static int fd_ep = -1;
static int fd_inotify = -1;
static bool stop_exec_queue;
static int children_max;
static int exec_delay;
static sigset_t sigmask_orig;
static UDEV_LIST(event_list);
static UDEV_LIST(worker_list);
static bool udev_exit;
        struct udev_list_node node;
        struct udev_device *dev;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath_old;
static struct event *node_to_event(struct udev_list_node *node)
        event = (char *)node;
        event -= offsetof(struct event, node);
        return (struct event *)event;

static void event_queue_cleanup(struct udev *udev, enum event_state type);
        struct udev_list_node node;
        struct udev_monitor *monitor;
        enum worker_state state;
        unsigned long long event_start_usec;

/* passed from worker to main process */
struct worker_message {
static struct worker *node_to_worker(struct udev_list_node *node)
        worker = (char *)node;
        worker -= offsetof(struct worker, node);
        return (struct worker *)worker;
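/*
 * node_to_event() and node_to_worker() are the classic container_of pattern:
 * subtracting the offset of the embedded list node from the node's address
 * yields the address of the containing struct. A generic sketch of the same
 * idea (illustrative only, not part of this file) would be:
 *
 *   #define container_of(ptr, type, member) \
 *           ((type *)((char *)(ptr) - offsetof(type, member)))
 */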
static void event_queue_delete(struct event *event, bool export)
        udev_list_node_remove(&event->node);

        udev_queue_export_device_finished(udev_queue_export, event->dev);
        log_debug("seq %llu done with %i\n", udev_device_get_seqnum(event->dev), event->exitcode);

        udev_device_unref(event->dev);
static struct worker *worker_ref(struct worker *worker)

static void worker_cleanup(struct worker *worker)
        udev_list_node_remove(&worker->node);
        udev_monitor_unref(worker->monitor);

static void worker_unref(struct worker *worker)
        if (worker->refcount > 0)

        log_debug("worker [%u] cleaned up\n", worker->pid);
        worker_cleanup(worker);

static void worker_list_cleanup(struct udev *udev)
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                worker_cleanup(worker);
static void worker_new(struct event *event)
        struct udev *udev = event->udev;
        struct worker *worker;
        struct udev_monitor *worker_monitor;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)

        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, monitor);
        udev_monitor_enable_receiving(worker_monitor);

        worker = calloc(1, sizeof(struct worker));
        if (worker == NULL) {
                udev_monitor_unref(worker_monitor);

        /* worker + event reference */
        worker->refcount = 2;
        struct udev_device *dev = NULL;
        struct epoll_event ep_signal, ep_monitor;
        int rc = EXIT_SUCCESS;

        /* take initial device from queue */

        worker_list_cleanup(udev);
        event_queue_cleanup(udev, EVENT_UNDEF);
        udev_queue_export_unref(udev_queue_export);
        udev_monitor_unref(monitor);
        udev_ctrl_unref(udev_ctrl);

        close(worker_watch[READ_END]);

        fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        log_error("error creating signalfd: %m\n");

        fd_ep = epoll_create1(EPOLL_CLOEXEC);
        log_error("error creating epoll fd: %m\n");

        memset(&ep_signal, 0, sizeof(struct epoll_event));
        ep_signal.events = EPOLLIN;
        ep_signal.data.fd = fd_signal;

        fd_monitor = udev_monitor_get_fd(worker_monitor);
        memset(&ep_monitor, 0, sizeof(struct epoll_event));
        ep_monitor.events = EPOLLIN;
        ep_monitor.data.fd = fd_monitor;

        if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                log_error("failed to add fds to epoll: %m\n");

        /* request TERM signal if parent exits */
        prctl(PR_SET_PDEATHSIG, SIGTERM);
        struct udev_event *udev_event;
        struct worker_message msg;

        log_debug("seq %llu running\n", udev_device_get_seqnum(dev));
        udev_event = udev_event_new(dev);
        if (udev_event == NULL) {

        /* needed for SIGCHLD/SIGTERM in spawn() */
        udev_event->fd_signal = fd_signal;

        udev_event->exec_delay = exec_delay;

        /* apply rules, create node, symlinks */
        err = udev_event_execute_rules(udev_event, rules, &sigmask_orig);

        udev_event_execute_run(udev_event, &sigmask_orig);

        /* apply/restore inotify watch */
        if (err == 0 && udev_event->inotify_watch) {
                udev_watch_begin(udev, dev);
                udev_device_update_db(dev);

        /* send processed event back to libudev listeners */
        udev_monitor_send_device(worker_monitor, NULL, dev);

        /* send udevd the result of the event execution */
        memset(&msg, 0, sizeof(struct worker_message));
        send(worker_watch[WRITE_END], &msg, sizeof(struct worker_message), 0);
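        /* worker_returned() in the main daemon matches this message back to the
         * worker via msg.pid and uses msg.exitcode to finish the queued event */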
        log_debug("seq %llu processed with %i\n", udev_device_get_seqnum(dev), err);

        udev_device_unref(dev);

        if (udev_event->sigterm) {
                udev_event_unref(udev_event);

        udev_event_unref(udev_event);
        /* wait for more device messages from main udevd, or term signal */
        while (dev == NULL) {
                struct epoll_event ev[4];

                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                log_error("failed to poll: %m\n");

                for (i = 0; i < fdcount; i++) {
                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                dev = udev_monitor_receive_device(worker_monitor);
                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                struct signalfd_siginfo fdsi;

                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                if (size != sizeof(struct signalfd_siginfo))

                                switch (fdsi.ssi_signo) {

        udev_device_unref(dev);
        close(worker_watch[WRITE_END]);
        udev_rules_unref(rules);
        udev_builtin_exit(udev);
        udev_monitor_unref(worker_monitor);

        udev_monitor_unref(worker_monitor);
        event->state = EVENT_QUEUED;
        log_error("fork of child failed: %m\n");

        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = worker_monitor;

        worker->state = WORKER_RUNNING;
        worker->event_start_usec = now_usec();
        worker->event = event;
        event->state = EVENT_RUNNING;
        udev_list_node_append(&worker->node, &worker_list);

        log_debug("seq %llu forked new worker [%u]\n", udev_device_get_seqnum(event->dev), pid);
static void event_run(struct event *event)
        struct udev_list_node *loop;

        udev_list_node_foreach(loop, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                if (worker->state != WORKER_IDLE)

                count = udev_monitor_send_device(monitor, worker->monitor, event->dev);
                log_error("worker [%u] did not accept message %zi (%m), kill it\n", worker->pid, count);
                kill(worker->pid, SIGKILL);
                worker->state = WORKER_KILLED;

                worker->event = event;
                worker->state = WORKER_RUNNING;
                worker->event_start_usec = now_usec();
                event->state = EVENT_RUNNING;

        if (children >= children_max) {
                if (children_max > 1)
                        log_debug("maximum number (%i) of children reached\n", children);

        /* start new worker and pass initial device */
static int event_queue_insert(struct udev_device *dev)
        event = calloc(1, sizeof(struct event));

        event->udev = udev_device_get_udev(dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = (strcmp("block", udev_device_get_subsystem(dev)) == 0);
        event->ifindex = udev_device_get_ifindex(dev);

        udev_queue_export_device_queued(udev_queue_export, dev);
        log_debug("seq %llu queued, '%s' '%s'\n", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;
        udev_list_node_append(&event->node, &event_list);
static void worker_kill(struct udev *udev)
        struct udev_list_node *loop;

        udev_list_node_foreach(loop, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                if (worker->state == WORKER_KILLED)

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
/* look up an event for an identical, parent, or child device */
static bool is_devpath_busy(struct event *event)
        struct udev_list_node *loop;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &event_list) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event; earlier ones cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)

                /* found ourselves, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)

                /* check our old name */
                if (event->devpath_old != NULL && strcmp(loop_event->devpath, event->devpath_old) == 0) {
                        event->delaying_seqnum = loop_event->seqnum;

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* device names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                        event->delaying_seqnum = loop_event->seqnum;

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

                /* no matching device */
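                /*
                 * Example of the prefix logic above: for an event on
                 * ".../block/sda" and a queued event on ".../block/sda/sda1",
                 * the shorter devpath is a prefix of the longer one and the
                 * character at the common length is '/', so the two events are
                 * related as parent and child and the newer one has to wait.
                 * (Paths are illustrative.)
                 */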
static void event_queue_start(struct udev *udev)
        struct udev_list_node *loop;

        udev_list_node_foreach(loop, &event_list) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(event))

static void event_queue_cleanup(struct udev *udev, enum event_state match_type)
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &event_list) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)

                event_queue_delete(event, false);
static void worker_returned(int fd_worker)
        struct worker_message msg;
        struct udev_list_node *loop;

        size = recv(fd_worker, &msg, sizeof(struct worker_message), MSG_DONTWAIT);
        if (size != sizeof(struct worker_message))

        /* look up the worker that sent the message */
        udev_list_node_foreach(loop, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                if (worker->pid != msg.pid)

                /* worker returned */
                worker->event->exitcode = msg.exitcode;
                event_queue_delete(worker->event, true);
                worker->event = NULL;
                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;
                worker_unref(worker);
/* receive the udevd message from userspace */
static struct udev_ctrl_connection *handle_ctrl_msg(struct udev_ctrl *uctrl)
        struct udev *udev = udev_ctrl_get_udev(uctrl);
        struct udev_ctrl_connection *ctrl_conn;
        struct udev_ctrl_msg *ctrl_msg = NULL;

        ctrl_conn = udev_ctrl_get_connection(uctrl);
        if (ctrl_conn == NULL)

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (ctrl_msg == NULL)

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        log_debug("udevd message (SET_LOG_PRIORITY) received, log_priority=%i\n", i);
        log_set_max_level(i);
        udev_set_log_priority(udev, i);

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received\n");
                stop_exec_queue = true;

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received\n");
                stop_exec_queue = false;

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received\n");

        str = udev_ctrl_get_set_env(ctrl_msg);
        val = strchr(key, '=');

        if (val[0] == '\0') {
                log_debug("udevd message (ENV) received, unset '%s'\n", key);
                udev_add_property(udev, key, NULL);

                log_debug("udevd message (ENV) received, set '%s=%s'\n", key, val);
                udev_add_property(udev, key, val);

        log_error("wrong key format '%s'\n", key);

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i\n", i);

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received\n");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received\n");

                /* keep reference to block the client until we exit */
                udev_ctrl_connection_ref(ctrl_conn);

        udev_ctrl_msg_unref(ctrl_msg);
        return udev_ctrl_connection_unref(ctrl_conn);
/* read inotify messages */
static int handle_inotify(struct udev *udev)
        struct inotify_event *ev;

        if ((ioctl(fd_inotify, FIONREAD, &nbytes) < 0) || (nbytes <= 0))
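        /* FIONREAD reports how many bytes are waiting on the inotify fd, which
         * sizes the buffer for the variable-length event records read below */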
        buf = malloc(nbytes);
        log_error("error getting buffer for inotify\n");

        nbytes = read(fd_inotify, buf, nbytes);

        for (pos = 0; pos < nbytes; pos += sizeof(struct inotify_event) + ev->len) {
                struct udev_device *dev;

                ev = (struct inotify_event *)(buf + pos);
                dev = udev_watch_lookup(udev, ev->wd);
                log_debug("inotify event: %x for %s\n", ev->mask, udev_device_get_devnode(dev));
                if (ev->mask & IN_CLOSE_WRITE) {
                        char filename[UTIL_PATH_SIZE];

                        log_debug("device %s closed, synthesising 'change'\n", udev_device_get_devnode(dev));
                        util_strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                        fd = open(filename, O_WRONLY);
                        if (write(fd, "change", 6) < 0)
                                log_debug("error writing uevent: %m\n");
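                        /* writing an action name to a device's sysfs uevent file makes
                         * the kernel emit the corresponding uevent, so listeners see a
                         * synthetic "change" event after the node was closed for writing */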
                if (ev->mask & IN_IGNORED)
                        udev_watch_end(udev, dev);

                udev_device_unref(dev);
static void handle_signal(struct udev *udev, int signo)
        struct udev_list_node *loop, *tmp;

        pid = waitpid(-1, &status, WNOHANG);

        udev_list_node_foreach_safe(loop, tmp, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                if (worker->pid != pid)

                log_debug("worker [%u] exit\n", pid);

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) != 0)
                                log_error("worker [%u] exit with return code %i\n", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_error("worker [%u] terminated by signal %i (%s)\n",
                                  pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_error("worker [%u] stopped\n", pid);
                } else if (WIFCONTINUED(status)) {
                        log_error("worker [%u] continued\n", pid);

                        log_error("worker [%u] exit with status 0x%04x\n", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        log_error("worker [%u] failed while handling '%s'\n",
                                  pid, worker->event->devpath);
                        worker->event->exitcode = -32;
                        event_queue_delete(worker->event, true);
                        /* drop reference taken for state 'running' */
                        worker_unref(worker);

                worker_unref(worker);
static void static_dev_create_from_modules(struct udev *udev)
        struct utsname kernel;
        char modules[UTIL_PATH_SIZE];

        util_strscpyl(modules, sizeof(modules), "/lib/modules/", kernel.release, "/modules.devname", NULL);
        f = fopen(modules, "r");

        while (fgets(buf, sizeof(buf), f) != NULL) {
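                /* each line of modules.devname is expected to look roughly like
                 * "fuse fuse c10:229": module name, device node name, then the
                 * node type ('c' or 'b') and major:minor (illustrative example,
                 * not taken from this file) */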
                char filename[UTIL_PATH_SIZE];

                s = strchr(modname, ' ');
                s = strchr(devname, ' ');
                s = strchr(devno, ' ');
                s = strchr(devno, '\n');
                if (sscanf(devno, "%c%u:%u", &type, &maj, &min) != 3)

                else if (type == 'b')

                util_strscpyl(filename, sizeof(filename), "/dev/", devname, NULL);
                mkdir_parents(filename, 0755);
                label_context_set(filename, mode);
                log_debug("mknod '%s' %c%u:%u\n", filename, type, maj, min);
                if (mknod(filename, mode, makedev(maj, min)) < 0 && errno == EEXIST)
                        utimensat(AT_FDCWD, filename, NULL, 0);
                label_context_clear();
static int mem_size_mb(void)
        long int memsize = -1;

        f = fopen("/proc/meminfo", "r");

        while (fgets(buf, sizeof(buf), f) != NULL) {
                if (sscanf(buf, "MemTotal: %ld kB", &value) == 1) {
                        memsize = value / 1024;
static int convert_db(struct udev *udev)
        char filename[UTIL_PATH_SIZE];
        struct udev_enumerate *udev_enumerate;
        struct udev_list_entry *list_entry;

        /* current database */
        if (access("/run/udev/data", F_OK) >= 0)

        /* make sure we do not get here again */
        mkdir_parents("/run/udev/data", 0755);
        mkdir(filename, 0755);

        util_strscpyl(filename, sizeof(filename), "/dev/.udev/db", NULL);
        if (access(filename, F_OK) < 0)

        f = fopen("/dev/kmsg", "w");
        fprintf(f, "<30>udevd[%u]: converting old udev database\n", getpid());

        udev_enumerate = udev_enumerate_new(udev);
        if (udev_enumerate == NULL)

        udev_enumerate_scan_devices(udev_enumerate);
        udev_list_entry_foreach(list_entry, udev_enumerate_get_list_entry(udev_enumerate)) {
                struct udev_device *device;

                device = udev_device_new_from_syspath(udev, udev_list_entry_get_name(list_entry));

                /* try to find the old database for devices without a current one */
                if (udev_device_read_db(device, NULL) < 0) {
                        char devpath[UTIL_PATH_SIZE];
                        char from[UTIL_PATH_SIZE];

                        /* find database in old location */
                        id = udev_device_get_id_filename(device);
                        util_strscpyl(from, sizeof(from), "/dev/.udev/db/", id, NULL);
                        if (lstat(from, &stats) == 0) {
                                udev_device_read_db(device, from);

                        /* find old database with $subsys:$sysname name */
                        util_strscpyl(from, sizeof(from), "/dev/.udev/db/",
                                      udev_device_get_subsystem(device), ":", udev_device_get_sysname(device), NULL);
                        if (lstat(from, &stats) == 0) {
                                udev_device_read_db(device, from);

                        /* find old database with the encoded devpath name */
                        util_path_encode(udev_device_get_devpath(device), devpath, sizeof(devpath));
                        util_strscpyl(from, sizeof(from), "/dev/.udev/db/", devpath, NULL);
                        if (lstat(from, &stats) == 0) {
                                udev_device_read_db(device, from);

                        /* write out new database */
                        udev_device_update_db(device);

                udev_device_unref(device);

        udev_enumerate_unref(udev_enumerate);
static int systemd_fds(struct udev *udev, int *rctrl, int *rnetlink)
        int ctrl = -1, netlink = -1;

        n = sd_listen_fds(true);
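        /* socket activation: systemd passes pre-bound sockets as fds starting at
         * SD_LISTEN_FDS_START (3); the "true" argument clears $LISTEN_FDS/$LISTEN_PID
         * so the fds are not passed on to child processes */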
        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {

        if (ctrl < 0 || netlink < 0)

        log_debug("ctrl=%i netlink=%i\n", ctrl, netlink);
        *rnetlink = netlink;
int main(int argc, char *argv[])
        int daemonize = false;
        int resolve_names = 1;
        static const struct option options[] = {
                { "daemon", no_argument, NULL, 'd' },
                { "debug", no_argument, NULL, 'D' },
                { "children-max", required_argument, NULL, 'c' },
                { "exec-delay", required_argument, NULL, 'e' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help", no_argument, NULL, 'h' },
                { "version", no_argument, NULL, 'V' },
        int fd_netlink = -1;
        struct epoll_event ep_ctrl, ep_inotify, ep_signal, ep_netlink, ep_worker;
        struct udev_ctrl_connection *ctrl_conn = NULL;

        log_parse_environment();
        udev_set_log_fn(udev, udev_main_log);
        log_debug("version %s\n", VERSION);

        option = getopt_long(argc, argv, "c:deDtN:hV", options, NULL);

        children_max = strtoul(optarg, NULL, 0);

        exec_delay = strtoul(optarg, NULL, 0);

        log_set_max_level(LOG_DEBUG);
        udev_set_log_priority(udev, LOG_INFO);

        if (strcmp(optarg, "early") == 0) {
        } else if (strcmp(optarg, "late") == 0) {
        } else if (strcmp(optarg, "never") == 0) {

        fprintf(stderr, "resolve-names must be early, late or never\n");
        log_error("resolve-names must be early, late or never\n");

        printf("Usage: udevd OPTIONS\n"
               " --children-max=<maximum number of workers>\n"
               " --exec-delay=<seconds to wait before executing RUN=>\n"
               " --resolve-names=early|late|never\n"

        printf("%s\n", VERSION);
        /*
         * read the kernel commandline, in case we need to get into debug mode
         *   udev.log-priority=<level>              syslog priority
         *   udev.children-max=<number of workers>  events are fully serialized if set to 1
         */
        f = fopen("/proc/cmdline", "r");

        if (fgets(cmdline, sizeof(cmdline), f) != NULL) {
                pos = strstr(cmdline, "udev.log-priority=");
                pos += strlen("udev.log-priority=");
                udev_set_log_priority(udev, util_log_priority(pos));

                pos = strstr(cmdline, "udev.children-max=");
                pos += strlen("udev.children-max=");
                children_max = strtoul(pos, NULL, 0);

                pos = strstr(cmdline, "udev.exec-delay=");
                pos += strlen("udev.exec-delay=");
                exec_delay = strtoul(pos, NULL, 0);
        if (getuid() != 0) {
                fprintf(stderr, "root privileges required\n");
                log_error("root privileges required\n");

        /* set umask before creating any file/directory */

        mkdir("/run/udev", 0755);

        static_dev_create_from_modules(udev);

        /* before opening new files, make sure std{in,out,err} fds are in a sane state */
        fd = open("/dev/null", O_RDWR);
        if (write(STDOUT_FILENO, 0, 0) < 0)
                dup2(fd, STDOUT_FILENO);
        if (write(STDERR_FILENO, 0, 0) < 0)
                dup2(fd, STDERR_FILENO);
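        /* a zero-length write succeeds on a valid descriptor but fails with EBADF on
         * a closed one, so a failure here means the std fd should point at /dev/null */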
        if (fd > STDERR_FILENO)

        fprintf(stderr, "cannot open /dev/null\n");
        log_error("cannot open /dev/null\n");
        if (systemd_fds(udev, &fd_ctrl, &fd_netlink) >= 0) {
                /* get the control and netlink sockets from systemd */
                udev_ctrl = udev_ctrl_new_from_fd(udev, fd_ctrl);
                if (udev_ctrl == NULL) {
                        log_error("error taking over udev control socket");

                monitor = udev_monitor_new_from_netlink_fd(udev, "kernel", fd_netlink);
                if (monitor == NULL) {
                        log_error("error taking over netlink socket\n");

                /* get our own cgroup; we regularly kill everything udev has left behind */
                if (cg_get_by_pid(SYSTEMD_CGROUP_CONTROLLER, 0, &udev_cgroup) < 0)

                /* open control and netlink socket */
                udev_ctrl = udev_ctrl_new(udev);
                if (udev_ctrl == NULL) {
                        fprintf(stderr, "error initializing udev control socket");
                        log_error("error initializing udev control socket");

                fd_ctrl = udev_ctrl_get_fd(udev_ctrl);

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (monitor == NULL) {
                        fprintf(stderr, "error initializing netlink socket\n");
                        log_error("error initializing netlink socket\n");

                fd_netlink = udev_monitor_get_fd(monitor);

        if (udev_monitor_enable_receiving(monitor) < 0) {
                fprintf(stderr, "error binding netlink socket\n");
                log_error("error binding netlink socket\n");

        if (udev_ctrl_enable_receiving(udev_ctrl) < 0) {
                fprintf(stderr, "error binding udev control socket\n");
                log_error("error binding udev control socket\n");

        udev_monitor_set_receive_buffer_size(monitor, 128*1024*1024);
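        /* the large receive buffer presumably keeps bursts of kernel uevents
         * (e.g. during coldplug) from being dropped before workers catch up */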
        /* create queue file before signalling 'ready', to make sure we block 'settle' */
        udev_queue_export = udev_queue_export_new(udev);
        if (udev_queue_export == NULL) {
                log_error("error creating queue file\n");

        log_error("fork of daemon failed: %m\n");

        goto exit_daemonize;

        fd = open("/proc/self/oom_score_adj", O_RDWR);
                /* fall back to the old interface */
                fd = open("/proc/self/oom_adj", O_RDWR);
                log_error("error disabling OOM: %m\n");
                /* OOM_DISABLE == -17 */
                write(fd, "-17", 3);

        write(fd, "-1000", 5);
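        /* -1000 is OOM_SCORE_ADJ_MIN for the newer oom_score_adj interface,
         * i.e. the main daemon is exempted from the OOM killer */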
        sd_notify(1, "READY=1");

        f = fopen("/dev/kmsg", "w");
        fprintf(f, "<30>udevd[%u]: starting version " VERSION "\n", getpid());

        fd = open("/dev/null", O_RDWR);
        dup2(fd, STDIN_FILENO);
        dup2(fd, STDOUT_FILENO);
        dup2(fd, STDERR_FILENO);

        fd_inotify = udev_watch_init(udev);
        if (fd_inotify < 0) {
                fprintf(stderr, "error initializing inotify\n");
                log_error("error initializing inotify\n");

        udev_watch_restore(udev);
        /* block and listen to all signals on signalfd */
        sigprocmask(SIG_SETMASK, &mask, &sigmask_orig);
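        /* the previous signal mask is saved in sigmask_orig so workers can restore
         * it for the programs they spawn (see the uses in worker_new() above) */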
        fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        if (fd_signal < 0) {
                fprintf(stderr, "error creating signalfd\n");
                log_error("error creating signalfd\n");

        /* unnamed socket from workers to the main daemon */
        if (socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, worker_watch) < 0) {
                fprintf(stderr, "error creating socketpair\n");
                log_error("error creating socketpair\n");

        fd_worker = worker_watch[READ_END];

        udev_builtin_init(udev);

        rules = udev_rules_new(udev, resolve_names);
        if (rules == NULL) {
                log_error("error reading rules\n");
        memset(&ep_ctrl, 0, sizeof(struct epoll_event));
        ep_ctrl.events = EPOLLIN;
        ep_ctrl.data.fd = fd_ctrl;

        memset(&ep_inotify, 0, sizeof(struct epoll_event));
        ep_inotify.events = EPOLLIN;
        ep_inotify.data.fd = fd_inotify;

        memset(&ep_signal, 0, sizeof(struct epoll_event));
        ep_signal.events = EPOLLIN;
        ep_signal.data.fd = fd_signal;

        memset(&ep_netlink, 0, sizeof(struct epoll_event));
        ep_netlink.events = EPOLLIN;
        ep_netlink.data.fd = fd_netlink;

        memset(&ep_worker, 0, sizeof(struct epoll_event));
        ep_worker.events = EPOLLIN;
        ep_worker.data.fd = fd_worker;

        fd_ep = epoll_create1(EPOLL_CLOEXEC);
        log_error("error creating epoll fd: %m\n");

        if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_ctrl, &ep_ctrl) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_inotify, &ep_inotify) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_netlink, &ep_netlink) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_worker, &ep_worker) < 0) {
                log_error("failed to add fds to epoll: %m\n");
        /* if needed, convert old database from earlier udev version */

        if (children_max <= 0) {
                int memsize = mem_size_mb();

                /* set value depending on the amount of RAM */
                children_max = 128 + (memsize / 8);
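                /* e.g. with 2048 MB of RAM this yields 128 + 256 = 384 workers
                 * (an illustrative figure, not a value from this file) */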
        log_debug("set children_max to %u\n", children_max);

        udev_rules_apply_static_dev_perms(rules);

        udev_list_node_init(&event_list);
        udev_list_node_init(&worker_list);
        static unsigned long long last_usec;
        struct epoll_event ev[8];
        bool is_worker, is_signal, is_inotify, is_netlink, is_ctrl;

        /* close sources of new events and discard buffered events */
        epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_ctrl, NULL);

        if (monitor != NULL) {
                epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_netlink, NULL);
                udev_monitor_unref(monitor);

        if (fd_inotify >= 0) {
                epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_inotify, NULL);

        /* discard queued events and kill workers */
        event_queue_cleanup(udev, EVENT_QUEUED);

        /* exit after everything has been cleaned up */
        if (udev_list_node_is_empty(&event_list) && udev_list_node_is_empty(&worker_list))

        /* timeout at exit for workers to finish */
        timeout = 30 * 1000;
        } else if (udev_list_node_is_empty(&event_list) && !children) {

        /* clean up possible left-over processes in our cgroup */
        cg_kill(SYSTEMD_CGROUP_CONTROLLER, udev_cgroup, SIGKILL, false, true, NULL);
        /* kill idle or hanging workers */

        fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), timeout);

        struct udev_list_node *loop;

        log_error("timeout, giving up waiting for workers to finish\n");

        /* kill idle workers */
        if (udev_list_node_is_empty(&event_list)) {
                log_debug("cleanup idle workers\n");

        /* check for hanging events */
        udev_list_node_foreach(loop, &worker_list) {
                struct worker *worker = node_to_worker(loop);

                if (worker->state != WORKER_RUNNING)

                if ((now_usec() - worker->event_start_usec) > 30 * 1000 * 1000) {
                        log_error("worker [%u] %s timeout; kill it\n", worker->pid,
                                  worker->event ? worker->event->devpath : "<idle>");
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        /* drop reference taken for state 'running' */
                        worker_unref(worker);
                        if (worker->event) {
                                log_error("seq %llu '%s' killed\n",
                                          udev_device_get_seqnum(worker->event->dev), worker->event->devpath);
                                worker->event->exitcode = -64;
                                event_queue_delete(worker->event, true);
                                worker->event = NULL;
        is_worker = is_signal = is_inotify = is_netlink = is_ctrl = false;
        for (i = 0; i < fdcount; i++) {
                if (ev[i].data.fd == fd_worker && ev[i].events & EPOLLIN)
                else if (ev[i].data.fd == fd_netlink && ev[i].events & EPOLLIN)
                else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN)
                else if (ev[i].data.fd == fd_inotify && ev[i].events & EPOLLIN)
                else if (ev[i].data.fd == fd_ctrl && ev[i].events & EPOLLIN)

        /* check for changed config, every 3 seconds at most */
        if ((now_usec() - last_usec) > 3 * 1000 * 1000) {
                if (udev_rules_check_timestamp(rules))
                if (udev_builtin_validate(udev))

                last_usec = now_usec();

        /* reload requested, HUP signal received, rules changed, builtin changed */
        rules = udev_rules_unref(rules);
        udev_builtin_exit(udev);
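        /* dropping the rules here makes the event loop re-read them lazily, right
         * before the next queued events are dispatched (see udev_rules_new() below) */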
        /* event has finished */
        worker_returned(fd_worker);

        struct udev_device *dev;

        dev = udev_monitor_receive_device(monitor);
        udev_device_set_usec_initialized(dev, now_usec());
        if (event_queue_insert(dev) < 0)
                udev_device_unref(dev);

        /* start new events */
        if (!udev_list_node_is_empty(&event_list) && !udev_exit && !stop_exec_queue) {
                rules = udev_rules_new(udev, resolve_names);
                event_queue_start(udev);

        struct signalfd_siginfo fdsi;

        size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
        if (size == sizeof(struct signalfd_siginfo))
                handle_signal(udev, fdsi.ssi_signo);
        /* we are shutting down, the events below are not handled anymore */

        /* device node watch */
        handle_inotify(udev);

        /*
         * This needs to be after the inotify handling, to make sure that the
         * ping is sent back after the "change" events possibly generated by
         * the inotify device node watch.
         *
         * A single time we may receive a client connection which we need to
         * keep open to block the client. It will be closed right before we
         * exit.
         */
        ctrl_conn = handle_ctrl_msg(udev_ctrl);
        udev_queue_export_cleanup(udev_queue_export);
        udev_ctrl_cleanup(udev_ctrl);

        worker_list_cleanup(udev);
        event_queue_cleanup(udev, EVENT_UNDEF);
        udev_rules_unref(rules);
        udev_builtin_exit(udev);

        if (worker_watch[READ_END] >= 0)
                close(worker_watch[READ_END]);
        if (worker_watch[WRITE_END] >= 0)
                close(worker_watch[WRITE_END]);
        udev_monitor_unref(monitor);
        udev_queue_export_unref(udev_queue_export);
        udev_ctrl_connection_unref(ctrl_conn);
        udev_ctrl_unref(udev_ctrl);