1 /* SPDX-License-Identifier: LGPL-2.1+ */
3 This file is part of systemd.
5 Copyright 2013 Lennart Poettering
9 #include <sys/timerfd.h>
12 #include "sd-daemon.h"
16 #include "alloc-util.h"
18 //#include "fs-util.h"
24 #include "process-util.h"
26 #include "signal-util.h"
27 #include "string-table.h"
28 #include "string-util.h"
29 #include "time-util.h"
32 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
34 typedef enum EventSourceType {
38 SOURCE_TIME_MONOTONIC,
39 SOURCE_TIME_REALTIME_ALARM,
40 SOURCE_TIME_BOOTTIME_ALARM,
48 _SOURCE_EVENT_SOURCE_TYPE_MAX,
49 _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
52 static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
54 [SOURCE_TIME_REALTIME] = "realtime",
55 [SOURCE_TIME_BOOTTIME] = "boottime",
56 [SOURCE_TIME_MONOTONIC] = "monotonic",
57 [SOURCE_TIME_REALTIME_ALARM] = "realtime-alarm",
58 [SOURCE_TIME_BOOTTIME_ALARM] = "boottime-alarm",
59 [SOURCE_SIGNAL] = "signal",
60 [SOURCE_CHILD] = "child",
61 [SOURCE_DEFER] = "defer",
62 [SOURCE_POST] = "post",
63 [SOURCE_EXIT] = "exit",
64 [SOURCE_WATCHDOG] = "watchdog",
65 [SOURCE_INOTIFY] = "inotify",
68 DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
70 /* All objects we use in epoll events start with this value, so that
71 * we know how to dispatch it */
72 typedef enum WakeupType {
79 _WAKEUP_TYPE_INVALID = -1,
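/* Illustrative sketch (not part of the original file): since every pointer
 * registered with epoll points to a structure whose first member is a
 * WakeupType, a single epoll_wait() loop can dispatch by inspecting that
 * field before casting, along these lines:
 *
 *     struct epoll_event ev;
 *     ...
 *     switch (*(WakeupType*) ev.data.ptr) {
 *     case WAKEUP_EVENT_SOURCE: ... cast to sd_event_source and process ...
 *     case WAKEUP_CLOCK_DATA:   ... cast to struct clock_data, flush timerfd ...
 *     case WAKEUP_SIGNAL_DATA:  ... cast to struct signal_data and process ...
 *     case WAKEUP_INOTIFY_DATA: ... cast to struct inotify_data and read ...
 *     }
 */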
82 #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
86 struct sd_event_source {
93 sd_event_handler_t prepare;
97 EventSourceType type:5;
104 unsigned pending_index;
105 unsigned prepare_index;
106 uint64_t pending_iteration;
107 uint64_t prepare_iteration;
109 LIST_FIELDS(sd_event_source, sources);
113 sd_event_io_handler_t callback;
121 sd_event_time_handler_t callback;
122 usec_t next, accuracy;
123 unsigned earliest_index;
124 unsigned latest_index;
127 sd_event_signal_handler_t callback;
128 struct signalfd_siginfo siginfo;
132 sd_event_child_handler_t callback;
138 sd_event_handler_t callback;
141 sd_event_handler_t callback;
144 sd_event_handler_t callback;
145 unsigned prioq_index;
148 sd_event_inotify_handler_t callback;
150 struct inode_data *inode_data;
151 LIST_FIELDS(sd_event_source, by_inode_data);
160 /* For all clocks we maintain two priority queues each, one
161 * ordered by the earliest times the events may be
162 * dispatched, and one ordered by the latest times they must
163 * have been dispatched. The range between the top entries in
164 * the two prioqs is the time window we can freely schedule
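/* Illustrative example (assumed numbers): with source A at next=100ms,
 * accuracy=250ms and source B at next=150ms, accuracy=10ms, the earliest
 * prioq peeks A (100ms) and the latest prioq peeks B (150ms + 10ms = 160ms).
 * Nothing is due before 100ms and we must wake no later than 160ms; a single
 * wakeup at, say, 155ms dispatches both sources at once. */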
177 /* For each priority we maintain one signal fd, so that we
178 * only have to dequeue a single event per priority at a
184 sd_event_source *current;
187 /* A structure listing all event sources currently watching a specific inode */
189 /* The identifier for the inode, the combination of the .st_dev + .st_ino fields of the file */
193 /* An fd of the inode to watch. The fd is kept open until the next iteration of the loop, so that the
194 * priority can still be rearranged until then: changing the priority means adding a watch descriptor to the
195 * inotify fd of the right priority, which is only possible with a handle to the original inode. We keep a
196 * list of all inode_data objects with an open fd in the to_close list (see below) of the sd-event object,
197 * so that it is efficient to close everything before entering the next event loop
201 /* The inotify "watch descriptor" */
204 /* The combination of the masks of all inotify watches on this inode we manage. This is also the mask that has
205 * most recently been set on the watch descriptor. */
206 uint32_t combined_mask;
208 /* All event sources subscribed to this inode */
209 LIST_HEAD(sd_event_source, event_sources);
211 /* The inotify object we watch this inode with */
212 struct inotify_data *inotify_data;
214 /* A linked list of all inode data objects with fds to close (see above) */
215 LIST_FIELDS(struct inode_data, to_close);
218 /* A structure encapsulating an inotify fd */
219 struct inotify_data {
222 /* For each priority we maintain one inotify fd, so that we only have to dequeue a single event per priority at
228 Hashmap *inodes; /* The inode_data structures keyed by dev+ino */
229 Hashmap *wd; /* The inode_data structures keyed by the watch descriptor for each */
231 /* The buffer we read inotify events into */
232 union inotify_event_buffer buffer;
233 size_t buffer_filled; /* fill level of the buffer */
235 /* How many event sources are currently marked pending for this inotify. We won't read new events off the
236 * inotify fd as long as there are still pending events on the inotify (because we have no strategy for queuing
237 * the events locally if they can't be coalesced). */
240 /* A linked list of all inotify objects with data already read that still need processing. We keep this list
241 * to make it efficient to figure out what inotify objects to process data on next. */
242 LIST_FIELDS(struct inotify_data, buffered);
254 /* timerfd_create() only supports these five clocks so far. We
255 * can add support for more clocks when the kernel learns to
256 * deal with them, too. */
257 struct clock_data realtime;
258 struct clock_data boottime;
259 struct clock_data monotonic;
260 struct clock_data realtime_alarm;
261 struct clock_data boottime_alarm;
265 sd_event_source **signal_sources; /* indexed by signal number */
266 Hashmap *signal_data; /* indexed by priority */
268 Hashmap *child_sources;
269 unsigned n_enabled_child_sources;
275 Hashmap *inotify_data; /* indexed by priority */
277 /* A list of inode structures that still have an fd open, which we need to close before the next loop iteration */
278 LIST_HEAD(struct inode_data, inode_data_to_close);
280 /* A list of inotify objects that already have events buffered which aren't processed yet */
281 LIST_HEAD(struct inotify_data, inotify_data_buffered);
286 triple_timestamp timestamp;
289 bool exit_requested:1;
290 bool need_process_child:1;
292 bool profile_delays:1;
297 sd_event **default_event_ptr;
299 usec_t watchdog_last, watchdog_period;
303 LIST_HEAD(sd_event_source, sources);
305 usec_t last_run, last_log;
306 unsigned delays[sizeof(usec_t) * 8];
309 static thread_local sd_event *default_event = NULL;
311 static void source_disconnect(sd_event_source *s);
312 static void event_gc_inode_data(sd_event *e, struct inode_data *d);
314 static sd_event *event_resolve(sd_event *e) {
315 return e == SD_EVENT_DEFAULT ? default_event : e;
318 static int pending_prioq_compare(const void *a, const void *b) {
319 const sd_event_source *x = a, *y = b;
324 /* Enabled ones first */
325 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
327 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
330 /* Lower priority values first */
331 if (x->priority < y->priority)
333 if (x->priority > y->priority)
336 /* Older entries first */
337 if (x->pending_iteration < y->pending_iteration)
339 if (x->pending_iteration > y->pending_iteration)
345 static int prepare_prioq_compare(const void *a, const void *b) {
346 const sd_event_source *x = a, *y = b;
351 /* Enabled ones first */
352 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
354 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
357 /* Move most recently prepared ones last, so that we can stop
358 * preparing as soon as we hit one that has already been
359 * prepared in the current iteration */
360 if (x->prepare_iteration < y->prepare_iteration)
362 if (x->prepare_iteration > y->prepare_iteration)
365 /* Lower priority values first */
366 if (x->priority < y->priority)
368 if (x->priority > y->priority)
374 static int earliest_time_prioq_compare(const void *a, const void *b) {
375 const sd_event_source *x = a, *y = b;
377 assert(EVENT_SOURCE_IS_TIME(x->type));
378 assert(x->type == y->type);
380 /* Enabled ones first */
381 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
383 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
386 /* Move the pending ones to the end */
387 if (!x->pending && y->pending)
389 if (x->pending && !y->pending)
393 if (x->time.next < y->time.next)
395 if (x->time.next > y->time.next)
401 static usec_t time_event_source_latest(const sd_event_source *s) {
402 return usec_add(s->time.next, s->time.accuracy);
405 static int latest_time_prioq_compare(const void *a, const void *b) {
406 const sd_event_source *x = a, *y = b;
408 assert(EVENT_SOURCE_IS_TIME(x->type));
409 assert(x->type == y->type);
411 /* Enabled ones first */
412 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
414 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
417 /* Move the pending ones to the end */
418 if (!x->pending && y->pending)
420 if (x->pending && !y->pending)
424 if (time_event_source_latest(x) < time_event_source_latest(y))
426 if (time_event_source_latest(x) > time_event_source_latest(y))
432 static int exit_prioq_compare(const void *a, const void *b) {
433 const sd_event_source *x = a, *y = b;
435 assert(x->type == SOURCE_EXIT);
436 assert(y->type == SOURCE_EXIT);
438 /* Enabled ones first */
439 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
441 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
444 /* Lower priority values first */
445 if (x->priority < y->priority)
447 if (x->priority > y->priority)
453 static void free_clock_data(struct clock_data *d) {
455 assert(d->wakeup == WAKEUP_CLOCK_DATA);
458 prioq_free(d->earliest);
459 prioq_free(d->latest);
462 static void event_free(sd_event *e) {
467 while ((s = e->sources)) {
469 source_disconnect(s);
470 sd_event_source_unref(s);
473 assert(e->n_sources == 0);
475 if (e->default_event_ptr)
476 *(e->default_event_ptr) = NULL;
478 safe_close(e->epoll_fd);
479 safe_close(e->watchdog_fd);
481 free_clock_data(&e->realtime);
482 free_clock_data(&e->boottime);
483 free_clock_data(&e->monotonic);
484 free_clock_data(&e->realtime_alarm);
485 free_clock_data(&e->boottime_alarm);
487 prioq_free(e->pending);
488 prioq_free(e->prepare);
491 free(e->signal_sources);
492 hashmap_free(e->signal_data);
494 hashmap_free(e->inotify_data);
496 hashmap_free(e->child_sources);
497 set_free(e->post_sources);
501 _public_ int sd_event_new(sd_event** ret) {
505 assert_return(ret, -EINVAL);
507 e = new(sd_event, 1);
515 .realtime.wakeup = WAKEUP_CLOCK_DATA,
517 .realtime.next = USEC_INFINITY,
518 .boottime.wakeup = WAKEUP_CLOCK_DATA,
520 .boottime.next = USEC_INFINITY,
521 .monotonic.wakeup = WAKEUP_CLOCK_DATA,
523 .monotonic.next = USEC_INFINITY,
524 .realtime_alarm.wakeup = WAKEUP_CLOCK_DATA,
525 .realtime_alarm.fd = -1,
526 .realtime_alarm.next = USEC_INFINITY,
527 .boottime_alarm.wakeup = WAKEUP_CLOCK_DATA,
528 .boottime_alarm.fd = -1,
529 .boottime_alarm.next = USEC_INFINITY,
530 .perturb = USEC_INFINITY,
531 .original_pid = getpid_cached(),
534 r = prioq_ensure_allocated(&e->pending, pending_prioq_compare);
538 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
539 if (e->epoll_fd < 0) {
544 e->epoll_fd = fd_move_above_stdio(e->epoll_fd);
546 if (secure_getenv("SD_EVENT_PROFILE_DELAYS")) {
547 log_debug("Event loop profiling enabled. Logarithmic histogram of event loop iterations in the range 2^0 ... 2^63 us will be logged every 5s.");
548 e->profile_delays = true;
559 _public_ sd_event* sd_event_ref(sd_event *e) {
564 assert(e->n_ref >= 1);
570 _public_ sd_event* sd_event_unref(sd_event *e) {
575 assert(e->n_ref >= 1);
584 static bool event_pid_changed(sd_event *e) {
587 /* We don't support people creating an event loop and keeping
588 * it around over a fork(). Let's complain. */
590 return e->original_pid != getpid_cached();
593 static void source_io_unregister(sd_event_source *s) {
597 assert(s->type == SOURCE_IO);
599 if (event_pid_changed(s->event))
602 if (!s->io.registered)
605 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
607 log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll: %m",
608 strna(s->description), event_source_type_to_string(s->type));
610 s->io.registered = false;
613 static int source_io_register(
618 struct epoll_event ev;
622 assert(s->type == SOURCE_IO);
623 assert(enabled != SD_EVENT_OFF);
625 ev = (struct epoll_event) {
626 .events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
630 if (s->io.registered)
631 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
633 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
637 s->io.registered = true;
642 static clockid_t event_source_type_to_clock(EventSourceType t) {
646 case SOURCE_TIME_REALTIME:
647 return CLOCK_REALTIME;
649 case SOURCE_TIME_BOOTTIME:
650 return CLOCK_BOOTTIME;
652 case SOURCE_TIME_MONOTONIC:
653 return CLOCK_MONOTONIC;
655 case SOURCE_TIME_REALTIME_ALARM:
656 return CLOCK_REALTIME_ALARM;
658 case SOURCE_TIME_BOOTTIME_ALARM:
659 return CLOCK_BOOTTIME_ALARM;
662 return (clockid_t) -1;
666 static EventSourceType clock_to_event_source_type(clockid_t clock) {
671 return SOURCE_TIME_REALTIME;
674 return SOURCE_TIME_BOOTTIME;
676 case CLOCK_MONOTONIC:
677 return SOURCE_TIME_MONOTONIC;
679 case CLOCK_REALTIME_ALARM:
680 return SOURCE_TIME_REALTIME_ALARM;
682 case CLOCK_BOOTTIME_ALARM:
683 return SOURCE_TIME_BOOTTIME_ALARM;
686 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
690 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
695 case SOURCE_TIME_REALTIME:
698 case SOURCE_TIME_BOOTTIME:
701 case SOURCE_TIME_MONOTONIC:
702 return &e->monotonic;
704 case SOURCE_TIME_REALTIME_ALARM:
705 return &e->realtime_alarm;
707 case SOURCE_TIME_BOOTTIME_ALARM:
708 return &e->boottime_alarm;
715 static int event_make_signal_data(
718 struct signal_data **ret) {
720 struct epoll_event ev;
721 struct signal_data *d;
729 if (event_pid_changed(e))
732 if (e->signal_sources && e->signal_sources[sig])
733 priority = e->signal_sources[sig]->priority;
735 priority = SD_EVENT_PRIORITY_NORMAL;
737 d = hashmap_get(e->signal_data, &priority);
739 if (sigismember(&d->sigset, sig) > 0) {
745 r = hashmap_ensure_allocated(&e->signal_data, &uint64_hash_ops);
749 d = new(struct signal_data, 1);
753 *d = (struct signal_data) {
754 .wakeup = WAKEUP_SIGNAL_DATA,
756 .priority = priority,
759 r = hashmap_put(e->signal_data, &d->priority, d);
769 assert_se(sigaddset(&ss_copy, sig) >= 0);
771 r = signalfd(d->fd, &ss_copy, SFD_NONBLOCK|SFD_CLOEXEC);
785 d->fd = fd_move_above_stdio(r);
787 ev = (struct epoll_event) {
792 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev);
805 d->fd = safe_close(d->fd);
806 hashmap_remove(e->signal_data, &d->priority);
813 static void event_unmask_signal_data(sd_event *e, struct signal_data *d, int sig) {
817 /* Turns off the specified signal in the signal data
818 * object. If the signal mask of the object becomes empty that
821 if (sigismember(&d->sigset, sig) == 0)
824 assert_se(sigdelset(&d->sigset, sig) >= 0);
826 if (sigisemptyset(&d->sigset)) {
828 /* If the mask is all-zero we can get rid of the structure */
829 hashmap_remove(e->signal_data, &d->priority);
837 if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
838 log_debug_errno(errno, "Failed to unset signal bit, ignoring: %m");
841 static void event_gc_signal_data(sd_event *e, const int64_t *priority, int sig) {
842 struct signal_data *d;
843 static const int64_t zero_priority = 0;
847 /* Rechecks if the specified signal is still something we are
848 * interested in. If not, we'll unmask it, and possibly drop
849 * the signalfd for it. */
851 if (sig == SIGCHLD &&
852 e->n_enabled_child_sources > 0)
855 if (e->signal_sources &&
856 e->signal_sources[sig] &&
857 e->signal_sources[sig]->enabled != SD_EVENT_OFF)
861 * The specified signal might be enabled in three different queues:
863 * 1) the one that belongs to the priority passed (if it is non-NULL)
864 * 2) the one that belongs to the priority of the event source of the signal (if there is one)
865 * 3) the 0 priority (to cover the SIGCHLD case)
867 * Hence, let's remove it from all three here.
871 d = hashmap_get(e->signal_data, priority);
873 event_unmask_signal_data(e, d, sig);
876 if (e->signal_sources && e->signal_sources[sig]) {
877 d = hashmap_get(e->signal_data, &e->signal_sources[sig]->priority);
879 event_unmask_signal_data(e, d, sig);
882 d = hashmap_get(e->signal_data, &zero_priority);
884 event_unmask_signal_data(e, d, sig);
887 static void source_disconnect(sd_event_source *s) {
895 assert(s->event->n_sources > 0);
901 source_io_unregister(s);
905 case SOURCE_TIME_REALTIME:
906 case SOURCE_TIME_BOOTTIME:
907 case SOURCE_TIME_MONOTONIC:
908 case SOURCE_TIME_REALTIME_ALARM:
909 case SOURCE_TIME_BOOTTIME_ALARM: {
910 struct clock_data *d;
912 d = event_get_clock_data(s->event, s->type);
915 prioq_remove(d->earliest, s, &s->time.earliest_index);
916 prioq_remove(d->latest, s, &s->time.latest_index);
917 d->needs_rearm = true;
922 if (s->signal.sig > 0) {
924 if (s->event->signal_sources)
925 s->event->signal_sources[s->signal.sig] = NULL;
927 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
933 if (s->child.pid > 0) {
934 if (s->enabled != SD_EVENT_OFF) {
935 assert(s->event->n_enabled_child_sources > 0);
936 s->event->n_enabled_child_sources--;
939 (void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
940 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
950 set_remove(s->event->post_sources, s);
954 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
957 case SOURCE_INOTIFY: {
958 struct inode_data *inode_data;
960 inode_data = s->inotify.inode_data;
962 struct inotify_data *inotify_data;
963 assert_se(inotify_data = inode_data->inotify_data);
965 /* Detach this event source from the inode object */
966 LIST_REMOVE(inotify.by_inode_data, inode_data->event_sources, s);
967 s->inotify.inode_data = NULL;
970 assert(inotify_data->n_pending > 0);
971 inotify_data->n_pending--;
974 /* Note that we don't reduce the inotify mask for the watch descriptor here if the inode
975 * continues to be watched. That's because inotify doesn't really have an API for that: we
976 * can only change watch masks with access to the original inode either by fd or by path. But
977 * paths aren't stable, and keeping an O_PATH fd open all the time would mean wasting an fd
978 * continuously and keeping the mount busy, which we can't really do. We could reconstruct the
979 * original inode from /proc/self/fdinfo/$INOTIFY_FD (as all watch descriptors are listed
980 * there), but given the need for open_by_handle_at() which is privileged and not universally
981 * available this would be quite an incomplete solution. Hence we go the other way, leave the
982 * mask set, even if it is not minimized now, and ignore all events we aren't interested in
983 * anymore after reception. Yes, this sucks, but … Linux … */
985 /* Maybe release the inode data (and its inotify) */
986 event_gc_inode_data(s->event, inode_data);
993 assert_not_reached("Wut? I shouldn't exist.");
997 prioq_remove(s->event->pending, s, &s->pending_index);
1000 prioq_remove(s->event->prepare, s, &s->prepare_index);
1004 s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
1006 LIST_REMOVE(sources, event->sources, s);
1010 sd_event_unref(event);
1013 static void source_free(sd_event_source *s) {
1016 source_disconnect(s);
1018 if (s->type == SOURCE_IO && s->io.owned)
1019 safe_close(s->io.fd);
1021 free(s->description);
1025 static int source_set_pending(sd_event_source *s, bool b) {
1029 assert(s->type != SOURCE_EXIT);
1031 if (s->pending == b)
1037 s->pending_iteration = s->event->iteration;
1039 r = prioq_put(s->event->pending, s, &s->pending_index);
1045 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
1047 if (EVENT_SOURCE_IS_TIME(s->type)) {
1048 struct clock_data *d;
1050 d = event_get_clock_data(s->event, s->type);
1053 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1054 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1055 d->needs_rearm = true;
1058 if (s->type == SOURCE_SIGNAL && !b) {
1059 struct signal_data *d;
1061 d = hashmap_get(s->event->signal_data, &s->priority);
1062 if (d && d->current == s)
1066 if (s->type == SOURCE_INOTIFY) {
1068 assert(s->inotify.inode_data);
1069 assert(s->inotify.inode_data->inotify_data);
1072 s->inotify.inode_data->inotify_data->n_pending++;
1074 assert(s->inotify.inode_data->inotify_data->n_pending > 0);
1075 s->inotify.inode_data->inotify_data->n_pending--;
1082 static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
1087 s = new(sd_event_source, 1);
1091 *s = (struct sd_event_source) {
1094 .floating = floating,
1096 .pending_index = PRIOQ_IDX_NULL,
1097 .prepare_index = PRIOQ_IDX_NULL,
1103 LIST_PREPEND(sources, e->sources, s);
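/* Illustrative usage sketch (assumed fd and names, not part of this file):
 * watch a socket for readability from the event loop:
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             char buf[256];
 *             ssize_t n = read(fd, buf, sizeof(buf));
 *             return n < 0 ? -errno : 0;
 *     }
 *
 *     sd_event *e = NULL;
 *     assert_se(sd_event_new(&e) >= 0);
 *     assert_se(sd_event_add_io(e, NULL, fd, EPOLLIN, on_io, NULL) >= 0);
 *     assert_se(sd_event_loop(e) >= 0);
 *
 * Passing NULL as the source pointer makes the source "floating", i.e. owned
 * by the event loop itself. */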
1109 _public_ int sd_event_add_io(
1111 sd_event_source **ret,
1114 sd_event_io_handler_t callback,
1120 assert_return(e, -EINVAL);
1121 assert_return(e = event_resolve(e), -ENOPKG);
1122 assert_return(fd >= 0, -EBADF);
1123 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1124 assert_return(callback, -EINVAL);
1125 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1126 assert_return(!event_pid_changed(e), -ECHILD);
1128 s = source_new(e, !ret, SOURCE_IO);
1132 s->wakeup = WAKEUP_EVENT_SOURCE;
1134 s->io.events = events;
1135 s->io.callback = callback;
1136 s->userdata = userdata;
1137 s->enabled = SD_EVENT_ON;
1139 r = source_io_register(s, s->enabled, events);
1151 static void initialize_perturb(sd_event *e) {
1152 sd_id128_t bootid = {};
1154 /* When we sleep for longer, we try to realign the wakeup to
1155 the same time within each minute/second/250ms, so that
1156 events all across the system can be coalesced into a single
1157 CPU wakeup. However, let's take some system-specific
1158 randomness for this value, so that in a network of systems
1159 with synced clocks timer events are distributed a
1160 bit. Here, we calculate a perturbation usec offset from the
1163 if (_likely_(e->perturb != USEC_INFINITY))
1166 if (sd_id128_get_boot(&bootid) >= 0)
1167 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
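/* Worked example (assumed boot ID hash): if the XORed boot ID words yield
 * e->perturb == 7200000 (7.2s), minute-aligned wakeups on this machine all
 * land at second 7.2 of each minute, while a machine with a different boot
 * ID lands at a different offset, spreading load across a fleet with
 * synced clocks. */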
1170 static int event_setup_timer_fd(
1172 struct clock_data *d,
1175 struct epoll_event ev;
1181 if (_likely_(d->fd >= 0))
1184 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
1188 fd = fd_move_above_stdio(fd);
1190 ev = (struct epoll_event) {
1195 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
1205 static int time_exit_callback(sd_event_source *s, uint64_t usec, void *userdata) {
1208 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
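/* Illustrative usage sketch (assumed names): fire once, one second from now;
 * passing 0 as accuracy selects DEFAULT_ACCURACY_USEC, and passing NULL as
 * callback uses time_exit_callback above, exiting the loop:
 *
 *     usec_t now_usec;
 *     assert_se(sd_event_now(e, CLOCK_MONOTONIC, &now_usec) >= 0);
 *     assert_se(sd_event_add_time(e, NULL, CLOCK_MONOTONIC,
 *                                 now_usec + USEC_PER_SEC, 0, NULL, NULL) >= 0);
 */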
1211 _public_ int sd_event_add_time(
1213 sd_event_source **ret,
1217 sd_event_time_handler_t callback,
1220 EventSourceType type;
1222 struct clock_data *d;
1225 assert_return(e, -EINVAL);
1226 assert_return(e = event_resolve(e), -ENOPKG);
1227 assert_return(accuracy != (uint64_t) -1, -EINVAL);
1228 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1229 assert_return(!event_pid_changed(e), -ECHILD);
1231 if (!clock_supported(clock)) /* Checks whether the kernel supports the clock */
1234 type = clock_to_event_source_type(clock); /* checks whether sd-event supports this clock */
1239 callback = time_exit_callback;
1241 d = event_get_clock_data(e, type);
1244 r = prioq_ensure_allocated(&d->earliest, earliest_time_prioq_compare);
1248 r = prioq_ensure_allocated(&d->latest, latest_time_prioq_compare);
1253 r = event_setup_timer_fd(e, d, clock);
1258 s = source_new(e, !ret, type);
1262 s->time.next = usec;
1263 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
1264 s->time.callback = callback;
1265 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
1266 s->userdata = userdata;
1267 s->enabled = SD_EVENT_ONESHOT;
1269 d->needs_rearm = true;
1271 r = prioq_put(d->earliest, s, &s->time.earliest_index);
1275 r = prioq_put(d->latest, s, &s->time.latest_index);
1289 static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1292 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
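/* Illustrative usage sketch (assumed names): the signal must already be
 * blocked in all threads (delivery happens via signalfd); a NULL callback
 * uses signal_exit_callback above, terminating the loop on SIGTERM:
 *
 *     assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);
 *     assert_se(sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL) >= 0);
 */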
1295 _public_ int sd_event_add_signal(
1297 sd_event_source **ret,
1299 sd_event_signal_handler_t callback,
1303 struct signal_data *d;
1307 assert_return(e, -EINVAL);
1308 assert_return(e = event_resolve(e), -ENOPKG);
1309 assert_return(SIGNAL_VALID(sig), -EINVAL);
1310 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1311 assert_return(!event_pid_changed(e), -ECHILD);
1314 callback = signal_exit_callback;
1316 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
1320 if (!sigismember(&ss, sig))
1323 if (!e->signal_sources) {
1324 e->signal_sources = new0(sd_event_source*, _NSIG);
1325 if (!e->signal_sources)
1327 } else if (e->signal_sources[sig])
1330 s = source_new(e, !ret, SOURCE_SIGNAL);
1334 s->signal.sig = sig;
1335 s->signal.callback = callback;
1336 s->userdata = userdata;
1337 s->enabled = SD_EVENT_ON;
1339 e->signal_sources[sig] = s;
1341 r = event_make_signal_data(e, sig, &d);
1347 /* Use the signal name as description for the event source by default */
1348 (void) sd_event_source_set_description(s, signal_to_string(sig));
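/* Illustrative usage sketch (assumed pid and names): SIGCHLD must be blocked
 * before adding a child source; the callback sees the child as a zombie,
 * which is reaped for good right after dispatch (see source_dispatch()):
 *
 *     static int on_child(sd_event_source *s, const siginfo_t *si, void *userdata) {
 *             log_info("child " PID_FMT " exited with status %i",
 *                      si->si_pid, si->si_status);
 *             return 0;
 *     }
 *
 *     assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGCHLD, -1) >= 0);
 *     assert_se(sd_event_add_child(e, NULL, pid, WEXITED, on_child, NULL) >= 0);
 */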
1356 _public_ int sd_event_add_child(
1358 sd_event_source **ret,
1361 sd_event_child_handler_t callback,
1367 assert_return(e, -EINVAL);
1368 assert_return(e = event_resolve(e), -ENOPKG);
1369 assert_return(pid > 1, -EINVAL);
1370 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
1371 assert_return(options != 0, -EINVAL);
1372 assert_return(callback, -EINVAL);
1373 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1374 assert_return(!event_pid_changed(e), -ECHILD);
1376 r = hashmap_ensure_allocated(&e->child_sources, NULL);
1380 if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
1383 s = source_new(e, !ret, SOURCE_CHILD);
1388 s->child.options = options;
1389 s->child.callback = callback;
1390 s->userdata = userdata;
1391 s->enabled = SD_EVENT_ONESHOT;
1393 r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
1399 e->n_enabled_child_sources++;
1401 r = event_make_signal_data(e, SIGCHLD, NULL);
1403 e->n_enabled_child_sources--;
1408 e->need_process_child = true;
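/* Illustrative usage sketch: a defer source is created enabled ONESHOT and
 * marked pending right away, so the callback runs on the next loop iteration:
 *
 *     static int on_defer(sd_event_source *s, void *userdata) {
 *             log_debug("deferred work ran once");
 *             return 0;
 *     }
 *
 *     assert_se(sd_event_add_defer(e, NULL, on_defer, NULL) >= 0);
 */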
1416 _public_ int sd_event_add_defer(
1418 sd_event_source **ret,
1419 sd_event_handler_t callback,
1425 assert_return(e, -EINVAL);
1426 assert_return(e = event_resolve(e), -ENOPKG);
1427 assert_return(callback, -EINVAL);
1428 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1429 assert_return(!event_pid_changed(e), -ECHILD);
1431 s = source_new(e, !ret, SOURCE_DEFER);
1435 s->defer.callback = callback;
1436 s->userdata = userdata;
1437 s->enabled = SD_EVENT_ONESHOT;
1439 r = source_set_pending(s, true);
1451 _public_ int sd_event_add_post(
1453 sd_event_source **ret,
1454 sd_event_handler_t callback,
1460 assert_return(e, -EINVAL);
1461 assert_return(e = event_resolve(e), -ENOPKG);
1462 assert_return(callback, -EINVAL);
1463 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1464 assert_return(!event_pid_changed(e), -ECHILD);
1466 r = set_ensure_allocated(&e->post_sources, NULL);
1470 s = source_new(e, !ret, SOURCE_POST);
1474 s->post.callback = callback;
1475 s->userdata = userdata;
1476 s->enabled = SD_EVENT_ON;
1478 r = set_put(e->post_sources, s);
1490 _public_ int sd_event_add_exit(
1492 sd_event_source **ret,
1493 sd_event_handler_t callback,
1499 assert_return(e, -EINVAL);
1500 assert_return(e = event_resolve(e), -ENOPKG);
1501 assert_return(callback, -EINVAL);
1502 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1503 assert_return(!event_pid_changed(e), -ECHILD);
1505 r = prioq_ensure_allocated(&e->exit, exit_prioq_compare);
1509 s = source_new(e, !ret, SOURCE_EXIT);
1513 s->exit.callback = callback;
1514 s->userdata = userdata;
1515 s->exit.prioq_index = PRIOQ_IDX_NULL;
1516 s->enabled = SD_EVENT_ONESHOT;
1518 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1530 static void event_free_inotify_data(sd_event *e, struct inotify_data *d) {
1536 assert(hashmap_isempty(d->inodes));
1537 assert(hashmap_isempty(d->wd));
1539 if (d->buffer_filled > 0)
1540 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
1542 hashmap_free(d->inodes);
1543 hashmap_free(d->wd);
1545 assert_se(hashmap_remove(e->inotify_data, &d->priority) == d);
1548 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, d->fd, NULL) < 0)
1549 log_debug_errno(errno, "Failed to remove inotify fd from epoll, ignoring: %m");
1556 static int event_make_inotify_data(
1559 struct inotify_data **ret) {
1561 _cleanup_close_ int fd = -1;
1562 struct inotify_data *d;
1563 struct epoll_event ev;
1568 d = hashmap_get(e->inotify_data, &priority);
1575 fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
1579 fd = fd_move_above_stdio(fd);
1581 r = hashmap_ensure_allocated(&e->inotify_data, &uint64_hash_ops);
1585 d = new(struct inotify_data, 1);
1589 *d = (struct inotify_data) {
1590 .wakeup = WAKEUP_INOTIFY_DATA,
1592 .priority = priority,
1595 r = hashmap_put(e->inotify_data, &d->priority, d);
1597 d->fd = safe_close(d->fd);
1602 ev = (struct epoll_event) {
1607 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
1609 d->fd = safe_close(d->fd); /* let's close this ourselves, as event_free_inotify_data() would otherwise
1610 * remove the fd from the epoll first, which we don't want as we couldn't
1611 * add it in the first place. */
1612 event_free_inotify_data(e, d);
1622 static int inode_data_compare(const void *a, const void *b) {
1623 const struct inode_data *x = a, *y = b;
1628 if (x->dev < y->dev)
1630 if (x->dev > y->dev)
1633 if (x->ino < y->ino)
1635 if (x->ino > y->ino)
1641 static void inode_data_hash_func(const void *p, struct siphash *state) {
1642 const struct inode_data *d = p;
1646 siphash24_compress(&d->dev, sizeof(d->dev), state);
1647 siphash24_compress(&d->ino, sizeof(d->ino), state);
1650 const struct hash_ops inode_data_hash_ops = {
1651 .hash = inode_data_hash_func,
1652 .compare = inode_data_compare
1655 static void event_free_inode_data(
1657 struct inode_data *d) {
1664 assert(!d->event_sources);
1667 LIST_REMOVE(to_close, e->inode_data_to_close, d);
1671 if (d->inotify_data) {
1674 if (d->inotify_data->fd >= 0) {
1675 /* So here's a problem. At the time this runs the watch descriptor might already be
1676 * invalidated, because an IN_IGNORED event might be queued at the very moment we enter
1677 * the syscall. Hence, whenever we get EINVAL, ignore it entirely, since it's quite
1678 * likely to happen. */
1680 if (inotify_rm_watch(d->inotify_data->fd, d->wd) < 0 && errno != EINVAL)
1681 log_debug_errno(errno, "Failed to remove watch descriptor %i from inotify, ignoring: %m", d->wd);
1684 assert_se(hashmap_remove(d->inotify_data->wd, INT_TO_PTR(d->wd)) == d);
1687 assert_se(hashmap_remove(d->inotify_data->inodes, d) == d);
1693 static void event_gc_inode_data(
1695 struct inode_data *d) {
1697 struct inotify_data *inotify_data;
1704 if (d->event_sources)
1707 inotify_data = d->inotify_data;
1708 event_free_inode_data(e, d);
1710 if (inotify_data && hashmap_isempty(inotify_data->inodes))
1711 event_free_inotify_data(e, inotify_data);
1714 static int event_make_inode_data(
1716 struct inotify_data *inotify_data,
1719 struct inode_data **ret) {
1721 struct inode_data *d, key;
1725 assert(inotify_data);
1727 key = (struct inode_data) {
1732 d = hashmap_get(inotify_data->inodes, &key);
1740 r = hashmap_ensure_allocated(&inotify_data->inodes, &inode_data_hash_ops);
1744 d = new(struct inode_data, 1);
1748 *d = (struct inode_data) {
1753 .inotify_data = inotify_data,
1756 r = hashmap_put(inotify_data->inodes, d, d);
1768 static uint32_t inode_data_determine_mask(struct inode_data *d) {
1769 bool excl_unlink = true;
1770 uint32_t combined = 0;
1775 /* Combines the watch masks of all event sources watching this inode. We generally just OR them together, but
1776 * the IN_EXCL_UNLINK flag is ANDed instead.
1778 * Note that we add all sources to the mask here, regardless of whether they are enabled, disabled or
1779 * oneshot. That's because we cannot change the mask anymore after the event source has been created, since
1780 * the kernel has no API for that. Hence we need to subscribe to the maximum mask we ever might be
1781 * interested in, and suppress events we don't care for client-side. */
1783 LIST_FOREACH(inotify.by_inode_data, s, d->event_sources) {
1785 if ((s->inotify.mask & IN_EXCL_UNLINK) == 0)
1786 excl_unlink = false;
1788 combined |= s->inotify.mask;
1791 return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
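/* Worked example (assumed masks): two sources on the same inode, one with
 * IN_CREATE|IN_EXCL_UNLINK and one with plain IN_DELETE, combine to
 * IN_CREATE|IN_DELETE: IN_EXCL_UNLINK is dropped because the second source
 * did not request it, and keeping it would suppress events that source
 * still wants to see. */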
1794 static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
1795 uint32_t combined_mask;
1801 combined_mask = inode_data_determine_mask(d);
1803 if (d->wd >= 0 && combined_mask == d->combined_mask)
1806 r = hashmap_ensure_allocated(&d->inotify_data->wd, NULL);
1810 wd = inotify_add_watch_fd(d->inotify_data->fd, d->fd, combined_mask);
1815 r = hashmap_put(d->inotify_data->wd, INT_TO_PTR(wd), d);
1817 (void) inotify_rm_watch(d->inotify_data->fd, wd);
1823 } else if (d->wd != wd) {
1825 log_debug("Weird, the watch descriptor we already knew for this inode changed?");
1826 (void) inotify_rm_watch(d->inotify_data->fd, wd);
1830 d->combined_mask = combined_mask;
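/* Illustrative usage sketch (assumed path and names): watch a directory for
 * created and removed entries; equal-priority watches on the same inode
 * share one watch descriptor behind the scenes:
 *
 *     static int on_inotify(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
 *             log_info("mask=%" PRIx32 " name=%s", ev->mask, ev->len > 0 ? ev->name : "-");
 *             return 0;
 *     }
 *
 *     assert_se(sd_event_add_inotify(e, NULL, "/tmp", IN_CREATE|IN_DELETE, on_inotify, NULL) >= 0);
 */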
1834 _public_ int sd_event_add_inotify(
1836 sd_event_source **ret,
1839 sd_event_inotify_handler_t callback,
1842 bool rm_inotify = false, rm_inode = false;
1843 struct inotify_data *inotify_data = NULL;
1844 struct inode_data *inode_data = NULL;
1845 _cleanup_close_ int fd = -1;
1850 assert_return(e, -EINVAL);
1851 assert_return(e = event_resolve(e), -ENOPKG);
1852 assert_return(path, -EINVAL);
1853 assert_return(callback, -EINVAL);
1854 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1855 assert_return(!event_pid_changed(e), -ECHILD);
1857 /* Refuse IN_MASK_ADD since we coalesce watches on the same inode, and hence really don't want to merge
1858 * masks. Or in other words, this whole code exists only to manage IN_MASK_ADD type operations for you, hence
1859 * the caller can't use them here. */
1860 if (mask & IN_MASK_ADD)
1863 fd = open(path, O_PATH|O_CLOEXEC|
1864 (mask & IN_ONLYDIR ? O_DIRECTORY : 0)|
1865 (mask & IN_DONT_FOLLOW ? O_NOFOLLOW : 0));
1869 if (fstat(fd, &st) < 0)
1872 s = source_new(e, !ret, SOURCE_INOTIFY);
1876 s->enabled = mask & IN_ONESHOT ? SD_EVENT_ONESHOT : SD_EVENT_ON;
1877 s->inotify.mask = mask;
1878 s->inotify.callback = callback;
1879 s->userdata = userdata;
1881 /* Allocate an inotify object for this priority, and an inode object within it */
1882 r = event_make_inotify_data(e, SD_EVENT_PRIORITY_NORMAL, &inotify_data);
1887 r = event_make_inode_data(e, inotify_data, st.st_dev, st.st_ino, &inode_data);
1892 /* Keep the O_PATH fd around until the first iteration of the loop, so that we can still change the priority
1893 * of the event source until then, for which we need the original inode. */
1894 if (inode_data->fd < 0) {
1895 inode_data->fd = TAKE_FD(fd);
1896 LIST_PREPEND(to_close, e->inode_data_to_close, inode_data);
1899 /* Link our event source to the inode data object */
1900 LIST_PREPEND(inotify.by_inode_data, inode_data->event_sources, s);
1901 s->inotify.inode_data = inode_data;
1903 rm_inode = rm_inotify = false;
1905 /* Actually realize the watch now */
1906 r = inode_data_realize_watch(e, inode_data);
1910 (void) sd_event_source_set_description(s, path);
1921 event_free_inode_data(e, inode_data);
1924 event_free_inotify_data(e, inotify_data);
1929 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1934 assert(s->n_ref >= 1);
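/* Illustrative usage sketch: because of the deferred-free logic in
 * sd_event_source_unref() below, dropping the last reference from inside a
 * callback and closing the fd immediately is safe; the object stays valid
 * until the dispatch returns:
 *
 *     static int on_io_once(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             sd_event_source_unref(s);
 *             safe_close(fd);
 *             return 0;
 *     }
 */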
1940 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1945 assert(s->n_ref >= 1);
1948 if (s->n_ref <= 0) {
1949 /* Here's a special hack: when we are called from a
1950 * dispatch handler we won't free the event source
1951 * immediately, but we will detach the fd from the
1952 * epoll. This way it is safe for the caller to unref
1953 * the event source and immediately close the fd, but
1954 * we still retain a valid event source object after
1957 if (s->dispatching) {
1958 if (s->type == SOURCE_IO)
1959 source_io_unregister(s);
1961 source_disconnect(s);
1969 _public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
1970 assert_return(s, -EINVAL);
1971 assert_return(!event_pid_changed(s->event), -ECHILD);
1973 return free_and_strdup(&s->description, description);
1976 _public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
1977 assert_return(s, -EINVAL);
1978 assert_return(description, -EINVAL);
1979 assert_return(s->description, -ENXIO);
1980 assert_return(!event_pid_changed(s->event), -ECHILD);
1982 *description = s->description;
1986 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1987 assert_return(s, NULL);
1992 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1993 assert_return(s, -EINVAL);
1994 assert_return(s->type != SOURCE_EXIT, -EDOM);
1995 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1996 assert_return(!event_pid_changed(s->event), -ECHILD);
2001 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
2002 assert_return(s, -EINVAL);
2003 assert_return(s->type == SOURCE_IO, -EDOM);
2004 assert_return(!event_pid_changed(s->event), -ECHILD);
2009 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
2012 assert_return(s, -EINVAL);
2013 assert_return(fd >= 0, -EBADF);
2014 assert_return(s->type == SOURCE_IO, -EDOM);
2015 assert_return(!event_pid_changed(s->event), -ECHILD);
2020 if (s->enabled == SD_EVENT_OFF) {
2022 s->io.registered = false;
2026 saved_fd = s->io.fd;
2027 assert(s->io.registered);
2030 s->io.registered = false;
2032 r = source_io_register(s, s->enabled, s->io.events);
2034 s->io.fd = saved_fd;
2035 s->io.registered = true;
2039 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
2045 _public_ int sd_event_source_get_io_fd_own(sd_event_source *s) {
2046 assert_return(s, -EINVAL);
2047 assert_return(s->type == SOURCE_IO, -EDOM);
2052 _public_ int sd_event_source_set_io_fd_own(sd_event_source *s, int own) {
2053 assert_return(s, -EINVAL);
2054 assert_return(s->type == SOURCE_IO, -EDOM);
2060 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
2061 assert_return(s, -EINVAL);
2062 assert_return(events, -EINVAL);
2063 assert_return(s->type == SOURCE_IO, -EDOM);
2064 assert_return(!event_pid_changed(s->event), -ECHILD);
2066 *events = s->io.events;
2070 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
2073 assert_return(s, -EINVAL);
2074 assert_return(s->type == SOURCE_IO, -EDOM);
2075 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
2076 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2077 assert_return(!event_pid_changed(s->event), -ECHILD);
2079 /* edge-triggered updates are never skipped, so we can reset edges */
2080 if (s->io.events == events && !(events & EPOLLET))
2083 r = source_set_pending(s, false);
2087 if (s->enabled != SD_EVENT_OFF) {
2088 r = source_io_register(s, s->enabled, events);
2093 s->io.events = events;
2098 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
2099 assert_return(s, -EINVAL);
2100 assert_return(revents, -EINVAL);
2101 assert_return(s->type == SOURCE_IO, -EDOM);
2102 assert_return(s->pending, -ENODATA);
2103 assert_return(!event_pid_changed(s->event), -ECHILD);
2105 *revents = s->io.revents;
2109 _public_ int sd_event_source_get_signal(sd_event_source *s) {
2110 assert_return(s, -EINVAL);
2111 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
2112 assert_return(!event_pid_changed(s->event), -ECHILD);
2114 return s->signal.sig;
2117 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
2118 assert_return(s, -EINVAL);
2119 assert_return(!event_pid_changed(s->event), -ECHILD);
2121 *priority = s->priority;
2125 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
2126 bool rm_inotify = false, rm_inode = false;
2127 struct inotify_data *new_inotify_data = NULL;
2128 struct inode_data *new_inode_data = NULL;
2131 assert_return(s, -EINVAL);
2132 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2133 assert_return(!event_pid_changed(s->event), -ECHILD);
2135 if (s->priority == priority)
2138 if (s->type == SOURCE_INOTIFY) {
2139 struct inode_data *old_inode_data;
2141 assert(s->inotify.inode_data);
2142 old_inode_data = s->inotify.inode_data;
2144 /* We need the original fd to change the priority. If we don't have it, we can't change the priority
2145 * anymore. Note that we close any fds when entering the next event loop iteration, i.e. for inotify
2146 * events we allow priority changes only until the first following iteration. */
2147 if (old_inode_data->fd < 0)
2150 r = event_make_inotify_data(s->event, priority, &new_inotify_data);
2155 r = event_make_inode_data(s->event, new_inotify_data, old_inode_data->dev, old_inode_data->ino, &new_inode_data);
2160 if (new_inode_data->fd < 0) {
2161 /* Duplicate the fd for the new inode object if we don't have any yet */
2162 new_inode_data->fd = fcntl(old_inode_data->fd, F_DUPFD_CLOEXEC, 3);
2163 if (new_inode_data->fd < 0) {
2168 LIST_PREPEND(to_close, s->event->inode_data_to_close, new_inode_data);
2171 /* Move the event source to the new inode data structure */
2172 LIST_REMOVE(inotify.by_inode_data, old_inode_data->event_sources, s);
2173 LIST_PREPEND(inotify.by_inode_data, new_inode_data->event_sources, s);
2174 s->inotify.inode_data = new_inode_data;
2176 /* Now create the new watch */
2177 r = inode_data_realize_watch(s->event, new_inode_data);
2180 LIST_REMOVE(inotify.by_inode_data, new_inode_data->event_sources, s);
2181 LIST_PREPEND(inotify.by_inode_data, old_inode_data->event_sources, s);
2182 s->inotify.inode_data = old_inode_data;
2186 s->priority = priority;
2188 event_gc_inode_data(s->event, old_inode_data);
2190 } else if (s->type == SOURCE_SIGNAL && s->enabled != SD_EVENT_OFF) {
2191 struct signal_data *old, *d;
2193 /* Move us from the signalfd belonging to the old
2194 * priority to the signalfd of the new priority */
2196 assert_se(old = hashmap_get(s->event->signal_data, &s->priority));
2198 s->priority = priority;
2200 r = event_make_signal_data(s->event, s->signal.sig, &d);
2202 s->priority = old->priority;
2206 event_unmask_signal_data(s->event, old, s->signal.sig);
2208 s->priority = priority;
2211 prioq_reshuffle(s->event->pending, s, &s->pending_index);
2214 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
2216 if (s->type == SOURCE_EXIT)
2217 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2223 event_free_inode_data(s->event, new_inode_data);
2226 event_free_inotify_data(s->event, new_inotify_data);
2231 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
2232 assert_return(s, -EINVAL);
2233 assert_return(m, -EINVAL);
2234 assert_return(!event_pid_changed(s->event), -ECHILD);
2240 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
2243 assert_return(s, -EINVAL);
2244 assert_return(IN_SET(m, SD_EVENT_OFF, SD_EVENT_ON, SD_EVENT_ONESHOT), -EINVAL);
2245 assert_return(!event_pid_changed(s->event), -ECHILD);
2247 /* If we are dead anyway, we are fine with turning off
2248 * sources, but everything else needs to fail. */
2249 if (s->event->state == SD_EVENT_FINISHED)
2250 return m == SD_EVENT_OFF ? 0 : -ESTALE;
2252 if (s->enabled == m)
2255 if (m == SD_EVENT_OFF) {
2257 /* Unset the pending flag when this event source is disabled */
2258 if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2259 r = source_set_pending(s, false);
2267 source_io_unregister(s);
2271 case SOURCE_TIME_REALTIME:
2272 case SOURCE_TIME_BOOTTIME:
2273 case SOURCE_TIME_MONOTONIC:
2274 case SOURCE_TIME_REALTIME_ALARM:
2275 case SOURCE_TIME_BOOTTIME_ALARM: {
2276 struct clock_data *d;
2279 d = event_get_clock_data(s->event, s->type);
2282 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
2283 prioq_reshuffle(d->latest, s, &s->time.latest_index);
2284 d->needs_rearm = true;
2291 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2297 assert(s->event->n_enabled_child_sources > 0);
2298 s->event->n_enabled_child_sources--;
2300 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2305 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2310 case SOURCE_INOTIFY:
2315 assert_not_reached("Wut? I shouldn't exist.");
2320 /* Unset the pending flag when this event source is enabled */
2321 if (s->enabled == SD_EVENT_OFF && !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
2322 r = source_set_pending(s, false);
2330 r = source_io_register(s, m, s->io.events);
2337 case SOURCE_TIME_REALTIME:
2338 case SOURCE_TIME_BOOTTIME:
2339 case SOURCE_TIME_MONOTONIC:
2340 case SOURCE_TIME_REALTIME_ALARM:
2341 case SOURCE_TIME_BOOTTIME_ALARM: {
2342 struct clock_data *d;
2345 d = event_get_clock_data(s->event, s->type);
2348 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
2349 prioq_reshuffle(d->latest, s, &s->time.latest_index);
2350 d->needs_rearm = true;
2358 r = event_make_signal_data(s->event, s->signal.sig, NULL);
2360 s->enabled = SD_EVENT_OFF;
2361 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
2369 if (s->enabled == SD_EVENT_OFF)
2370 s->event->n_enabled_child_sources++;
2374 r = event_make_signal_data(s->event, SIGCHLD, NULL);
2376 s->enabled = SD_EVENT_OFF;
2377 s->event->n_enabled_child_sources--;
2378 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
2386 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
2391 case SOURCE_INOTIFY:
2396 assert_not_reached("Wut? I shouldn't exist.");
2401 prioq_reshuffle(s->event->pending, s, &s->pending_index);
2404 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
2409 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
2410 assert_return(s, -EINVAL);
2411 assert_return(usec, -EINVAL);
2412 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2413 assert_return(!event_pid_changed(s->event), -ECHILD);
2415 *usec = s->time.next;
2419 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
2420 struct clock_data *d;
2423 assert_return(s, -EINVAL);
2424 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2425 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2426 assert_return(!event_pid_changed(s->event), -ECHILD);
2428 r = source_set_pending(s, false);
2432 s->time.next = usec;
2434 d = event_get_clock_data(s->event, s->type);
2437 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
2438 prioq_reshuffle(d->latest, s, &s->time.latest_index);
2439 d->needs_rearm = true;
2444 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
2445 assert_return(s, -EINVAL);
2446 assert_return(usec, -EINVAL);
2447 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2448 assert_return(!event_pid_changed(s->event), -ECHILD);
2450 *usec = s->time.accuracy;
2454 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
2455 struct clock_data *d;
2458 assert_return(s, -EINVAL);
2459 assert_return(usec != (uint64_t) -1, -EINVAL);
2460 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2461 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2462 assert_return(!event_pid_changed(s->event), -ECHILD);
2464 r = source_set_pending(s, false);
2469 usec = DEFAULT_ACCURACY_USEC;
2471 s->time.accuracy = usec;
2473 d = event_get_clock_data(s->event, s->type);
2476 prioq_reshuffle(d->latest, s, &s->time.latest_index);
2477 d->needs_rearm = true;
2482 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
2483 assert_return(s, -EINVAL);
2484 assert_return(clock, -EINVAL);
2485 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
2486 assert_return(!event_pid_changed(s->event), -ECHILD);
2488 *clock = event_source_type_to_clock(s->type);
2492 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
2493 assert_return(s, -EINVAL);
2494 assert_return(pid, -EINVAL);
2495 assert_return(s->type == SOURCE_CHILD, -EDOM);
2496 assert_return(!event_pid_changed(s->event), -ECHILD);
2498 *pid = s->child.pid;
2502 _public_ int sd_event_source_get_inotify_mask(sd_event_source *s, uint32_t *mask) {
2503 assert_return(s, -EINVAL);
2504 assert_return(mask, -EINVAL);
2505 assert_return(s->type == SOURCE_INOTIFY, -EDOM);
2506 assert_return(!event_pid_changed(s->event), -ECHILD);
2508 *mask = s->inotify.mask;
2512 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
2515 assert_return(s, -EINVAL);
2516 assert_return(s->type != SOURCE_EXIT, -EDOM);
2517 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
2518 assert_return(!event_pid_changed(s->event), -ECHILD);
2520 if (s->prepare == callback)
2523 if (callback && s->prepare) {
2524 s->prepare = callback;
2528 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
2532 s->prepare = callback;
2535 r = prioq_put(s->event->prepare, s, &s->prepare_index);
2539 prioq_remove(s->event->prepare, s, &s->prepare_index);
2544 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
2545 assert_return(s, NULL);
2550 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
2553 assert_return(s, NULL);
2556 s->userdata = userdata;
2561 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
2568 if (a >= USEC_INFINITY)
2569 return USEC_INFINITY;
2574 initialize_perturb(e);
2577 Find a good time to wake up again between times a and b. We
2578 have two goals here:
2580 a) We want to wake up as seldom as possible, hence prefer
2581 later times over earlier times.
2583 b) But if we have to wake up, then let's make sure to
2584 dispatch as much as possible on the entire system.
2586 We implement this by waking up everywhere at the same time
2587 within any given minute if we can, synchronised via the
2588 perturbation value determined from the boot ID. If we can't,
2589 then we try to find the same spot in every 10s window, then every
2590 1s and then every 250ms window. Otherwise, we pick the last possible time
2594 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
2596 if (_unlikely_(c < USEC_PER_MINUTE))
2599 c -= USEC_PER_MINUTE;
2605 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
2607 if (_unlikely_(c < USEC_PER_SEC*10))
2610 c -= USEC_PER_SEC*10;
2616 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
2618 if (_unlikely_(c < USEC_PER_SEC))
2627 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
2629 if (_unlikely_(c < USEC_PER_MSEC*250))
2632 c -= USEC_PER_MSEC*250;
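/* Worked example (assumed values): with a = 20s, b = 95s and perturb = 7.2s,
 * the minute step yields c = 60s + 7.2s = 67.2s, which lies within [a, b],
 * so 67.2s is returned and all timers on this machine whose windows span
 * that minute wake up together. If b were 50s instead, 0s + 7.2s < a would
 * fail the check and the 10s step would apply: c = 40s + 7.2s = 47.2s. */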
2641 static int event_arm_timer(
2643 struct clock_data *d) {
2645 struct itimerspec its = {};
2646 sd_event_source *a, *b;
2653 if (!d->needs_rearm)
2656 d->needs_rearm = false;
2658 a = prioq_peek(d->earliest);
2659 if (!a || a->enabled == SD_EVENT_OFF || a->time.next == USEC_INFINITY) {
2664 if (d->next == USEC_INFINITY)
2668 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
2672 d->next = USEC_INFINITY;
2676 b = prioq_peek(d->latest);
2677 assert_se(b && b->enabled != SD_EVENT_OFF);
2679 t = sleep_between(e, a->time.next, time_event_source_latest(b));
2683 assert_se(d->fd >= 0);
2686 /* We don't want to disarm here, just set some time looooong ago. */
2687 its.it_value.tv_sec = 0;
2688 its.it_value.tv_nsec = 1;
2690 timespec_store(&its.it_value, t);
2692 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
2700 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
2703 assert(s->type == SOURCE_IO);
2705 /* If the event source was already pending, we just OR in the
2706 * new revents, otherwise we reset the value. The ORing is
2707 * necessary to handle EPOLLONESHOT events properly where
2708 * readability might happen independently of writability, and
2709 * we need to keep track of both */
2712 s->io.revents |= revents;
2714 s->io.revents = revents;
2716 return source_set_pending(s, true);
2719 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
2726 assert_return(events == EPOLLIN, -EIO);
2728 ss = read(fd, &x, sizeof(x));
2730 if (IN_SET(errno, EAGAIN, EINTR))
2736 if (_unlikely_(ss != sizeof(x)))
2740 *next = USEC_INFINITY;
2745 static int process_timer(
2748 struct clock_data *d) {
2757 s = prioq_peek(d->earliest);
2760 s->enabled == SD_EVENT_OFF ||
2764 r = source_set_pending(s, true);
2768 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
2769 prioq_reshuffle(d->latest, s, &s->time.latest_index);
2770 d->needs_rearm = true;
2776 static int process_child(sd_event *e) {
2783 e->need_process_child = false;
2786 So, this is ugly. We iteratively invoke waitid() with P_PID
2787 + WNOHANG for each PID we wait for, instead of using
2788 P_ALL. This is because we only want to get child
2789 information of very specific child processes, and not all
2790 of them. We might not have processed the SIGCHLD event of a
2791 previous invocation and we don't want to maintain an
2792 unbounded *per-child* event queue, hence we really don't
2793 want anything flushed out of the kernel's queue that we
2794 don't care about. Since this is O(n) this means that if you
2795 have a lot of processes you probably want to handle SIGCHLD
2798 We do not reap the children here (by using WNOWAIT), this
2799 is only done after the event source is dispatched so that
2800 the callback still sees the process as a zombie.
2803 HASHMAP_FOREACH(s, e->child_sources, i) {
2804 assert(s->type == SOURCE_CHILD);
2809 if (s->enabled == SD_EVENT_OFF)
2812 zero(s->child.siginfo);
2813 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
2814 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
2818 if (s->child.siginfo.si_pid != 0) {
2819 bool zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
2821 if (!zombie && (s->child.options & WEXITED)) {
2822 /* If the child isn't dead then let's
2823 * immediately remove the state change
2824 * from the queue, since there's no
2825 * benefit in leaving it queued */
2827 assert(s->child.options & (WSTOPPED|WCONTINUED));
2828 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
2831 r = source_set_pending(s, true);
2840 static int process_signal(sd_event *e, struct signal_data *d, uint32_t events) {
2841 bool read_one = false;
2846 assert_return(events == EPOLLIN, -EIO);
2848 /* If there's a signal queued on this priority and SIGCHLD is
2849 on this priority too, then make sure to recheck the
2850 children we watch. This is because we only ever dequeue
2851 the first signal per priority, and if we dequeue one, a
2852 SIGCHLD might be enqueued later that we wouldn't know
2853 about, but we might have higher priority children we care
2854 about, hence we need to check that explicitly. */
2856 if (sigismember(&d->sigset, SIGCHLD))
2857 e->need_process_child = true;
2859 /* If there's already an event source pending for this
2860 * priority we don't read another */
2865 struct signalfd_siginfo si;
2867 sd_event_source *s = NULL;
2869 n = read(d->fd, &si, sizeof(si));
2871 if (IN_SET(errno, EAGAIN, EINTR))
2877 if (_unlikely_(n != sizeof(si)))
2880 assert(SIGNAL_VALID(si.ssi_signo));
2884 if (e->signal_sources)
2885 s = e->signal_sources[si.ssi_signo];
2891 s->signal.siginfo = si;
2894 r = source_set_pending(s, true);
2902 static int event_inotify_data_read(sd_event *e, struct inotify_data *d, uint32_t revents) {
2908 assert_return(revents == EPOLLIN, -EIO);
2910 /* If there's already an event source pending for this priority, don't read another */
2911 if (d->n_pending > 0)
2914 /* Is the read buffer non-empty? If so, let's not read more */
2915 if (d->buffer_filled > 0)
2918 n = read(d->fd, &d->buffer, sizeof(d->buffer));
2920 if (IN_SET(errno, EAGAIN, EINTR))
2927 d->buffer_filled = (size_t) n;
2928 LIST_PREPEND(buffered, e->inotify_data_buffered, d);
2933 static void event_inotify_data_drop(sd_event *e, struct inotify_data *d, size_t sz) {
2936 assert(sz <= d->buffer_filled);
2941 /* Move the rest of the buffer to the front, in order to get things properly aligned again */
2942 memmove(d->buffer.raw, d->buffer.raw + sz, d->buffer_filled - sz);
2943 d->buffer_filled -= sz;
2945 if (d->buffer_filled == 0)
2946 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
static int event_inotify_data_process(sd_event *e, struct inotify_data *d) {
        int r;

        assert(e);
        assert(d);

        /* If there's already an event source pending for this priority, don't read another */
        if (d->n_pending > 0)
                return 0;

        while (d->buffer_filled > 0) {
                size_t sz;

                /* Let's validate that the event structures are complete */
                if (d->buffer_filled < offsetof(struct inotify_event, name))
                        return -EIO;

                sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
                if (d->buffer_filled < sz)
                        return -EIO;

                if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
                        struct inode_data *inode_data;
                        Iterator i;

                        /* The queue overran, let's pass this event to all event sources connected to
                         * this inotify object */

                        HASHMAP_FOREACH(inode_data, d->inodes, i) {
                                sd_event_source *s;

                                LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
                                        if (s->enabled == SD_EVENT_OFF)
                                                continue;

                                        r = source_set_pending(s, true);
                                        if (r < 0)
                                                return r;
                                }
                        }
                } else {
                        struct inode_data *inode_data;
                        sd_event_source *s;

                        /* Find the inode object for this watch descriptor. If IN_IGNORED is set we
                         * also remove it from our watch descriptor table. */
                        if (d->buffer.ev.mask & IN_IGNORED) {
                                inode_data = hashmap_remove(d->wd, INT_TO_PTR(d->buffer.ev.wd));
                                if (!inode_data) {
                                        event_inotify_data_drop(e, d, sz);
                                        continue;
                                }

                                /* The watch descriptor was removed by the kernel, let's drop it here too */
                                inode_data->wd = -1;
                        } else {
                                inode_data = hashmap_get(d->wd, INT_TO_PTR(d->buffer.ev.wd));
                                if (!inode_data) {
                                        event_inotify_data_drop(e, d, sz);
                                        continue;
                                }
                        }

                        /* Trigger all event sources that are interested in these events. Also trigger
                         * all event sources if IN_IGNORED or IN_UNMOUNT is set. */
                        LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
                                if (s->enabled == SD_EVENT_OFF)
                                        continue;

                                if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
                                    (s->inotify.mask & d->buffer.ev.mask & IN_ALL_EVENTS) == 0)
                                        continue;

                                r = source_set_pending(s, true);
                                if (r < 0)
                                        return r;
                        }
                }

                /* Something pending now? If so, let's finish, otherwise let's read more. */
                if (d->n_pending > 0)
                        return 1;

                /* Nothing became pending for this event, drop it and proceed with the next one */
                event_inotify_data_drop(e, d, sz);
        }

        return 0;
}
static int process_inotify(sd_event *e) {
        struct inotify_data *d;
        int r, done = 0;

        assert(e);

        LIST_FOREACH(buffered, d, e->inotify_data_buffered) {
                r = event_inotify_data_process(e, d);
                if (r < 0)
                        return r;
                if (r > 0)
                        done++;
        }

        return done;
}
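/* Usage sketch (illustrative, the path is hypothetical): an inotify source fed by the buffering
 * logic above; one kernel read is shared by all sources on the same priority.
 *
 *     static int on_inotify(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
 *             log_info("inotify event mask 0x%" PRIx32 " for %s", ev->mask, ev->len > 0 ? ev->name : ".");
 *             return 0;
 *     }
 *
 *     r = sd_event_add_inotify(e, NULL, "/run/foobar", IN_CREATE|IN_DELETE|IN_MOVED_TO, on_inotify, NULL);
 */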
static int source_dispatch(sd_event_source *s) {
        EventSourceType saved_type;
        int r = 0;

        assert(s);
        assert(s->pending || s->type == SOURCE_EXIT);

        /* Save the event source type, here, so that we still know it after the event callback which
         * might invalidate the event. */
        saved_type = s->type;

        if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
                r = source_set_pending(s, false);
                if (r < 0)
                        return r;
        }

        if (s->type != SOURCE_POST) {
                sd_event_source *z;
                Iterator i;

                /* If we execute a non-post source, let's mark all post sources as pending */

                SET_FOREACH(z, s->event->post_sources, i) {
                        if (z->enabled == SD_EVENT_OFF)
                                continue;

                        r = source_set_pending(z, true);
                        if (r < 0)
                                return r;
                }
        }

        if (s->enabled == SD_EVENT_ONESHOT) {
                r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
                if (r < 0)
                        return r;
        }

        s->dispatching = true;

        switch (s->type) {

        case SOURCE_IO:
                r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM:
                r = s->time.callback(s, s->time.next, s->userdata);
                break;

        case SOURCE_SIGNAL:
                r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
                break;

        case SOURCE_CHILD: {
                bool zombie;

                zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);

                r = s->child.callback(s, &s->child.siginfo, s->userdata);

                /* Now, reap the PID for good. */
                if (zombie)
                        (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);

                break;
        }

        case SOURCE_DEFER:
                r = s->defer.callback(s, s->userdata);
                break;

        case SOURCE_POST:
                r = s->post.callback(s, s->userdata);
                break;

        case SOURCE_EXIT:
                r = s->exit.callback(s, s->userdata);
                break;

        case SOURCE_INOTIFY: {
                struct sd_event *e = s->event;
                struct inotify_data *d;
                size_t sz;

                assert(s->inotify.inode_data);
                assert_se(d = s->inotify.inode_data->inotify_data);

                assert(d->buffer_filled >= offsetof(struct inotify_event, name));
                sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
                assert(d->buffer_filled >= sz);

                r = s->inotify.callback(s, &d->buffer.ev, s->userdata);

                /* When no event is pending anymore on this inotify object, then let's drop the event
                 * from the inotify event queue buffer. */
                if (d->n_pending == 0)
                        event_inotify_data_drop(e, d, sz);

                break;
        }

        case SOURCE_WATCHDOG:
        case _SOURCE_EVENT_SOURCE_TYPE_MAX:
        case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        s->dispatching = false;

        if (r < 0)
                log_debug_errno(r, "Event source %s (type %s) returned error, disabling: %m",
                                strna(s->description), event_source_type_to_string(saved_type));

        if (s->n_ref == 0)
                source_free(s);
        else if (r < 0)
                sd_event_source_set_enabled(s, SD_EVENT_OFF);

        return 1;
}
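/* Note on the SD_EVENT_ONESHOT handling above: the source is switched off *before* its callback
 * runs, so a callback may re-enable its own source to rearm it. Illustrative sketch:
 *
 *     static int on_defer(sd_event_source *s, void *userdata) {
 *             ...
 *             return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);   (fire once more)
 *     }
 */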
static int event_prepare(sd_event *e) {
        int r;

        assert(e);

        for (;;) {
                sd_event_source *s;

                s = prioq_peek(e->prepare);
                if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
                        break;

                s->prepare_iteration = e->iteration;
                r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
                if (r < 0)
                        return r;

                assert(s->prepare);

                s->dispatching = true;
                r = s->prepare(s, s->userdata);
                s->dispatching = false;

                if (r < 0)
                        log_debug_errno(r, "Prepare callback of event source %s (type %s) returned error, disabling: %m",
                                        strna(s->description), event_source_type_to_string(s->type));

                if (s->n_ref == 0)
                        source_free(s);
                else if (r < 0)
                        sd_event_source_set_enabled(s, SD_EVENT_OFF);
        }

        return 0;
}
static int dispatch_exit(sd_event *e) {
        sd_event_source *p;
        _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
        int r;

        assert(e);

        p = prioq_peek(e->exit);
        if (!p || p->enabled == SD_EVENT_OFF) {
                e->state = SD_EVENT_FINISHED;
                return 0;
        }

        ref = sd_event_ref(e);
        e->iteration++;
        e->state = SD_EVENT_EXITING;
        r = source_dispatch(p);
        e->state = SD_EVENT_INITIAL;

        return r;
}
static sd_event_source* event_next_pending(sd_event *e) {
        sd_event_source *p;

        assert(e);

        p = prioq_peek(e->pending);
        if (!p)
                return NULL;

        if (p->enabled == SD_EVENT_OFF)
                return NULL;

        return p;
}
static int arm_watchdog(sd_event *e) {
        struct itimerspec its = {};
        usec_t t;
        int r;

        assert(e);
        assert(e->watchdog_fd >= 0);

        t = sleep_between(e,
                          e->watchdog_last + (e->watchdog_period / 2),
                          e->watchdog_last + (e->watchdog_period * 3 / 4));

        timespec_store(&its.it_value, t);

        /* Make sure we never set the watchdog to 0, which tells the kernel to disable it. */
        if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
                its.it_value.tv_nsec = 1;

        r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
        if (r < 0)
                return -errno;

        return 0;
}
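/* Worked example: with WATCHDOG_USEC=20000000 (a 20s period, as obtained via sd_watchdog_enabled()
 * below), a ping at time T makes arm_watchdog() schedule the next wakeup into the window
 * [T+10s, T+15s] (period/2 to period*3/4); sleep_between() picks a point in that window that can
 * coalesce with other timer wakeups. Together with the period/4 rate limit in process_watchdog()
 * this keeps pings comfortably within the service manager's timeout. */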
static int process_watchdog(sd_event *e) {
        assert(e);

        if (!e->watchdog)
                return 0;

        /* Don't notify watchdog too often */
        if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
                return 0;

        sd_notify(false, "WATCHDOG=1");
        e->watchdog_last = e->timestamp.monotonic;

        return arm_watchdog(e);
}
static void event_close_inode_data_fds(sd_event *e) {
        struct inode_data *d;

        assert(e);

        /* Close the fds pointing to the inodes to watch now. We need to close them as they might
         * otherwise pin filesystems. But we can't close them right away as we need them as long as the
         * user still wants to make adjustments to the event source, such as changing the priority
         * (which requires us to remove and re-add a watch for the inode). Hence, let's close them when
         * entering the first iteration after they were added, as a compromise. */

        while ((d = e->inode_data_to_close)) {
                assert(d->fd >= 0);
                d->fd = safe_close(d->fd);

                LIST_REMOVE(to_close, e->inode_data_to_close, d);
        }
}
_public_ int sd_event_prepare(sd_event *e) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        if (e->exit_requested)
                goto pending;

        e->iteration++;

        e->state = SD_EVENT_PREPARING;
        r = event_prepare(e);
        e->state = SD_EVENT_INITIAL;
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->realtime);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->boottime);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->monotonic);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->realtime_alarm);
        if (r < 0)
                return r;

        r = event_arm_timer(e, &e->boottime_alarm);
        if (r < 0)
                return r;

        event_close_inode_data_fds(e);

        if (event_next_pending(e) || e->need_process_child)
                goto pending;

        e->state = SD_EVENT_ARMED;

        return 0;

pending:
        e->state = SD_EVENT_ARMED;
        r = sd_event_wait(e, 0);
        if (r == 0)
                e->state = SD_EVENT_ARMED;

        return r;
}
_public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
        struct epoll_event *ev_queue;
        unsigned ev_queue_max;
        int r, m, i;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_ARMED, -EBUSY);

        if (e->exit_requested) {
                e->state = SD_EVENT_PENDING;
                return 1;
        }

        ev_queue_max = MAX(e->n_sources, 1u);
        ev_queue = newa(struct epoll_event, ev_queue_max);

        /* If we still have inotify data buffered, then query the other fds, but don't wait on it */
        if (e->inotify_data_buffered)
                timeout = 0;

        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
                       timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
        if (m < 0) {
                if (errno == EINTR) {
                        e->state = SD_EVENT_PENDING;
                        return 1;
                }

                r = -errno;
                goto finish;
        }

        triple_timestamp_get(&e->timestamp);

        for (i = 0; i < m; i++) {

                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
                else {
                        WakeupType *t = ev_queue[i].data.ptr;

                        switch (*t) {

                        case WAKEUP_EVENT_SOURCE:
                                r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        case WAKEUP_CLOCK_DATA: {
                                struct clock_data *d = ev_queue[i].data.ptr;
                                r = flush_timer(e, d->fd, ev_queue[i].events, &d->next);
                                break;
                        }

                        case WAKEUP_SIGNAL_DATA:
                                r = process_signal(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        case WAKEUP_INOTIFY_DATA:
                                r = event_inotify_data_read(e, ev_queue[i].data.ptr, ev_queue[i].events);
                                break;

                        default:
                                assert_not_reached("Invalid wake-up pointer");
                        }
                }
                if (r < 0)
                        goto finish;
        }

        r = process_watchdog(e);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.boottime, &e->boottime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.boottime, &e->boottime_alarm);
        if (r < 0)
                goto finish;

        if (e->need_process_child) {
                r = process_child(e);
                if (r < 0)
                        goto finish;
        }

        r = process_inotify(e);
        if (r < 0)
                goto finish;

        if (event_next_pending(e)) {
                e->state = SD_EVENT_PENDING;
                return 1;
        }

        r = 0;

finish:
        e->state = SD_EVENT_INITIAL;

        return r;
}
_public_ int sd_event_dispatch(sd_event *e) {
        sd_event_source *p;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_PENDING, -EBUSY);

        if (e->exit_requested)
                return dispatch_exit(e);

        p = event_next_pending(e);
        if (p) {
                _cleanup_(sd_event_unrefp) sd_event *ref = NULL;

                ref = sd_event_ref(e);
                e->state = SD_EVENT_RUNNING;
                r = source_dispatch(p);
                e->state = SD_EVENT_INITIAL;
                return r;
        }

        e->state = SD_EVENT_INITIAL;

        return 1;
}
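/* sd_event_prepare(), sd_event_wait() and sd_event_dispatch() above compose into the same loop that
 * sd_event_run() implements below. An explicit version (illustrative sketch) looks like this:
 *
 *     for (;;) {
 *             r = sd_event_prepare(e);
 *             if (r == 0)   (nothing was pending, hence wait for something)
 *                     r = sd_event_wait(e, (uint64_t) -1);
 *             if (r < 0)
 *                     break;
 *             if (r > 0) {
 *                     r = sd_event_dispatch(e);
 *                     if (r <= 0)   (error, or the loop finished)
 *                             break;
 *             }
 *     }
 */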
static void event_log_delays(sd_event *e) {
        char b[ELEMENTSOF(e->delays) * DECIMAL_STR_MAX(unsigned) + 1];
        unsigned i;
        int o;

        for (i = o = 0; i < ELEMENTSOF(e->delays); i++) {
                o += snprintf(&b[o], sizeof(b) - o, "%u ", e->delays[i]);
                e->delays[i] = 0;
        }
        log_debug("Event loop iterations: %.*s", o, b);
}
_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        if (e->profile_delays && e->last_run) {
                usec_t this_run;
                unsigned l;

                this_run = now(CLOCK_MONOTONIC);

                l = u64log2(this_run - e->last_run);
                assert(l < sizeof(e->delays));
                e->delays[l]++;

                if (this_run - e->last_log >= 5*USEC_PER_SEC) {
                        event_log_delays(e);
                        e->last_log = this_run;
                }
        }

        r = sd_event_prepare(e);
        if (r == 0)
                /* There was nothing? Then wait... */
                r = sd_event_wait(e, timeout);

        if (e->profile_delays)
                e->last_run = now(CLOCK_MONOTONIC);

        if (r > 0) {
                /* There's something now, then let's dispatch it */
                r = sd_event_dispatch(e);
                if (r < 0)
                        return r;

                return 1;
        }

        return r;
}
_public_ int sd_event_loop(sd_event *e) {
        _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);

        ref = sd_event_ref(e);

        while (e->state != SD_EVENT_FINISHED) {
                r = sd_event_run(e, (uint64_t) -1);
                if (r < 0)
                        return r;
        }

        return e->exit_code;
}
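/* Minimal usage sketch (illustrative): the typical way a daemon drives this loop.
 *
 *     sd_event *e = NULL;
 *     int r;
 *
 *     r = sd_event_default(&e);
 *     if (r < 0)
 *             return r;
 *
 *     (add IO/timer/signal/child/inotify sources here)
 *
 *     r = sd_event_loop(e);   (returns the code passed to sd_event_exit(), or an error)
 *     sd_event_unref(e);
 */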
_public_ int sd_event_get_fd(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->epoll_fd;
}
_public_ int sd_event_get_state(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->state;
}
_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(code, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!e->exit_requested)
                return -ENODATA;

        *code = e->exit_code;
        return 0;
}
_public_ int sd_event_exit(sd_event *e, int code) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit_requested = true;
        e->exit_code = code;

        return 0;
}
_public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(usec, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!TRIPLE_TIMESTAMP_HAS_CLOCK(clock))
                return -EOPNOTSUPP;

        /* Generate a clean error in case CLOCK_BOOTTIME is not available. Note that we don't use
         * clock_supported() here, for a reason: there are systems where CLOCK_BOOTTIME is supported but
         * CLOCK_BOOTTIME_ALARM is not, and for the purpose of getting the time this doesn't matter. */
        if (IN_SET(clock, CLOCK_BOOTTIME, CLOCK_BOOTTIME_ALARM) && !clock_boottime_supported())
                return -EOPNOTSUPP;

        if (!triple_timestamp_is_set(&e->timestamp)) {
                /* Implicitly fall back to now() if we never ran before and thus have no cached time. */
                *usec = now(clock);
                return 1;
        }

        *usec = triple_timestamp_by_clock(&e->timestamp, clock);
        return 0;
}
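/* Usage sketch (illustrative): sd_event_now() is the preferred base for relative timers, since it
 * stays coherent with the timestamp the current loop iteration was dispatched with:
 *
 *     uint64_t usec;
 *
 *     assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
 *     r = sd_event_add_time(e, NULL, CLOCK_MONOTONIC, usec + 5 * USEC_PER_SEC, 0, on_time, NULL);
 */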
_public_ int sd_event_default(sd_event **ret) {
        sd_event *e = NULL;
        int r;

        if (!ret)
                return !!default_event;

        if (default_event) {
                *ret = sd_event_ref(default_event);
                return 0;
        }

        r = sd_event_new(&e);
        if (r < 0)
                return r;

        e->default_event_ptr = &default_event;
        e->tid = gettid();
        default_event = e;

        *ret = e;
        return 1;
}
_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(tid, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->tid != 0) {
                *tid = e->tid;
                return 0;
        }

        return -ENXIO;
}
_public_ int sd_event_set_watchdog(sd_event *e, int b) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->watchdog == !!b)
                return e->watchdog;

        if (b) {
                struct epoll_event ev;

                r = sd_watchdog_enabled(false, &e->watchdog_period);
                if (r <= 0)
                        return r;

                /* Issue first ping immediately */
                sd_notify(false, "WATCHDOG=1");
                e->watchdog_last = now(CLOCK_MONOTONIC);

                e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
                if (e->watchdog_fd < 0)
                        return -errno;

                r = arm_watchdog(e);
                if (r < 0)
                        goto fail;

                ev = (struct epoll_event) {
                        .events = EPOLLIN,
                        .data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
                };

                r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
                if (r < 0) {
                        r = -errno;
                        goto fail;
                }

        } else {
                if (e->watchdog_fd >= 0) {
                        epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
                        e->watchdog_fd = safe_close(e->watchdog_fd);
                }
        }

        e->watchdog = !!b;
        return e->watchdog;

fail:
        e->watchdog_fd = safe_close(e->watchdog_fd);
        return r;
}
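/* Usage sketch (illustrative): for a service running with WatchdogSec= set in its unit file, the
 * service manager exports WATCHDOG_USEC into the environment and sd_watchdog_enabled() above picks
 * it up; a single call then makes the event loop keep the watchdog fed automatically as long as it
 * keeps iterating:
 *
 *     assert_se(sd_event_set_watchdog(e, true) >= 0);
 */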
_public_ int sd_event_get_watchdog(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->watchdog;
}
_public_ int sd_event_get_iteration(sd_event *e, uint64_t *ret) {
        assert_return(e, -EINVAL);
        assert_return(e = event_resolve(e), -ENOPKG);
        assert_return(!event_pid_changed(e), -ECHILD);

        *ret = e->iteration;
        return 0;
}