1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
4 This file is part of systemd.
6 Copyright 2013 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
22 #include <sys/epoll.h>
23 #include <sys/timerfd.h>
28 #include "sd-daemon.h"
33 #include "time-util.h"
40 #define EPOLL_QUEUE_MAX 512U
41 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
43 typedef enum EventSourceType {
47 SOURCE_TIME_MONOTONIC,
48 SOURCE_TIME_REALTIME_ALARM,
49 SOURCE_TIME_BOOTTIME_ALARM,
56 _SOURCE_EVENT_SOURCE_TYPE_MAX,
57 _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
60 #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
62 struct sd_event_source {
67 sd_event_handler_t prepare;
71 EventSourceType type:5;
78 unsigned pending_index;
79 unsigned prepare_index;
80 unsigned pending_iteration;
81 unsigned prepare_iteration;
83 LIST_FIELDS(sd_event_source, sources);
87 sd_event_io_handler_t callback;
94 sd_event_time_handler_t callback;
95 usec_t next, accuracy;
96 unsigned earliest_index;
97 unsigned latest_index;
100 sd_event_signal_handler_t callback;
101 struct signalfd_siginfo siginfo;
105 sd_event_child_handler_t callback;
111 sd_event_handler_t callback;
114 sd_event_handler_t callback;
117 sd_event_handler_t callback;
118 unsigned prioq_index;
126 /* For all clocks we maintain two priority queues each, one
127 * ordered for the earliest times the events may be
128 * dispatched, and one ordered by the latest times they must
129 * have been dispatched. The range between the top entries in
130 * the two prioqs is the time window we can freely schedule wakeups in. */
150 /* timerfd_create() only supports these five clocks so far. We
151 * can add support for more clocks when the kernel learns to
152 * deal with them, too. */
153 struct clock_data realtime;
154 struct clock_data boottime;
155 struct clock_data monotonic;
156 struct clock_data realtime_alarm;
157 struct clock_data boottime_alarm;
162 sd_event_source **signal_sources;
164 Hashmap *child_sources;
165 unsigned n_enabled_child_sources;
174 dual_timestamp timestamp;
175 usec_t timestamp_boottime;
178 bool exit_requested:1;
179 bool need_process_child:1;
185 sd_event **default_event_ptr;
187 usec_t watchdog_last, watchdog_period;
191 LIST_HEAD(sd_event_source, sources);
194 static void source_disconnect(sd_event_source *s);
196 static int pending_prioq_compare(const void *a, const void *b) {
197 const sd_event_source *x = a, *y = b;
202 /* Enabled ones first */
203 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
205 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
208 /* Lower priority values first */
209 if (x->priority < y->priority)
211 if (x->priority > y->priority)
214 /* Older entries first */
215 if (x->pending_iteration < y->pending_iteration)
217 if (x->pending_iteration > y->pending_iteration)
220 /* Stability for the rest */
229 static int prepare_prioq_compare(const void *a, const void *b) {
230 const sd_event_source *x = a, *y = b;
235 /* Move most recently prepared ones last, so that we can stop
236 * preparing as soon as we hit one that has already been
237 * prepared in the current iteration */
238 if (x->prepare_iteration < y->prepare_iteration)
240 if (x->prepare_iteration > y->prepare_iteration)
243 /* Enabled ones first */
244 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
246 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
249 /* Lower priority values first */
250 if (x->priority < y->priority)
252 if (x->priority > y->priority)
255 /* Stability for the rest */
264 static int earliest_time_prioq_compare(const void *a, const void *b) {
265 const sd_event_source *x = a, *y = b;
267 assert(EVENT_SOURCE_IS_TIME(x->type));
268 assert(x->type == y->type);
270 /* Enabled ones first */
271 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
273 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
276 /* Move the pending ones to the end */
277 if (!x->pending && y->pending)
279 if (x->pending && !y->pending)
283 if (x->time.next < y->time.next)
285 if (x->time.next > y->time.next)
288 /* Stability for the rest */
297 static int latest_time_prioq_compare(const void *a, const void *b) {
298 const sd_event_source *x = a, *y = b;
300 assert(EVENT_SOURCE_IS_TIME(x->type));
301 assert(x->type == y->type);
303 /* Enabled ones first */
304 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
306 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
309 /* Move the pending ones to the end */
310 if (!x->pending && y->pending)
312 if (x->pending && !y->pending)
316 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
318 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
321 /* Stability for the rest */
330 static int exit_prioq_compare(const void *a, const void *b) {
331 const sd_event_source *x = a, *y = b;
333 assert(x->type == SOURCE_EXIT);
334 assert(y->type == SOURCE_EXIT);
336 /* Enabled ones first */
337 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
339 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
342 /* Lower priority values first */
343 if (x->priority < y->priority)
345 if (x->priority > y->priority)
348 /* Stability for the rest */
357 static void free_clock_data(struct clock_data *d) {
361 prioq_free(d->earliest);
362 prioq_free(d->latest);
365 static void event_free(sd_event *e) {
370 while ((s = e->sources)) {
372 source_disconnect(s);
373 sd_event_source_unref(s);
376 assert(e->n_sources == 0);
378 if (e->default_event_ptr)
379 *(e->default_event_ptr) = NULL;
381 safe_close(e->epoll_fd);
382 safe_close(e->signal_fd);
383 safe_close(e->watchdog_fd);
385 free_clock_data(&e->realtime);
386 free_clock_data(&e->boottime);
387 free_clock_data(&e->monotonic);
388 free_clock_data(&e->realtime_alarm);
389 free_clock_data(&e->boottime_alarm);
391 prioq_free(e->pending);
392 prioq_free(e->prepare);
395 free(e->signal_sources);
397 hashmap_free(e->child_sources);
398 set_free(e->post_sources);
402 _public_ int sd_event_new(sd_event** ret) {
406 assert_return(ret, -EINVAL);
408 e = new0(sd_event, 1);
413 e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->boottime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
414 e->realtime.next = e->boottime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = USEC_INFINITY;
415 e->original_pid = getpid();
416 e->perturb = USEC_INFINITY;
418 assert_se(sigemptyset(&e->sigset) == 0);
420 e->pending = prioq_new(pending_prioq_compare);
426 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
427 if (e->epoll_fd < 0) {
440 _public_ sd_event* sd_event_ref(sd_event *e) {
441 assert_return(e, NULL);
443 assert(e->n_ref >= 1);
449 _public_ sd_event* sd_event_unref(sd_event *e) {
454 assert(e->n_ref >= 1);
463 static bool event_pid_changed(sd_event *e) {
466 /* We don't support people creating an event loop and keeping
467 * it around over a fork(). Let's complain. */
469 return e->original_pid != getpid();
472 static int source_io_unregister(sd_event_source *s) {
476 assert(s->type == SOURCE_IO);
478 if (!s->io.registered)
481 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
485 s->io.registered = false;
489 static int source_io_register(
494 struct epoll_event ev = {};
498 assert(s->type == SOURCE_IO);
499 assert(enabled != SD_EVENT_OFF);
504 if (enabled == SD_EVENT_ONESHOT)
505 ev.events |= EPOLLONESHOT;
507 if (s->io.registered)
508 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
510 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
515 s->io.registered = true;
520 static clockid_t event_source_type_to_clock(EventSourceType t) {
524 case SOURCE_TIME_REALTIME:
525 return CLOCK_REALTIME;
527 case SOURCE_TIME_BOOTTIME:
528 return CLOCK_BOOTTIME;
530 case SOURCE_TIME_MONOTONIC:
531 return CLOCK_MONOTONIC;
533 case SOURCE_TIME_REALTIME_ALARM:
534 return CLOCK_REALTIME_ALARM;
536 case SOURCE_TIME_BOOTTIME_ALARM:
537 return CLOCK_BOOTTIME_ALARM;
540 return (clockid_t) -1;
544 static EventSourceType clock_to_event_source_type(clockid_t clock) {
549 return SOURCE_TIME_REALTIME;
552 return SOURCE_TIME_BOOTTIME;
554 case CLOCK_MONOTONIC:
555 return SOURCE_TIME_MONOTONIC;
557 case CLOCK_REALTIME_ALARM:
558 return SOURCE_TIME_REALTIME_ALARM;
560 case CLOCK_BOOTTIME_ALARM:
561 return SOURCE_TIME_BOOTTIME_ALARM;
564 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
568 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
573 case SOURCE_TIME_REALTIME:
576 case SOURCE_TIME_BOOTTIME:
579 case SOURCE_TIME_MONOTONIC:
580 return &e->monotonic;
582 case SOURCE_TIME_REALTIME_ALARM:
583 return &e->realtime_alarm;
585 case SOURCE_TIME_BOOTTIME_ALARM:
586 return &e->boottime_alarm;
593 static bool need_signal(sd_event *e, int signal) {
594 return (e->signal_sources && e->signal_sources[signal] &&
595 e->signal_sources[signal]->enabled != SD_EVENT_OFF)
597 (signal == SIGCHLD &&
598 e->n_enabled_child_sources > 0);
601 static int event_update_signal_fd(sd_event *e) {
602 struct epoll_event ev = {};
608 add_to_epoll = e->signal_fd < 0;
610 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
620 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
622 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
624 e->signal_fd = safe_close(e->signal_fd);
631 static void source_disconnect(sd_event_source *s) {
639 assert(s->event->n_sources > 0);
645 source_io_unregister(s);
649 case SOURCE_TIME_REALTIME:
650 case SOURCE_TIME_BOOTTIME:
651 case SOURCE_TIME_MONOTONIC:
652 case SOURCE_TIME_REALTIME_ALARM:
653 case SOURCE_TIME_BOOTTIME_ALARM: {
654 struct clock_data *d;
656 d = event_get_clock_data(s->event, s->type);
659 prioq_remove(d->earliest, s, &s->time.earliest_index);
660 prioq_remove(d->latest, s, &s->time.latest_index);
661 d->needs_rearm = true;
666 if (s->signal.sig > 0) {
667 if (s->event->signal_sources)
668 s->event->signal_sources[s->signal.sig] = NULL;
670 /* If the signal was on and now it is off... */
671 if (s->enabled != SD_EVENT_OFF && !need_signal(s->event, s->signal.sig)) {
672 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
674 (void) event_update_signal_fd(s->event);
675 /* If disabling failed, we might get a spurious event,
676 * but otherwise nothing bad should happen. */
683 if (s->child.pid > 0) {
684 if (s->enabled != SD_EVENT_OFF) {
685 assert(s->event->n_enabled_child_sources > 0);
686 s->event->n_enabled_child_sources--;
688 /* We know the signal was on, if it is off now... */
689 if (!need_signal(s->event, SIGCHLD)) {
690 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
692 (void) event_update_signal_fd(s->event);
693 /* If disabling failed, we might get a spurious event,
694 * but otherwise nothing bad should happen. */
698 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
708 set_remove(s->event->post_sources, s);
712 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
716 assert_not_reached("Wut? I shouldn't exist.");
720 prioq_remove(s->event->pending, s, &s->pending_index);
723 prioq_remove(s->event->prepare, s, &s->prepare_index);
727 s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
729 LIST_REMOVE(sources, event->sources, s);
733 sd_event_unref(event);
736 static void source_free(sd_event_source *s) {
739 source_disconnect(s);
740 free(s->description);
744 static int source_set_pending(sd_event_source *s, bool b) {
748 assert(s->type != SOURCE_EXIT);
756 s->pending_iteration = s->event->iteration;
758 r = prioq_put(s->event->pending, s, &s->pending_index);
764 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
766 if (EVENT_SOURCE_IS_TIME(s->type)) {
767 struct clock_data *d;
769 d = event_get_clock_data(s->event, s->type);
772 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
773 prioq_reshuffle(d->latest, s, &s->time.latest_index);
774 d->needs_rearm = true;
780 static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
785 s = new0(sd_event_source, 1);
791 s->floating = floating;
793 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
798 LIST_PREPEND(sources, e->sources, s);
804 _public_ int sd_event_add_io(
806 sd_event_source **ret,
809 sd_event_io_handler_t callback,
815 assert_return(e, -EINVAL);
816 assert_return(fd >= 0, -EINVAL);
817 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
818 assert_return(callback, -EINVAL);
819 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
820 assert_return(!event_pid_changed(e), -ECHILD);
822 s = source_new(e, !ret, SOURCE_IO);
827 s->io.events = events;
828 s->io.callback = callback;
829 s->userdata = userdata;
830 s->enabled = SD_EVENT_ON;
832 r = source_io_register(s, s->enabled, events);
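/* Illustrative usage sketch (not part of this file, assuming the public
 * prototypes from sd-event.h): hooking a non-blocking fd into the loop.
 * "listen_fd" and "on_io" are hypothetical names.
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             if (revents & EPOLLIN) {
 *                     // read from fd here; a negative return disables the source
 *             }
 *             return 0;
 *     }
 *
 *     sd_event_source *src = NULL;
 *     r = sd_event_add_io(e, &src, listen_fd, EPOLLIN, on_io, NULL);
 *
 * Passing NULL instead of &src makes the source "floating", i.e. owned by the
 * loop itself (note the !ret argument passed to source_new() above). */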
844 static void initialize_perturb(sd_event *e) {
845 sd_id128_t bootid = {};
847 /* When we sleep for longer, we try to realign the wakeup to
848 the same time within each minute/second/250ms, so that
849 events all across the system can be coalesced into a single
850 CPU wakeup. However, let's take some system-specific
851 randomness for this value, so that in a network of systems
852 with synced clocks timer events are distributed a
853 bit. Here, we calculate a perturbation usec offset from the boot ID. */
856 if (_likely_(e->perturb != USEC_INFINITY))
859 if (sd_id128_get_boot(&bootid) >= 0)
860 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
863 static int event_setup_timer_fd(
865 struct clock_data *d,
868 struct epoll_event ev = {};
874 if (_likely_(d->fd >= 0))
877 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
882 ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));
884 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
894 _public_ int sd_event_add_time(
896 sd_event_source **ret,
900 sd_event_time_handler_t callback,
903 EventSourceType type;
905 struct clock_data *d;
908 assert_return(e, -EINVAL);
909 assert_return(usec != (uint64_t) -1, -EINVAL);
910 assert_return(accuracy != (uint64_t) -1, -EINVAL);
911 assert_return(callback, -EINVAL);
912 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
913 assert_return(!event_pid_changed(e), -ECHILD);
915 type = clock_to_event_source_type(clock);
916 assert_return(type >= 0, -ENOTSUP);
918 d = event_get_clock_data(e, type);
922 d->earliest = prioq_new(earliest_time_prioq_compare);
928 d->latest = prioq_new(latest_time_prioq_compare);
934 r = event_setup_timer_fd(e, d, clock);
939 s = source_new(e, !ret, type);
944 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
945 s->time.callback = callback;
946 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
947 s->userdata = userdata;
948 s->enabled = SD_EVENT_ONESHOT;
950 d->needs_rearm = true;
952 r = prioq_put(d->earliest, s, &s->time.earliest_index);
956 r = prioq_put(d->latest, s, &s->time.latest_index);
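/* Illustrative usage sketch (not part of this file): arming a one-shot timer
 * five seconds from now. "on_time" and the delay are hypothetical; passing 0
 * as accuracy falls back to DEFAULT_ACCURACY_USEC (see above), which gives
 * the loop a window for coalescing wakeups.
 *
 *     static int on_time(sd_event_source *s, uint64_t usec, void *userdata) {
 *             // usec is the time the source was scheduled for
 *             return 0;
 *     }
 *
 *     uint64_t now_usec;
 *     sd_event_now(e, CLOCK_MONOTONIC, &now_usec);
 *     r = sd_event_add_time(e, NULL, CLOCK_MONOTONIC, now_usec + 5 * USEC_PER_SEC, 0, on_time, NULL);
 *
 * Time sources start out as SD_EVENT_ONESHOT (see above); to fire again,
 * update them with sd_event_source_set_time() and re-enable them. */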
970 static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
973 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
976 _public_ int sd_event_add_signal(
978 sd_event_source **ret,
980 sd_event_signal_handler_t callback,
988 assert_return(e, -EINVAL);
989 assert_return(sig > 0, -EINVAL);
990 assert_return(sig < _NSIG, -EINVAL);
991 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
992 assert_return(!event_pid_changed(e), -ECHILD);
995 callback = signal_exit_callback;
997 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
1001 if (!sigismember(&ss, sig))
1004 if (!e->signal_sources) {
1005 e->signal_sources = new0(sd_event_source*, _NSIG);
1006 if (!e->signal_sources)
1008 } else if (e->signal_sources[sig])
1011 previous = need_signal(e, sig);
1013 s = source_new(e, !ret, SOURCE_SIGNAL);
1017 s->signal.sig = sig;
1018 s->signal.callback = callback;
1019 s->userdata = userdata;
1020 s->enabled = SD_EVENT_ON;
1022 e->signal_sources[sig] = s;
1025 assert_se(sigaddset(&e->sigset, sig) == 0);
1027 r = event_update_signal_fd(e);
1034 /* Use the signal name as description for the event source by default */
1035 (void) sd_event_source_set_description(s, signal_to_string(sig));
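/* Illustrative usage sketch (not part of this file): handling SIGTERM via the
 * loop. The signal must already be blocked in the calling thread, which the
 * pthread_sigmask()/sigismember() check above is there to verify; "on_sigterm"
 * is hypothetical. Passing a NULL callback falls back to signal_exit_callback()
 * above, i.e. the signal then simply terminates the loop.
 *
 *     static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
 *             return sd_event_exit(sd_event_source_get_event(s), 0);
 *     }
 *
 *     sigset_t ss;
 *     sigemptyset(&ss);
 *     sigaddset(&ss, SIGTERM);
 *     sigprocmask(SIG_BLOCK, &ss, NULL);
 *
 *     r = sd_event_add_signal(e, NULL, SIGTERM, on_sigterm, NULL);
 */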
1043 _public_ int sd_event_add_child(
1045 sd_event_source **ret,
1048 sd_event_child_handler_t callback,
1055 assert_return(e, -EINVAL);
1056 assert_return(pid > 1, -EINVAL);
1057 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
1058 assert_return(options != 0, -EINVAL);
1059 assert_return(callback, -EINVAL);
1060 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1061 assert_return(!event_pid_changed(e), -ECHILD);
1063 r = hashmap_ensure_allocated(&e->child_sources, NULL);
1067 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
1070 previous = need_signal(e, SIGCHLD);
1072 s = source_new(e, !ret, SOURCE_CHILD);
1077 s->child.options = options;
1078 s->child.callback = callback;
1079 s->userdata = userdata;
1080 s->enabled = SD_EVENT_ONESHOT;
1082 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
1088 e->n_enabled_child_sources ++;
1091 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
1093 r = event_update_signal_fd(e);
1100 e->need_process_child = true;
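/* Illustrative usage sketch (not part of this file): watching a forked child.
 * "child_pid" and "on_child" are hypothetical. SIGCHLD should be blocked in
 * the calling thread so that it is delivered through the loop's signalfd; with
 * WEXITED the child stays a zombie until the callback has run, since
 * process_child() below waits with WNOWAIT and the PID is only reaped for good
 * after dispatching (see source_dispatch()).
 *
 *     static int on_child(sd_event_source *s, const siginfo_t *si, void *userdata) {
 *             // si->si_code and si->si_status describe how the child exited
 *             return 0;
 *     }
 *
 *     r = sd_event_add_child(e, NULL, child_pid, WEXITED, on_child, NULL);
 */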
1108 _public_ int sd_event_add_defer(
1110 sd_event_source **ret,
1111 sd_event_handler_t callback,
1117 assert_return(e, -EINVAL);
1118 assert_return(callback, -EINVAL);
1119 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1120 assert_return(!event_pid_changed(e), -ECHILD);
1122 s = source_new(e, !ret, SOURCE_DEFER);
1126 s->defer.callback = callback;
1127 s->userdata = userdata;
1128 s->enabled = SD_EVENT_ONESHOT;
1130 r = source_set_pending(s, true);
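/* Illustrative usage sketch (not part of this file): a defer source is marked
 * pending right away (source_set_pending() above) and, being SD_EVENT_ONESHOT
 * by default, runs exactly once on the next iteration unless re-enabled.
 * "on_idle" is a hypothetical name.
 *
 *     static int on_idle(sd_event_source *s, void *userdata) {
 *             // runs once, from the next loop iteration
 *             return 0;
 *     }
 *
 *     r = sd_event_add_defer(e, NULL, on_idle, NULL);
 */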
1142 _public_ int sd_event_add_post(
1144 sd_event_source **ret,
1145 sd_event_handler_t callback,
1151 assert_return(e, -EINVAL);
1152 assert_return(callback, -EINVAL);
1153 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1154 assert_return(!event_pid_changed(e), -ECHILD);
1156 r = set_ensure_allocated(&e->post_sources, NULL);
1160 s = source_new(e, !ret, SOURCE_POST);
1164 s->post.callback = callback;
1165 s->userdata = userdata;
1166 s->enabled = SD_EVENT_ON;
1168 r = set_put(e->post_sources, s);
1180 _public_ int sd_event_add_exit(
1182 sd_event_source **ret,
1183 sd_event_handler_t callback,
1189 assert_return(e, -EINVAL);
1190 assert_return(callback, -EINVAL);
1191 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1192 assert_return(!event_pid_changed(e), -ECHILD);
1195 e->exit = prioq_new(exit_prioq_compare);
1200 s = source_new(e, !ret, SOURCE_EXIT);
1204 s->exit.callback = callback;
1205 s->userdata = userdata;
1206 s->exit.prioq_index = PRIOQ_IDX_NULL;
1207 s->enabled = SD_EVENT_ONESHOT;
1209 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
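/* Illustrative usage sketch (not part of this file): registering cleanup work
 * that runs once the loop has been asked to exit. Exit sources are dispatched
 * from dispatch_exit() below, ordered by priority, after sd_event_exit() has
 * been called and before the loop reaches SD_EVENT_FINISHED. "on_exit_cb" is
 * hypothetical.
 *
 *     static int on_exit_cb(sd_event_source *s, void *userdata) {
 *             // release resources here
 *             return 0;
 *     }
 *
 *     r = sd_event_add_exit(e, NULL, on_exit_cb, NULL);
 */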
1221 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1222 assert_return(s, NULL);
1224 assert(s->n_ref >= 1);
1230 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1235 assert(s->n_ref >= 1);
1238 if (s->n_ref <= 0) {
1239 /* Here's a special hack: when we are called from a
1240 * dispatch handler we won't free the event source
1241 * immediately, but we will detach the fd from the
1242 * epoll. This way it is safe for the caller to unref
1243 * the event source and immediately close the fd, but
1244 * we still retain a valid event source object after the callback. */
1247 if (s->dispatching) {
1248 if (s->type == SOURCE_IO)
1249 source_io_unregister(s);
1251 source_disconnect(s);
1259 _public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
1260 assert_return(s, -EINVAL);
1262 return free_and_strdup(&s->description, description);
1265 _public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
1266 assert_return(s, -EINVAL);
1267 assert_return(description, -EINVAL);
1269 *description = s->description;
1273 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1274 assert_return(s, NULL);
1279 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1280 assert_return(s, -EINVAL);
1281 assert_return(s->type != SOURCE_EXIT, -EDOM);
1282 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1283 assert_return(!event_pid_changed(s->event), -ECHILD);
1288 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1289 assert_return(s, -EINVAL);
1290 assert_return(s->type == SOURCE_IO, -EDOM);
1291 assert_return(!event_pid_changed(s->event), -ECHILD);
1296 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1299 assert_return(s, -EINVAL);
1300 assert_return(fd >= 0, -EINVAL);
1301 assert_return(s->type == SOURCE_IO, -EDOM);
1302 assert_return(!event_pid_changed(s->event), -ECHILD);
1307 if (s->enabled == SD_EVENT_OFF) {
1309 s->io.registered = false;
1313 saved_fd = s->io.fd;
1314 assert(s->io.registered);
1317 s->io.registered = false;
1319 r = source_io_register(s, s->enabled, s->io.events);
1321 s->io.fd = saved_fd;
1322 s->io.registered = true;
1326 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
1332 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1333 assert_return(s, -EINVAL);
1334 assert_return(events, -EINVAL);
1335 assert_return(s->type == SOURCE_IO, -EDOM);
1336 assert_return(!event_pid_changed(s->event), -ECHILD);
1338 *events = s->io.events;
1342 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1345 assert_return(s, -EINVAL);
1346 assert_return(s->type == SOURCE_IO, -EDOM);
1347 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1348 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1349 assert_return(!event_pid_changed(s->event), -ECHILD);
1351 /* edge-triggered updates are never skipped, so we can reset edges */
1352 if (s->io.events == events && !(events & EPOLLET))
1355 if (s->enabled != SD_EVENT_OFF) {
1356 r = source_io_register(s, s->enabled, events);
1361 s->io.events = events;
1362 source_set_pending(s, false);
1367 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1368 assert_return(s, -EINVAL);
1369 assert_return(revents, -EINVAL);
1370 assert_return(s->type == SOURCE_IO, -EDOM);
1371 assert_return(s->pending, -ENODATA);
1372 assert_return(!event_pid_changed(s->event), -ECHILD);
1374 *revents = s->io.revents;
1378 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1379 assert_return(s, -EINVAL);
1380 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1381 assert_return(!event_pid_changed(s->event), -ECHILD);
1383 return s->signal.sig;
1386 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1387 assert_return(s, -EINVAL);
1388 assert_return(!event_pid_changed(s->event), -ECHILD);
1393 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1394 assert_return(s, -EINVAL);
1395 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1396 assert_return(!event_pid_changed(s->event), -ECHILD);
1398 if (s->priority == priority)
1401 s->priority = priority;
1404 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1407 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1409 if (s->type == SOURCE_EXIT)
1410 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1415 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1416 assert_return(s, -EINVAL);
1417 assert_return(m, -EINVAL);
1418 assert_return(!event_pid_changed(s->event), -ECHILD);
1424 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1427 assert_return(s, -EINVAL);
1428 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1429 assert_return(!event_pid_changed(s->event), -ECHILD);
1431 /* If we are dead anyway, we are fine with turning off
1432 * sources, but everything else needs to fail. */
1433 if (s->event->state == SD_EVENT_FINISHED)
1434 return m == SD_EVENT_OFF ? 0 : -ESTALE;
1436 if (s->enabled == m)
1439 if (m == SD_EVENT_OFF) {
1444 r = source_io_unregister(s);
1451 case SOURCE_TIME_REALTIME:
1452 case SOURCE_TIME_BOOTTIME:
1453 case SOURCE_TIME_MONOTONIC:
1454 case SOURCE_TIME_REALTIME_ALARM:
1455 case SOURCE_TIME_BOOTTIME_ALARM: {
1456 struct clock_data *d;
1459 d = event_get_clock_data(s->event, s->type);
1462 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1463 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1464 d->needs_rearm = true;
1469 assert(need_signal(s->event, s->signal.sig));
1473 if (!need_signal(s->event, s->signal.sig)) {
1474 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1476 (void) event_update_signal_fd(s->event);
1477 /* If disabling failed, we might get a spurious event,
1478 * but otherwise nothing bad should happen. */
1484 assert(need_signal(s->event, SIGCHLD));
1488 assert(s->event->n_enabled_child_sources > 0);
1489 s->event->n_enabled_child_sources--;
1491 if (!need_signal(s->event, SIGCHLD)) {
1492 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1494 (void) event_update_signal_fd(s->event);
1501 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1510 assert_not_reached("Wut? I shouldn't exist.");
1517 r = source_io_register(s, m, s->io.events);
1524 case SOURCE_TIME_REALTIME:
1525 case SOURCE_TIME_BOOTTIME:
1526 case SOURCE_TIME_MONOTONIC:
1527 case SOURCE_TIME_REALTIME_ALARM:
1528 case SOURCE_TIME_BOOTTIME_ALARM: {
1529 struct clock_data *d;
1532 d = event_get_clock_data(s->event, s->type);
1535 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1536 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1537 d->needs_rearm = true;
1542 /* Check status before enabling. */
1543 if (!need_signal(s->event, s->signal.sig)) {
1544 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1546 r = event_update_signal_fd(s->event);
1548 s->enabled = SD_EVENT_OFF;
1557 /* Check status before enabling. */
1558 if (s->enabled == SD_EVENT_OFF) {
1559 if (!need_signal(s->event, SIGCHLD)) {
1560 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1562 r = event_update_signal_fd(s->event);
1564 s->enabled = SD_EVENT_OFF;
1569 s->event->n_enabled_child_sources++;
1577 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1586 assert_not_reached("Wut? I shouldn't exist.");
1591 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1594 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1599 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1600 assert_return(s, -EINVAL);
1601 assert_return(usec, -EINVAL);
1602 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1603 assert_return(!event_pid_changed(s->event), -ECHILD);
1605 *usec = s->time.next;
1609 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1610 struct clock_data *d;
1612 assert_return(s, -EINVAL);
1613 assert_return(usec != (uint64_t) -1, -EINVAL);
1614 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1615 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1616 assert_return(!event_pid_changed(s->event), -ECHILD);
1618 s->time.next = usec;
1620 source_set_pending(s, false);
1622 d = event_get_clock_data(s->event, s->type);
1625 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1626 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1627 d->needs_rearm = true;
1632 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1633 assert_return(s, -EINVAL);
1634 assert_return(usec, -EINVAL);
1635 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1636 assert_return(!event_pid_changed(s->event), -ECHILD);
1638 *usec = s->time.accuracy;
1642 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1643 struct clock_data *d;
1645 assert_return(s, -EINVAL);
1646 assert_return(usec != (uint64_t) -1, -EINVAL);
1647 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1648 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1649 assert_return(!event_pid_changed(s->event), -ECHILD);
1652 usec = DEFAULT_ACCURACY_USEC;
1654 s->time.accuracy = usec;
1656 source_set_pending(s, false);
1658 d = event_get_clock_data(s->event, s->type);
1661 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1662 d->needs_rearm = true;
1667 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
1668 assert_return(s, -EINVAL);
1669 assert_return(clock, -EINVAL);
1670 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1671 assert_return(!event_pid_changed(s->event), -ECHILD);
1673 *clock = event_source_type_to_clock(s->type);
1677 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1678 assert_return(s, -EINVAL);
1679 assert_return(pid, -EINVAL);
1680 assert_return(s->type == SOURCE_CHILD, -EDOM);
1681 assert_return(!event_pid_changed(s->event), -ECHILD);
1683 *pid = s->child.pid;
1687 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1690 assert_return(s, -EINVAL);
1691 assert_return(s->type != SOURCE_EXIT, -EDOM);
1692 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1693 assert_return(!event_pid_changed(s->event), -ECHILD);
1695 if (s->prepare == callback)
1698 if (callback && s->prepare) {
1699 s->prepare = callback;
1703 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1707 s->prepare = callback;
1710 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1714 prioq_remove(s->event->prepare, s, &s->prepare_index);
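/* Illustrative usage sketch (not part of this file): a prepare callback is
 * invoked from event_prepare() below at the start of every iteration, before
 * polling, which is useful when a source's configuration depends on state that
 * may have changed since the last iteration. "on_prepare", "io_source" and the
 * chosen event mask are hypothetical.
 *
 *     static int on_prepare(sd_event_source *s, void *userdata) {
 *             // e.g. recompute the wanted event mask before the loop polls
 *             return sd_event_source_set_io_events(s, EPOLLIN|EPOLLOUT);
 *     }
 *
 *     r = sd_event_source_set_prepare(io_source, on_prepare);
 */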
1719 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1720 assert_return(s, NULL);
1725 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1728 assert_return(s, NULL);
1731 s->userdata = userdata;
1736 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1747 initialize_perturb(e);
1750 /* Find a good time to wake up again between times a and b. We
1751 have two goals here:
1753 a) We want to wake up as seldom as possible, hence prefer
1754 later times over earlier times.
1756 b) But if we have to wake up, then let's make sure to
1757 dispatch as much as possible on the entire system.
1759 We implement this by waking up everywhere at the same time
1760 within any given minute if we can, synchronised via the
1761 perturbation value determined from the boot ID. If we can't,
1762 then we try to find the same spot in every 10s, then 1s and
1763 then 250ms step. Otherwise, we pick the last possible time to wake up. */
1767 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
1769 if (_unlikely_(c < USEC_PER_MINUTE))
1772 c -= USEC_PER_MINUTE;
1778 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1780 if (_unlikely_(c < USEC_PER_SEC*10))
1783 c -= USEC_PER_SEC*10;
1789 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1791 if (_unlikely_(c < USEC_PER_SEC))
1800 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1802 if (_unlikely_(c < USEC_PER_MSEC*250))
1805 c -= USEC_PER_MSEC*250;
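/* A worked example of the coalescing above, with illustrative numbers: say
 * the perturbation is 7.3 s and the allowed wakeup window is
 * a = XX:34:20.0 .. b = XX:34:56.0.
 *
 *   - minute step: c = XX:34:00 + 7.3 s = XX:34:07.3, which lies before a,
 *     so it cannot be used;
 *   - 10 s step:   c = XX:34:50 + 7.3 s = XX:34:57.3, which lies past b, so
 *     10 s are subtracted, giving XX:34:47.3; that falls within [a, b] and
 *     is returned.
 *
 * Every loop on the same machine whose window covers XX:34:47.3 thus picks
 * exactly that instant, so their timer expirations share one CPU wakeup. */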
1814 static int event_arm_timer(
1816 struct clock_data *d) {
1818 struct itimerspec its = {};
1819 sd_event_source *a, *b;
1826 if (!d->needs_rearm)
1829 d->needs_rearm = false;
1831 a = prioq_peek(d->earliest);
1832 if (!a || a->enabled == SD_EVENT_OFF) {
1837 if (d->next == USEC_INFINITY)
1841 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1845 d->next = USEC_INFINITY;
1849 b = prioq_peek(d->latest);
1850 assert_se(b && b->enabled != SD_EVENT_OFF);
1852 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1856 assert_se(d->fd >= 0);
1859 /* We don't want to disarm here, so point the timer at some time looooong ago instead. */
1860 its.it_value.tv_sec = 0;
1861 its.it_value.tv_nsec = 1;
1863 timespec_store(&its.it_value, t);
1865 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1873 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1876 assert(s->type == SOURCE_IO);
1878 /* If the event source was already pending, we just OR in the
1879 * new revents, otherwise we reset the value. The ORing is
1880 * necessary to handle EPOLLONESHOT events properly where
1881 * readability might happen independently of writability, and
1882 * we need to keep track of both events. */
1885 s->io.revents |= revents;
1887 s->io.revents = revents;
1889 return source_set_pending(s, true);
1892 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
1899 assert_return(events == EPOLLIN, -EIO);
1901 ss = read(fd, &x, sizeof(x));
1903 if (errno == EAGAIN || errno == EINTR)
1909 if (_unlikely_(ss != sizeof(x)))
1913 *next = USEC_INFINITY;
1918 static int process_timer(
1921 struct clock_data *d) {
1930 s = prioq_peek(d->earliest);
1933 s->enabled == SD_EVENT_OFF ||
1937 r = source_set_pending(s, true);
1941 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1942 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1943 d->needs_rearm = true;
1949 static int process_child(sd_event *e) {
1956 e->need_process_child = false;
1959 /* So, this is ugly. We iteratively invoke waitid() with P_PID
1960 + WNOHANG for each PID we wait for, instead of using
1961 P_ALL. This is because we only want to get child
1962 information of very specific child processes, and not all
1963 of them. We might not have processed the SIGCHLD event of a
1964 previous invocation and we don't want to maintain an
1965 unbounded *per-child* event queue, hence we really don't
1966 want anything flushed out of the kernel's queue that we
1967 don't care about. Since this is O(n) this means that if you
1968 have a lot of processes you probably want to handle SIGCHLD yourself.
1971 We do not reap the children here (by using WNOWAIT); that
1972 is only done after the event source is dispatched, so that
1973 the callback still sees the process as a zombie. */
1976 HASHMAP_FOREACH(s, e->child_sources, i) {
1977 assert(s->type == SOURCE_CHILD);
1982 if (s->enabled == SD_EVENT_OFF)
1985 zero(s->child.siginfo);
1986 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1987 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
1991 if (s->child.siginfo.si_pid != 0) {
1993 s->child.siginfo.si_code == CLD_EXITED ||
1994 s->child.siginfo.si_code == CLD_KILLED ||
1995 s->child.siginfo.si_code == CLD_DUMPED;
1997 if (!zombie && (s->child.options & WEXITED)) {
1998 /* If the child isn't dead then let's
1999 * immediately remove the state change
2000 * from the queue, since there's no
2001 * benefit in leaving it queued */
2003 assert(s->child.options & (WSTOPPED|WCONTINUED));
2004 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
2007 r = source_set_pending(s, true);
2016 static int process_signal(sd_event *e, uint32_t events) {
2017 bool read_one = false;
2022 assert_return(events == EPOLLIN, -EIO);
2025 struct signalfd_siginfo si;
2027 sd_event_source *s = NULL;
2029 n = read(e->signal_fd, &si, sizeof(si));
2031 if (errno == EAGAIN || errno == EINTR)
2037 if (_unlikely_(n != sizeof(si)))
2040 assert(si.ssi_signo < _NSIG);
2044 if (si.ssi_signo == SIGCHLD) {
2045 r = process_child(e);
2052 if (e->signal_sources)
2053 s = e->signal_sources[si.ssi_signo];
2058 s->signal.siginfo = si;
2059 r = source_set_pending(s, true);
2065 static int source_dispatch(sd_event_source *s) {
2069 assert(s->pending || s->type == SOURCE_EXIT);
2071 if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
2072 r = source_set_pending(s, false);
2077 if (s->type != SOURCE_POST) {
2081 /* If we execute a non-post source, let's mark all
2082 * post sources as pending */
2084 SET_FOREACH(z, s->event->post_sources, i) {
2085 if (z->enabled == SD_EVENT_OFF)
2088 r = source_set_pending(z, true);
2094 if (s->enabled == SD_EVENT_ONESHOT) {
2095 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
2100 s->dispatching = true;
2105 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
2108 case SOURCE_TIME_REALTIME:
2109 case SOURCE_TIME_BOOTTIME:
2110 case SOURCE_TIME_MONOTONIC:
2111 case SOURCE_TIME_REALTIME_ALARM:
2112 case SOURCE_TIME_BOOTTIME_ALARM:
2113 r = s->time.callback(s, s->time.next, s->userdata);
2117 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
2120 case SOURCE_CHILD: {
2123 zombie = s->child.siginfo.si_code == CLD_EXITED ||
2124 s->child.siginfo.si_code == CLD_KILLED ||
2125 s->child.siginfo.si_code == CLD_DUMPED;
2127 r = s->child.callback(s, &s->child.siginfo, s->userdata);
2129 /* Now, reap the PID for good. */
2131 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
2137 r = s->defer.callback(s, s->userdata);
2141 r = s->post.callback(s, s->userdata);
2145 r = s->exit.callback(s, s->userdata);
2148 case SOURCE_WATCHDOG:
2149 case _SOURCE_EVENT_SOURCE_TYPE_MAX:
2150 case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
2151 assert_not_reached("Wut? I shouldn't exist.");
2154 s->dispatching = false;
2158 log_debug("Event source '%s' returned error, disabling: %s", s->description, strerror(-r));
2160 log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));
2166 sd_event_source_set_enabled(s, SD_EVENT_OFF);
2171 static int event_prepare(sd_event *e) {
2179 s = prioq_peek(e->prepare);
2180 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
2183 s->prepare_iteration = e->iteration;
2184 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
2190 s->dispatching = true;
2191 r = s->prepare(s, s->userdata);
2192 s->dispatching = false;
2196 log_debug("Prepare callback of event source '%s' returned error, disabling: %s", s->description, strerror(-r));
2198 log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));
2204 sd_event_source_set_enabled(s, SD_EVENT_OFF);
2210 static int dispatch_exit(sd_event *e) {
2216 p = prioq_peek(e->exit);
2217 if (!p || p->enabled == SD_EVENT_OFF) {
2218 e->state = SD_EVENT_FINISHED;
2224 e->state = SD_EVENT_EXITING;
2226 r = source_dispatch(p);
2228 e->state = SD_EVENT_PASSIVE;
2234 static sd_event_source* event_next_pending(sd_event *e) {
2239 p = prioq_peek(e->pending);
2243 if (p->enabled == SD_EVENT_OFF)
2249 static int arm_watchdog(sd_event *e) {
2250 struct itimerspec its = {};
2255 assert(e->watchdog_fd >= 0);
2257 t = sleep_between(e,
2258 e->watchdog_last + (e->watchdog_period / 2),
2259 e->watchdog_last + (e->watchdog_period * 3 / 4));
2261 timespec_store(&its.it_value, t);
2263 /* Make sure we never set the watchdog to 0, which tells the
2264 * kernel to disable it. */
2265 if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
2266 its.it_value.tv_nsec = 1;
2268 r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
2275 static int process_watchdog(sd_event *e) {
2281 /* Don't notify watchdog too often */
2282 if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
2285 sd_notify(false, "WATCHDOG=1");
2286 e->watchdog_last = e->timestamp.monotonic;
2288 return arm_watchdog(e);
2291 _public_ int sd_event_prepare(sd_event *e) {
2294 assert_return(e, -EINVAL);
2295 assert_return(!event_pid_changed(e), -ECHILD);
2296 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2297 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2299 if (e->exit_requested)
2304 r = event_prepare(e);
2308 r = event_arm_timer(e, &e->realtime);
2312 r = event_arm_timer(e, &e->boottime);
2316 r = event_arm_timer(e, &e->monotonic);
2320 r = event_arm_timer(e, &e->realtime_alarm);
2324 r = event_arm_timer(e, &e->boottime_alarm);
2328 if (event_next_pending(e) || e->need_process_child)
2331 e->state = SD_EVENT_PREPARED;
2336 e->state = SD_EVENT_PREPARED;
2337 r = sd_event_wait(e, 0);
2339 e->state = SD_EVENT_PREPARED;
2344 _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
2345 struct epoll_event *ev_queue;
2346 unsigned ev_queue_max;
2349 assert_return(e, -EINVAL);
2350 assert_return(!event_pid_changed(e), -ECHILD);
2351 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2352 assert_return(e->state == SD_EVENT_PREPARED, -EBUSY);
2354 if (e->exit_requested) {
2355 e->state = SD_EVENT_PENDING;
2359 ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
2360 ev_queue = newa(struct epoll_event, ev_queue_max);
2362 m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
2363 timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
2365 if (errno == EINTR) {
2366 e->state = SD_EVENT_PENDING;
2375 dual_timestamp_get(&e->timestamp);
2376 e->timestamp_boottime = now(CLOCK_BOOTTIME);
2378 for (i = 0; i < m; i++) {
2380 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
2381 r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
2382 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME))
2383 r = flush_timer(e, e->boottime.fd, ev_queue[i].events, &e->boottime.next);
2384 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
2385 r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
2386 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
2387 r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
2388 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
2389 r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
2390 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
2391 r = process_signal(e, ev_queue[i].events);
2392 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
2393 r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
2395 r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
2401 r = process_watchdog(e);
2405 r = process_timer(e, e->timestamp.realtime, &e->realtime);
2409 r = process_timer(e, e->timestamp_boottime, &e->boottime);
2413 r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
2417 r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
2421 r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);
2425 if (e->need_process_child) {
2426 r = process_child(e);
2431 if (event_next_pending(e)) {
2432 e->state = SD_EVENT_PENDING;
2440 e->state = SD_EVENT_PASSIVE;
2445 _public_ int sd_event_dispatch(sd_event *e) {
2449 assert_return(e, -EINVAL);
2450 assert_return(!event_pid_changed(e), -ECHILD);
2451 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2452 assert_return(e->state == SD_EVENT_PENDING, -EBUSY);
2454 if (e->exit_requested)
2455 return dispatch_exit(e);
2457 p = event_next_pending(e);
2461 e->state = SD_EVENT_RUNNING;
2462 r = source_dispatch(p);
2463 e->state = SD_EVENT_PASSIVE;
2470 e->state = SD_EVENT_PASSIVE;
2475 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
2478 assert_return(e, -EINVAL);
2479 assert_return(!event_pid_changed(e), -ECHILD);
2480 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2481 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2483 r = sd_event_prepare(e);
2485 return sd_event_dispatch(e);
2489 r = sd_event_wait(e, timeout);
2491 return sd_event_dispatch(e);
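/* Illustrative sketch (not part of this file): sd_event_run() above is the
 * composition of the three phases, which can also be driven by hand, e.g.
 * when embedding the loop into a foreign main loop via sd_event_get_fd():
 *
 *     r = sd_event_prepare(e);               // returns > 0 if something is already pending
 *     if (r == 0)
 *             r = sd_event_wait(e, timeout); // returns > 0 once an event is pending
 *     if (r > 0)
 *             r = sd_event_dispatch(e);      // dispatches exactly one event source
 *
 * Error returns (r < 0) are omitted here for brevity. */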
2496 _public_ int sd_event_loop(sd_event *e) {
2499 assert_return(e, -EINVAL);
2500 assert_return(!event_pid_changed(e), -ECHILD);
2501 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2505 while (e->state != SD_EVENT_FINISHED) {
2506 r = sd_event_run(e, (uint64_t) -1);
2518 _public_ int sd_event_get_fd(sd_event *e) {
2520 assert_return(e, -EINVAL);
2521 assert_return(!event_pid_changed(e), -ECHILD);
2526 _public_ int sd_event_get_state(sd_event *e) {
2527 assert_return(e, -EINVAL);
2528 assert_return(!event_pid_changed(e), -ECHILD);
2533 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
2534 assert_return(e, -EINVAL);
2535 assert_return(code, -EINVAL);
2536 assert_return(!event_pid_changed(e), -ECHILD);
2538 if (!e->exit_requested)
2541 *code = e->exit_code;
2545 _public_ int sd_event_exit(sd_event *e, int code) {
2546 assert_return(e, -EINVAL);
2547 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2548 assert_return(!event_pid_changed(e), -ECHILD);
2550 e->exit_requested = true;
2551 e->exit_code = code;
2556 _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
2557 assert_return(e, -EINVAL);
2558 assert_return(usec, -EINVAL);
2559 assert_return(!event_pid_changed(e), -ECHILD);
2561 /* If we haven't run yet, just get the actual time */
2562 if (!dual_timestamp_is_set(&e->timestamp))
2567 case CLOCK_REALTIME:
2568 case CLOCK_REALTIME_ALARM:
2569 *usec = e->timestamp.realtime;
2572 case CLOCK_MONOTONIC:
2573 *usec = e->timestamp.monotonic;
2576 case CLOCK_BOOTTIME:
2577 case CLOCK_BOOTTIME_ALARM:
2578 *usec = e->timestamp_boottime;
2585 _public_ int sd_event_default(sd_event **ret) {
2587 static thread_local sd_event *default_event = NULL;
2592 return !!default_event;
2594 if (default_event) {
2595 *ret = sd_event_ref(default_event);
2599 r = sd_event_new(&e);
2603 e->default_event_ptr = &default_event;
2611 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
2612 assert_return(e, -EINVAL);
2613 assert_return(tid, -EINVAL);
2614 assert_return(!event_pid_changed(e), -ECHILD);
2624 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
2627 assert_return(e, -EINVAL);
2628 assert_return(!event_pid_changed(e), -ECHILD);
2630 if (e->watchdog == !!b)
2634 struct epoll_event ev = {};
2636 r = sd_watchdog_enabled(false, &e->watchdog_period);
2640 /* Issue first ping immediately */
2641 sd_notify(false, "WATCHDOG=1");
2642 e->watchdog_last = now(CLOCK_MONOTONIC);
2644 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
2645 if (e->watchdog_fd < 0)
2648 r = arm_watchdog(e);
2652 ev.events = EPOLLIN;
2653 ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);
2655 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
2662 if (e->watchdog_fd >= 0) {
2663 epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
2664 e->watchdog_fd = safe_close(e->watchdog_fd);
2672 e->watchdog_fd = safe_close(e->watchdog_fd);
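/* Illustrative usage sketch (not part of this file): when the process is a
 * systemd service started with WatchdogSec=, enabling watchdog support makes
 * the loop emit the "WATCHDOG=1" keep-alive notifications by itself, paced by
 * the timerfd armed in arm_watchdog() above:
 *
 *     r = sd_event_set_watchdog(e, true);
 *     if (r < 0)
 *             return r;
 *
 * Whether keep-alive pings were actually requested by the service manager is
 * determined by the sd_watchdog_enabled() call above. */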
2676 _public_ int sd_event_get_watchdog(sd_event *e) {
2677 assert_return(e, -EINVAL);
2678 assert_return(!event_pid_changed(e), -ECHILD);