/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/wait.h>
#include <sys/signalfd.h>
#include <signal.h>
#include <pthread.h>

#include "sd-id128.h"
#include "sd-daemon.h"
#include "sd-event.h"
#include "macro.h"
#include "prioq.h"
#include "hashmap.h"
#include "set.h"
#include "list.h"
#include "util.h"
#include "time-util.h"

#define EPOLL_QUEUE_MAX 512U
#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)

typedef enum EventSourceType {
        SOURCE_IO,
        SOURCE_TIME_REALTIME,
        SOURCE_TIME_BOOTTIME,
        SOURCE_TIME_MONOTONIC,
        SOURCE_TIME_REALTIME_ALARM,
        SOURCE_TIME_BOOTTIME_ALARM,
        SOURCE_SIGNAL,
        SOURCE_CHILD,
        SOURCE_DEFER,
        SOURCE_POST,
        SOURCE_EXIT,
        SOURCE_WATCHDOG,
        _SOURCE_EVENT_SOURCE_TYPE_MAX,
        _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
} EventSourceType;

#define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)

struct sd_event_source {
        unsigned n_ref;

        sd_event *event;
        void *userdata;
        sd_event_handler_t prepare;

        EventSourceType type:5;
        int enabled:3;
        bool pending:1;
        bool dispatching:1;
        bool floating:1;

        int64_t priority;
        unsigned pending_index;
        unsigned prepare_index;
        unsigned pending_iteration;
        unsigned prepare_iteration;

        LIST_FIELDS(sd_event_source, sources);

        union {
                struct {
                        sd_event_io_handler_t callback;
                        int fd;
                        uint32_t events;
                        uint32_t revents;
                        bool registered:1;
                } io;
                struct {
                        sd_event_time_handler_t callback;
                        usec_t next, accuracy;
                        unsigned earliest_index;
                        unsigned latest_index;
                } time;
                struct {
                        sd_event_signal_handler_t callback;
                        struct signalfd_siginfo siginfo;
                        int sig;
                } signal;
                struct {
                        sd_event_child_handler_t callback;
                        siginfo_t siginfo;
                        pid_t pid;
                        int options;
                } child;
                struct {
                        sd_event_handler_t callback;
                } defer;
                struct {
                        sd_event_handler_t callback;
                } post;
                struct {
                        sd_event_handler_t callback;
                        unsigned prioq_index;
                } exit;
        };
};

struct clock_data {
        int fd;

        /* For all clocks we maintain two priority queues each, one
         * ordered for the earliest times the events may be
         * dispatched, and one ordered by the latest times they must
         * have been dispatched. The range between the top entries in
         * the two prioqs is the time window we can freely schedule
         * wakeups in. */

        Prioq *earliest;
        Prioq *latest;
        usec_t next;
};

struct sd_event {
        unsigned n_ref;

        int epoll_fd;
        int signal_fd;
        int watchdog_fd;

        Prioq *pending;
        Prioq *prepare;

        /* timerfd_create() only supports these five clocks so far. We
         * can add support for more clocks when the kernel learns to
         * deal with them, too. */
        struct clock_data realtime;
        struct clock_data boottime;
        struct clock_data monotonic;
        struct clock_data realtime_alarm;
        struct clock_data boottime_alarm;

        usec_t perturb;

        sigset_t sigset;
        sd_event_source **signal_sources;

        Hashmap *child_sources;
        unsigned n_enabled_child_sources;

        Set *post_sources;

        Prioq *exit;

        pid_t original_pid;

        unsigned iteration;
        dual_timestamp timestamp;
        usec_t timestamp_boottime;
        int state;

        bool exit_requested:1;
        bool need_process_child:1;
        bool watchdog:1;

        int exit_code;

        pid_t tid;
        sd_event **default_event_ptr;

        usec_t watchdog_last, watchdog_period;

        unsigned n_sources;

        LIST_HEAD(sd_event_source, sources);
};

static void source_disconnect(sd_event_source *s);

static int pending_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        if (x->priority < y->priority)
                return -1;
        if (x->priority > y->priority)
                return 1;

        /* Older entries first */
        if (x->pending_iteration < y->pending_iteration)
                return -1;
        if (x->pending_iteration > y->pending_iteration)
                return 1;

        /* Stability for the rest */
        if (x < y)
                return -1;
        if (x > y)
                return 1;

        return 0;
}

static int prepare_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Move most recently prepared ones last, so that we can stop
         * preparing as soon as we hit one that has already been
         * prepared in the current iteration */
        if (x->prepare_iteration < y->prepare_iteration)
                return -1;
        if (x->prepare_iteration > y->prepare_iteration)
                return 1;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        if (x->priority < y->priority)
                return -1;
        if (x->priority > y->priority)
                return 1;

        /* Stability for the rest */
        if (x < y)
                return -1;
        if (x > y)
                return 1;

        return 0;
}

static int earliest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(EVENT_SOURCE_IS_TIME(x->type));
        assert(x->type == y->type);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        if (x->time.next < y->time.next)
                return -1;
        if (x->time.next > y->time.next)
                return 1;

        /* Stability for the rest */
        if (x < y)
                return -1;
        if (x > y)
                return 1;

        return 0;
}

static int latest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(EVENT_SOURCE_IS_TIME(x->type));
        assert(x->type == y->type);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
                return -1;
        if (x->pending && !y->pending)
                return 1;

        /* Order by time */
        if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
                return -1;
        if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
                return 1;

        /* Stability for the rest */
        if (x < y)
                return -1;
        if (x > y)
                return 1;

        return 0;
}

static int exit_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(x->type == SOURCE_EXIT);
        assert(y->type == SOURCE_EXIT);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
                return -1;
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
                return 1;

        /* Lower priority values first */
        if (x->priority < y->priority)
                return -1;
        if (x->priority > y->priority)
                return 1;

        /* Stability for the rest */
        if (x < y)
                return -1;
        if (x > y)
                return 1;

        return 0;
}
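
/* Illustrative ordering note (not from the original source): for two enabled
 * exit sources with priorities -10 and 0, the comparator above sorts the
 * -10 source first, so prioq_peek(e->exit) hands it to dispatch_exit() first;
 * a disabled source sorts after every enabled one regardless of priority. */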

static void free_clock_data(struct clock_data *d) {
        assert(d);

        prioq_free(d->earliest);
        prioq_free(d->latest);
}

static void event_free(sd_event *e) {
        sd_event_source *s;

        assert(e);

        while ((s = e->sources)) {
                assert(s->floating);
                source_disconnect(s);
                sd_event_source_unref(s);
        }

        assert(e->n_sources == 0);

        if (e->default_event_ptr)
                *(e->default_event_ptr) = NULL;

        safe_close(e->epoll_fd);
        safe_close(e->signal_fd);
        safe_close(e->watchdog_fd);

        free_clock_data(&e->realtime);
        free_clock_data(&e->boottime);
        free_clock_data(&e->monotonic);
        free_clock_data(&e->realtime_alarm);
        free_clock_data(&e->boottime_alarm);

        prioq_free(e->pending);
        prioq_free(e->prepare);
        prioq_free(e->exit);

        free(e->signal_sources);

        hashmap_free(e->child_sources);
        set_free(e->post_sources);
        free(e);
}

_public_ int sd_event_new(sd_event** ret) {
        sd_event *e;
        int r;

        assert_return(ret, -EINVAL);

        e = new0(sd_event, 1);
        if (!e)
                return -ENOMEM;

        e->n_ref = 1;
        e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->boottime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
        e->realtime.next = e->boottime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = (usec_t) -1;
        e->original_pid = getpid();
        e->perturb = (usec_t) -1;

        assert_se(sigemptyset(&e->sigset) == 0);

        e->pending = prioq_new(pending_prioq_compare);
        if (!e->pending) {
                r = -ENOMEM;
                goto fail;
        }

        e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (e->epoll_fd < 0) {
                r = -errno;
                goto fail;
        }

        *ret = e;
        return 0;

fail:
        event_free(e);
        return r;
}

_public_ sd_event* sd_event_ref(sd_event *e) {
        assert_return(e, NULL);

        assert(e->n_ref >= 1);
        e->n_ref++;

        return e;
}

_public_ sd_event* sd_event_unref(sd_event *e) {

        if (!e)
                return NULL;

        assert(e->n_ref >= 1);
        e->n_ref--;

        if (e->n_ref <= 0)
                event_free(e);

        return NULL;
}

static bool event_pid_changed(sd_event *e) {
        assert(e);

        /* We don't support people creating an event loop and keeping
         * it around over a fork(). Let's complain. */

        return e->original_pid != getpid();
}

static int source_io_unregister(sd_event_source *s) {
        int r;

        assert(s);
        assert(s->type == SOURCE_IO);

        if (!s->io.registered)
                return 0;

        r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
        if (r < 0)
                return -errno;

        s->io.registered = false;
        return 0;
}

static int source_io_register(
                sd_event_source *s,
                int enabled,
                uint32_t events) {

        struct epoll_event ev = {};
        int r;

        assert(s);
        assert(s->type == SOURCE_IO);
        assert(enabled != SD_EVENT_OFF);

        ev.events = events;
        ev.data.ptr = s;

        if (enabled == SD_EVENT_ONESHOT)
                ev.events |= EPOLLONESHOT;

        if (s->io.registered)
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
        else
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);

        if (r < 0)
                return -errno;

        s->io.registered = true;

        return 0;
}

static clockid_t event_source_type_to_clock(EventSourceType t) {

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return CLOCK_REALTIME;

        case SOURCE_TIME_BOOTTIME:
                return CLOCK_BOOTTIME;

        case SOURCE_TIME_MONOTONIC:
                return CLOCK_MONOTONIC;

        case SOURCE_TIME_REALTIME_ALARM:
                return CLOCK_REALTIME_ALARM;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return CLOCK_BOOTTIME_ALARM;

        default:
                return (clockid_t) -1;
        }
}

static EventSourceType clock_to_event_source_type(clockid_t clock) {

        switch (clock) {

        case CLOCK_REALTIME:
                return SOURCE_TIME_REALTIME;

        case CLOCK_BOOTTIME:
                return SOURCE_TIME_BOOTTIME;

        case CLOCK_MONOTONIC:
                return SOURCE_TIME_MONOTONIC;

        case CLOCK_REALTIME_ALARM:
                return SOURCE_TIME_REALTIME_ALARM;

        case CLOCK_BOOTTIME_ALARM:
                return SOURCE_TIME_BOOTTIME_ALARM;

        default:
                return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
        }
}

static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
        assert(e);

        switch (t) {

        case SOURCE_TIME_REALTIME:
                return &e->realtime;

        case SOURCE_TIME_BOOTTIME:
                return &e->boottime;

        case SOURCE_TIME_MONOTONIC:
                return &e->monotonic;

        case SOURCE_TIME_REALTIME_ALARM:
                return &e->realtime_alarm;

        case SOURCE_TIME_BOOTTIME_ALARM:
                return &e->boottime_alarm;

        default:
                return NULL;
        }
}

static void source_disconnect(sd_event_source *s) {
        sd_event *event;

        assert(s);

        if (!s->event)
                return;

        assert(s->event->n_sources > 0);

        switch (s->type) {

        case SOURCE_IO:
                if (s->io.fd >= 0)
                        source_io_unregister(s);

                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM: {
                struct clock_data *d;

                d = event_get_clock_data(s->event, s->type);
                assert(d);

                prioq_remove(d->earliest, s, &s->time.earliest_index);
                prioq_remove(d->latest, s, &s->time.latest_index);
                break;
        }

        case SOURCE_SIGNAL:
                if (s->signal.sig > 0) {
                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
                                assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);

                        if (s->event->signal_sources)
                                s->event->signal_sources[s->signal.sig] = NULL;

                        event_update_signal_fd(s->event);
                }

                break;

        case SOURCE_CHILD:
                if (s->child.pid > 0) {
                        if (s->enabled != SD_EVENT_OFF) {
                                assert(s->event->n_enabled_child_sources > 0);
                                s->event->n_enabled_child_sources--;
                        }

                        if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
                                assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);

                        event_update_signal_fd(s->event);

                        hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
                }

                break;

        case SOURCE_DEFER:
                /* nothing */
                break;

        case SOURCE_POST:
                set_remove(s->event->post_sources, s);
                break;

        case SOURCE_EXIT:
                prioq_remove(s->event->exit, s, &s->exit.prioq_index);
                break;

        default:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        if (s->pending)
                prioq_remove(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_remove(s->event->prepare, s, &s->prepare_index);

        event = s->event;

        s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
        s->event = NULL;
        LIST_REMOVE(sources, event->sources, s);
        event->n_sources--;

        sd_event_unref(event);
}

static void source_free(sd_event_source *s) {
        assert(s);

        source_disconnect(s);
        free(s);
}

static int source_set_pending(sd_event_source *s, bool b) {
        int r;

        assert(s);
        assert(s->type != SOURCE_EXIT);

        if (s->pending == b)
                return 0;

        s->pending = b;

        if (b) {
                s->pending_iteration = s->event->iteration;

                r = prioq_put(s->event->pending, s, &s->pending_index);
                if (r < 0) {
                        s->pending = false;
                        return r;
                }
        } else
                assert_se(prioq_remove(s->event->pending, s, &s->pending_index));

        if (EVENT_SOURCE_IS_TIME(s->type)) {
                struct clock_data *d;

                d = event_get_clock_data(s->event, s->type);
                assert(d);

                prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                prioq_reshuffle(d->latest, s, &s->time.latest_index);
        }

        return 0;
}

static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
        sd_event_source *s;

        assert(e);

        s = new0(sd_event_source, 1);
        if (!s)
                return NULL;

        s->n_ref = 1;
        s->event = e;
        s->floating = floating;
        s->type = type;
        s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;

        if (!floating)
                sd_event_ref(e);

        LIST_PREPEND(sources, e->sources, s);
        e->n_sources++;

        return s;
}

_public_ int sd_event_add_io(
                sd_event *e,
                sd_event_source **ret,
                int fd,
                uint32_t events,
                sd_event_io_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        int r;

        assert_return(e, -EINVAL);
        assert_return(fd >= 0, -EINVAL);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, !ret, SOURCE_IO);
        if (!s)
                return -ENOMEM;

        s->io.fd = fd;
        s->io.events = events;
        s->io.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = source_io_register(s, s->enabled, events);
        if (r < 0) {
                source_free(s);
                return r;
        }

        if (ret)
                *ret = s;

        return 0;
}
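
/* Illustrative usage sketch (hypothetical on_readable handler and fd; error
 * handling elided): watch a readable fd with a callback matching
 * sd_event_io_handler_t:
 *
 *     static int on_readable(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             char buf[4096];
 *             (void) read(fd, buf, sizeof(buf));
 *             return 0;
 *     }
 *
 *     sd_event_source *source = NULL;
 *     sd_event_add_io(e, &source, fd, EPOLLIN, on_readable, NULL);
 */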

static void initialize_perturb(sd_event *e) {
        sd_id128_t bootid = {};

        /* When we sleep for longer, we try to realign the wakeup to
           the same time within each minute/second/250ms, so that
           events all across the system can be coalesced into a single
           CPU wakeup. However, let's take some system-specific
           randomness for this value, so that in a network of systems
           with synced clocks timer events are distributed a
           bit. Here, we calculate a perturbation usec offset from the
           boot ID. */

        if (_likely_(e->perturb != (usec_t) -1))
                return;

        if (sd_id128_get_boot(&bootid) >= 0)
                e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
}
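
/* Worked example (made-up numbers): if the boot ID hashes to a perturb of
 * 17.3s, a timer that may fire anywhere within a given minute gets aligned
 * to second 17.3 of that minute. Every loop on this machine picks the same
 * spot, while machines with different boot IDs spread across the minute. */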

static int event_setup_timer_fd(
                sd_event *e,
                struct clock_data *d,
                clockid_t clock) {

        struct epoll_event ev = {};
        int r, fd;

        assert(e);
        assert(d);

        if (_likely_(d->fd >= 0))
                return 0;

        fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
        if (fd < 0)
                return -errno;

        ev.events = EPOLLIN;
        ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
        if (r < 0) {
                safe_close(fd);
                return -errno;
        }

        d->fd = fd;
        return 0;
}

_public_ int sd_event_add_time(
                sd_event *e,
                sd_event_source **ret,
                clockid_t clock,
                uint64_t usec,
                uint64_t accuracy,
                sd_event_time_handler_t callback,
                void *userdata) {

        EventSourceType type;
        sd_event_source *s;
        struct clock_data *d;
        int r;

        assert_return(e, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(accuracy != (uint64_t) -1, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        type = clock_to_event_source_type(clock);
        assert_return(type >= 0, -ENOTSUP);

        d = event_get_clock_data(e, type);
        assert(d);

        if (!d->earliest) {
                d->earliest = prioq_new(earliest_time_prioq_compare);
                if (!d->earliest)
                        return -ENOMEM;
        }

        if (!d->latest) {
                d->latest = prioq_new(latest_time_prioq_compare);
                if (!d->latest)
                        return -ENOMEM;
        }

        r = event_setup_timer_fd(e, d, clock);
        if (r < 0)
                return r;

        s = source_new(e, !ret, type);
        if (!s)
                return -ENOMEM;

        s->time.next = usec;
        s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
        s->time.callback = callback;
        s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(d->earliest, s, &s->time.earliest_index);
        if (r < 0)
                goto fail;

        r = prioq_put(d->latest, s, &s->time.latest_index);
        if (r < 0)
                goto fail;

        if (ret)
                *ret = s;

        return 0;

fail:
        source_free(s);
        return r;
}
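
/* Illustrative usage sketch (hypothetical on_timer callback; error handling
 * elided): fire roughly one minute from now with 1s of coalescing slack, so
 * the earliest/latest prioq pair tracks the window [now+60s, now+61s]:
 *
 *     sd_event_add_time(e, NULL, CLOCK_MONOTONIC,
 *                       now(CLOCK_MONOTONIC) + 60 * USEC_PER_SEC,
 *                       USEC_PER_SEC, on_timer, NULL);
 */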

static int event_update_signal_fd(sd_event *e) {
        struct epoll_event ev = {};
        bool add_to_epoll;
        int r;

        assert(e);

        add_to_epoll = e->signal_fd < 0;

        r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
        if (r < 0)
                return -errno;

        e->signal_fd = r;

        if (!add_to_epoll)
                return 0;

        ev.events = EPOLLIN;
        ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
        if (r < 0) {
                e->signal_fd = safe_close(e->signal_fd);
                return -errno;
        }

        return 0;
}

static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        assert(s);

        return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
}

_public_ int sd_event_add_signal(
                sd_event *e,
                sd_event_source **ret,
                int sig,
                sd_event_signal_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        sigset_t ss;
        int r;

        assert_return(e, -EINVAL);
        assert_return(sig > 0, -EINVAL);
        assert_return(sig < _NSIG, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!callback)
                callback = signal_exit_callback;

        r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
        if (r != 0)
                return -r;

        if (!sigismember(&ss, sig))
                return -EBUSY;

        if (!e->signal_sources) {
                e->signal_sources = new0(sd_event_source*, _NSIG);
                if (!e->signal_sources)
                        return -ENOMEM;
        } else if (e->signal_sources[sig])
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_SIGNAL);
        if (!s)
                return -ENOMEM;

        s->signal.sig = sig;
        s->signal.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        e->signal_sources[sig] = s;
        assert_se(sigaddset(&e->sigset, sig) == 0);

        if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
                r = event_update_signal_fd(e);
                if (r < 0) {
                        source_free(s);
                        return r;
                }
        }

        if (ret)
                *ret = s;

        return 0;
}
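
/* Illustrative usage sketch: the signal must already be blocked or the
 * sigismember() check above makes the call fail; with a NULL callback the
 * default signal_exit_callback() terminates the loop (error handling
 * elided):
 *
 *     sigset_t mask;
 *     sigemptyset(&mask);
 *     sigaddset(&mask, SIGTERM);
 *     pthread_sigmask(SIG_BLOCK, &mask, NULL);
 *
 *     sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
 */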

_public_ int sd_event_add_child(
                sd_event *e,
                sd_event_source **ret,
                pid_t pid,
                int options,
                sd_event_child_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        int r;

        assert_return(e, -EINVAL);
        assert_return(pid > 1, -EINVAL);
        assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
        assert_return(options != 0, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
        if (r < 0)
                return r;

        if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
                return -EBUSY;

        s = source_new(e, !ret, SOURCE_CHILD);
        if (!s)
                return -ENOMEM;

        s->child.pid = pid;
        s->child.options = options;
        s->child.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
        if (r < 0) {
                source_free(s);
                return r;
        }

        e->n_enabled_child_sources++;

        assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);

        if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
                r = event_update_signal_fd(e);
                if (r < 0) {
                        source_free(s);
                        return r;
                }
        }

        e->need_process_child = true;

        if (ret)
                *ret = s;

        return 0;
}
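
/* Illustrative usage sketch (hypothetical on_child_exit callback; error
 * handling elided): watch a forked child for termination; the callback sees
 * the waitid() siginfo while the child is still a zombie, and the PID is
 * reaped right after the callback returns:
 *
 *     pid_t pid = fork();
 *     if (pid == 0)
 *             _exit(EXIT_SUCCESS);
 *
 *     sd_event_add_child(e, NULL, pid, WEXITED, on_child_exit, NULL);
 */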

_public_ int sd_event_add_defer(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        int r;

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, !ret, SOURCE_DEFER);
        if (!s)
                return -ENOMEM;

        s->defer.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = source_set_pending(s, true);
        if (r < 0) {
                source_free(s);
                return r;
        }

        if (ret)
                *ret = s;

        return 0;
}

_public_ int sd_event_add_post(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        int r;

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
        if (r < 0)
                return r;

        s = source_new(e, !ret, SOURCE_POST);
        if (!s)
                return -ENOMEM;

        s->post.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = set_put(e->post_sources, s);
        if (r < 0) {
                source_free(s);
                return r;
        }

        if (ret)
                *ret = s;

        return 0;
}

_public_ int sd_event_add_exit(
                sd_event *e,
                sd_event_source **ret,
                sd_event_handler_t callback,
                void *userdata) {

        sd_event_source *s;
        int r;

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!e->exit) {
                e->exit = prioq_new(exit_prioq_compare);
                if (!e->exit)
                        return -ENOMEM;
        }

        s = source_new(e, !ret, SOURCE_EXIT);
        if (!s)
                return -ENOMEM;

        s->exit.callback = callback;
        s->userdata = userdata;
        s->exit.prioq_index = PRIOQ_IDX_NULL;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
        if (r < 0) {
                source_free(s);
                return r;
        }

        if (ret)
                *ret = s;

        return 0;
}

_public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
        assert_return(s, NULL);

        assert(s->n_ref >= 1);
        s->n_ref++;

        return s;
}

_public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {

        if (!s)
                return NULL;

        assert(s->n_ref >= 1);
        s->n_ref--;

        if (s->n_ref <= 0) {
                /* Here's a special hack: when we are called from a
                 * dispatch handler we won't free the event source
                 * immediately, but we will detach the fd from the
                 * epoll. This way it is safe for the caller to unref
                 * the event source and immediately close the fd, but
                 * we still retain a valid event source object after
                 * the callback. */

                if (s->dispatching) {
                        if (s->type == SOURCE_IO)
                                source_io_unregister(s);

                        source_disconnect(s);
                } else
                        source_free(s);
        }

        return NULL;
}
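
/* The hack above enables this pattern inside a dispatch callback
 * (illustrative, not from the original source): the source object stays
 * alive until the dispatcher drops its own reference, yet the fd is already
 * detached from epoll and therefore safe to close immediately:
 *
 *     static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
 *             sd_event_source_unref(s);
 *             close(fd);
 *             return 0;
 *     }
 */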

_public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
        assert_return(s, NULL);

        return s->event;
}

_public_ int sd_event_source_get_pending(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->pending;
}

_public_ int sd_event_source_get_io_fd(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->io.fd;
}

_public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(fd >= 0, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->io.fd == fd)
                return 0;

        if (s->enabled == SD_EVENT_OFF) {
                s->io.fd = fd;
                s->io.registered = false;
        } else {
                int saved_fd;

                saved_fd = s->io.fd;
                assert(s->io.registered);

                s->io.fd = fd;
                s->io.registered = false;

                r = source_io_register(s, s->enabled, s->io.events);
                if (r < 0) {
                        s->io.fd = saved_fd;
                        s->io.registered = true;
                        return r;
                }

                epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
        }

        return 0;
}

_public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
        assert_return(s, -EINVAL);
        assert_return(events, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *events = s->io.events;
        return 0;
}

_public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        /* edge-triggered updates are never skipped, so we can reset edges */
        if (s->io.events == events && !(events & EPOLLET))
                return 0;

        if (s->enabled != SD_EVENT_OFF) {
                r = source_io_register(s, s->enabled, events);
                if (r < 0)
                        return r;
        }

        s->io.events = events;
        source_set_pending(s, false);

        return 0;
}

_public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
        assert_return(s, -EINVAL);
        assert_return(revents, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(s->pending, -ENODATA);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *revents = s->io.revents;
        return 0;
}

_public_ int sd_event_source_get_signal(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_SIGNAL, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->signal.sig;
}

_public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
        assert_return(s, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *priority = s->priority;
        return 0;
}

_public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
        assert_return(s, -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->priority == priority)
                return 0;

        s->priority = priority;

        if (s->pending)
                prioq_reshuffle(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

        if (s->type == SOURCE_EXIT)
                prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);

        return 0;
}

_public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
        assert_return(s, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *m = s->enabled;
        return 0;
}

_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        /* If we are dead anyway, we are fine with turning off
         * sources, but everything else needs to fail. */
        if (s->event->state == SD_EVENT_FINISHED)
                return m == SD_EVENT_OFF ? 0 : -ESTALE;

        if (s->enabled == m)
                return 0;

        if (m == SD_EVENT_OFF) {

                switch (s->type) {

                case SOURCE_IO:
                        r = source_io_unregister(s);
                        if (r < 0)
                                return r;

                        s->enabled = m;
                        break;

                case SOURCE_TIME_REALTIME:
                case SOURCE_TIME_BOOTTIME:
                case SOURCE_TIME_MONOTONIC:
                case SOURCE_TIME_REALTIME_ALARM:
                case SOURCE_TIME_BOOTTIME_ALARM: {
                        struct clock_data *d;

                        s->enabled = m;
                        d = event_get_clock_data(s->event, s->type);
                        assert(d);

                        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(d->latest, s, &s->time.latest_index);
                        break;
                }

                case SOURCE_SIGNAL:
                        s->enabled = m;
                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
                                assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
                                event_update_signal_fd(s->event);
                        }

                        break;

                case SOURCE_CHILD:
                        s->enabled = m;

                        assert(s->event->n_enabled_child_sources > 0);
                        s->event->n_enabled_child_sources--;

                        if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
                                assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
                                event_update_signal_fd(s->event);
                        }

                        break;

                case SOURCE_EXIT:
                        s->enabled = m;
                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
                        break;

                case SOURCE_DEFER:
                case SOURCE_POST:
                        s->enabled = m;
                        break;

                default:
                        assert_not_reached("Wut? I shouldn't exist.");
                }

        } else {
                switch (s->type) {

                case SOURCE_IO:
                        r = source_io_register(s, m, s->io.events);
                        if (r < 0)
                                return r;

                        s->enabled = m;
                        break;

                case SOURCE_TIME_REALTIME:
                case SOURCE_TIME_BOOTTIME:
                case SOURCE_TIME_MONOTONIC:
                case SOURCE_TIME_REALTIME_ALARM:
                case SOURCE_TIME_BOOTTIME_ALARM: {
                        struct clock_data *d;

                        s->enabled = m;
                        d = event_get_clock_data(s->event, s->type);
                        assert(d);

                        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(d->latest, s, &s->time.latest_index);
                        break;
                }

                case SOURCE_SIGNAL:
                        s->enabled = m;

                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
                                assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
                                event_update_signal_fd(s->event);
                        }

                        break;

                case SOURCE_CHILD:
                        if (s->enabled == SD_EVENT_OFF) {
                                s->event->n_enabled_child_sources++;

                                if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
                                        assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
                                        event_update_signal_fd(s->event);
                                }
                        }

                        s->enabled = m;
                        break;

                case SOURCE_EXIT:
                        s->enabled = m;
                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
                        break;

                case SOURCE_DEFER:
                case SOURCE_POST:
                        s->enabled = m;
                        break;

                default:
                        assert_not_reached("Wut? I shouldn't exist.");
                }
        }

        if (s->pending)
                prioq_reshuffle(s->event->pending, s, &s->pending_index);

        if (s->prepare)
                prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

        return 0;
}

_public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.next;
        return 0;
}

_public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
        struct clock_data *d;

        assert_return(s, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        s->time.next = usec;

        source_set_pending(s, false);

        d = event_get_clock_data(s->event, s->type);
        assert(d);

        prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
        prioq_reshuffle(d->latest, s, &s->time.latest_index);

        return 0;
}

_public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.accuracy;
        return 0;
}

_public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
        struct clock_data *d;

        assert_return(s, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (usec == 0)
                usec = DEFAULT_ACCURACY_USEC;

        s->time.accuracy = usec;

        source_set_pending(s, false);

        d = event_get_clock_data(s->event, s->type);
        assert(d);

        prioq_reshuffle(d->latest, s, &s->time.latest_index);

        return 0;
}

_public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
        assert_return(s, -EINVAL);
        assert_return(clock, -EINVAL);
        assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *clock = event_source_type_to_clock(s->type);
        return 0;
}

_public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
        assert_return(s, -EINVAL);
        assert_return(pid, -EINVAL);
        assert_return(s->type == SOURCE_CHILD, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *pid = s->child.pid;
        return 0;
}

_public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
        int r;

        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->prepare == callback)
                return 0;

        if (callback && s->prepare) {
                s->prepare = callback;
                return 0;
        }

        r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
        if (r < 0)
                return r;

        s->prepare = callback;

        if (callback) {
                r = prioq_put(s->event->prepare, s, &s->prepare_index);
                if (r < 0)
                        return r;
        } else
                prioq_remove(s->event->prepare, s, &s->prepare_index);

        return 0;
}

_public_ void* sd_event_source_get_userdata(sd_event_source *s) {
        assert_return(s, NULL);

        return s->userdata;
}

_public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
        void *ret;

        assert_return(s, NULL);

        ret = s->userdata;
        s->userdata = userdata;

        return ret;
}

static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
        usec_t c;

        assert(e);
        assert(a <= b);

        if (a <= 0)
                return 0;

        if (b <= a + 1)
                return a;

        initialize_perturb(e);

        /*
          Find a good time to wake up again between times a and b. We
          have two goals here:

          a) We want to wake up as seldom as possible, hence prefer
             later times over earlier times.

          b) But if we have to wake up, then let's make sure to
             dispatch as much as possible on the entire system.

          We implement this by waking up everywhere at the same time
          within any given minute if we can, synchronised via the
          perturbation value determined from the boot ID. If we can't,
          then we try to find the same spot in every 10s, then 1s and
          then 250ms step. Otherwise, we pick the last possible time
          to wake up.
        */

        c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_MINUTE))
                        return b;

                c -= USEC_PER_MINUTE;
        }

        if (c >= a)
                return c;

        c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_SEC*10))
                        return b;

                c -= USEC_PER_SEC*10;
        }

        if (c >= a)
                return c;

        c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_SEC))
                        return b;

                c -= USEC_PER_SEC;
        }

        if (c >= a)
                return c;

        c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
        if (c >= b) {
                if (_unlikely_(c < USEC_PER_MSEC*250))
                        return b;

                c -= USEC_PER_MSEC*250;
        }

        if (c >= a)
                return c;

        return b;
}
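
/* Worked example (made-up numbers, perturb = 20s): for a = 12:00:41 and
 * b = 12:01:30, the minute step yields c = 12:01:00 + 20s = 12:01:20, which
 * lies within [a, b] and is returned, so every loop on this machine waking
 * in that window picks the same instant. For b = 12:01:10 instead, 12:01:20
 * overshoots b and 12:00:20 undershoots a, so the search falls through to
 * the 10s step, which returns 12:01:00. */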

static int event_arm_timer(
                sd_event *e,
                struct clock_data *d) {

        struct itimerspec its = {};
        sd_event_source *a, *b;
        usec_t t;
        int r;

        assert(e);
        assert(d);

        a = prioq_peek(d->earliest);
        if (!a || a->enabled == SD_EVENT_OFF) {

                if (d->fd < 0)
                        return 0;

                if (d->next == (usec_t) -1)
                        return 0;

                /* disarm */
                r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
                if (r < 0)
                        return r;

                d->next = (usec_t) -1;
                return 0;
        }

        b = prioq_peek(d->latest);
        assert_se(b && b->enabled != SD_EVENT_OFF);

        t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
        if (d->next == t)
                return 0;

        assert_se(d->fd >= 0);

        if (t == 0) {
                /* We don't want to disarm here, just mean some time looooong ago. */
                its.it_value.tv_sec = 0;
                its.it_value.tv_nsec = 1;
        } else
                timespec_store(&its.it_value, t);

        r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
        if (r < 0)
                return -errno;

        d->next = t;
        return 0;
}

static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
        assert(e);
        assert(s);
        assert(s->type == SOURCE_IO);

        /* If the event source was already pending, we just OR in the
         * new revents, otherwise we reset the value. The ORing is
         * necessary to handle EPOLLONESHOT events properly where
         * readability might happen independently of writability, and
         * we need to keep track of both */

        if (s->pending)
                s->io.revents |= revents;
        else
                s->io.revents = revents;

        return source_set_pending(s, true);
}
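
/* Example of why the ORing matters (illustrative): with an EPOLLONESHOT
 * source, EPOLLIN can arrive in one loop iteration and EPOLLOUT in the next
 * while the source is still pending; resetting instead of ORing would drop
 * the earlier EPOLLIN before the handler ever saw it. */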

static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
        uint64_t x;
        ssize_t ss;

        assert(e);
        assert(fd >= 0);

        assert_return(events == EPOLLIN, -EIO);

        ss = read(fd, &x, sizeof(x));
        if (ss < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 0;

                return -errno;
        }

        if (_unlikely_(ss != sizeof(x)))
                return -EIO;

        if (next)
                *next = (usec_t) -1;

        return 0;
}

static int process_timer(
                sd_event *e,
                usec_t n,
                struct clock_data *d) {

        sd_event_source *s;
        int r;

        assert(e);
        assert(d);

        for (;;) {
                s = prioq_peek(d->earliest);
                if (!s ||
                    s->time.next > n ||
                    s->enabled == SD_EVENT_OFF ||
                    s->pending)
                        break;

                r = source_set_pending(s, true);
                if (r < 0)
                        return r;

                prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
                prioq_reshuffle(d->latest, s, &s->time.latest_index);
        }

        return 0;
}

static int process_child(sd_event *e) {
        sd_event_source *s;
        Iterator i;
        int r;

        assert(e);

        e->need_process_child = false;

        /*
          So, this is ugly. We iteratively invoke waitid() with P_PID
          + WNOHANG for each PID we wait for, instead of using
          P_ALL. This is because we only want to get child
          information of very specific child processes, and not all
          of them. We might not have processed the SIGCHLD event of a
          previous invocation and we don't want to maintain an
          unbounded *per-child* event queue, hence we really don't
          want anything flushed out of the kernel's queue that we
          don't care about. Since this is O(n) this means that if you
          have a lot of processes you probably want to handle SIGCHLD
          yourself.

          We do not reap the children here (by using WNOWAIT), this
          is only done after the event source is dispatched so that
          the callback still sees the process as a zombie.
        */

        HASHMAP_FOREACH(s, e->child_sources, i) {
                assert(s->type == SOURCE_CHILD);

                if (s->pending)
                        continue;

                if (s->enabled == SD_EVENT_OFF)
                        continue;

                zero(s->child.siginfo);
                r = waitid(P_PID, s->child.pid, &s->child.siginfo,
                           WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
                if (r < 0)
                        return -errno;

                if (s->child.siginfo.si_pid != 0) {
                        bool zombie =
                                s->child.siginfo.si_code == CLD_EXITED ||
                                s->child.siginfo.si_code == CLD_KILLED ||
                                s->child.siginfo.si_code == CLD_DUMPED;

                        if (!zombie && (s->child.options & WEXITED)) {
                                /* If the child isn't dead then let's
                                 * immediately remove the state change
                                 * from the queue, since there's no
                                 * benefit in leaving it queued */

                                assert(s->child.options & (WSTOPPED|WCONTINUED));
                                waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
                        }

                        r = source_set_pending(s, true);
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}

static int process_signal(sd_event *e, uint32_t events) {
        bool read_one = false;
        int r;

        assert(e);

        assert_return(events == EPOLLIN, -EIO);

        for (;;) {
                struct signalfd_siginfo si;
                ssize_t ss;
                sd_event_source *s = NULL;

                ss = read(e->signal_fd, &si, sizeof(si));
                if (ss < 0) {
                        if (errno == EAGAIN || errno == EINTR)
                                return read_one;

                        return -errno;
                }

                if (_unlikely_(ss != sizeof(si)))
                        return -EIO;

                read_one = true;

                if (si.ssi_signo == SIGCHLD) {
                        r = process_child(e);
                        if (r < 0)
                                return r;
                }

                if (e->signal_sources)
                        s = e->signal_sources[si.ssi_signo];

                if (!s)
                        continue;

                s->signal.siginfo = si;
                r = source_set_pending(s, true);
                if (r < 0)
                        return r;
        }
}

static int source_dispatch(sd_event_source *s) {
        int r = 0;

        assert(s);
        assert(s->pending || s->type == SOURCE_EXIT);

        if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
                r = source_set_pending(s, false);
                if (r < 0)
                        return r;
        }

        if (s->type != SOURCE_POST) {
                sd_event_source *z;
                Iterator i;

                /* If we execute a non-post source, let's mark all
                 * post sources as pending */

                SET_FOREACH(z, s->event->post_sources, i) {
                        if (z->enabled == SD_EVENT_OFF)
                                continue;

                        r = source_set_pending(z, true);
                        if (r < 0)
                                return r;
                }
        }

        if (s->enabled == SD_EVENT_ONESHOT) {
                r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
                if (r < 0)
                        return r;
        }

        s->dispatching = true;

        switch (s->type) {

        case SOURCE_IO:
                r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
                break;

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_BOOTTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM:
                r = s->time.callback(s, s->time.next, s->userdata);
                break;

        case SOURCE_SIGNAL:
                r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
                break;

        case SOURCE_CHILD: {
                bool zombie;

                zombie = s->child.siginfo.si_code == CLD_EXITED ||
                         s->child.siginfo.si_code == CLD_KILLED ||
                         s->child.siginfo.si_code == CLD_DUMPED;

                r = s->child.callback(s, &s->child.siginfo, s->userdata);

                /* Now, reap the PID for good. */
                if (zombie)
                        waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);

                break;
        }

        case SOURCE_DEFER:
                r = s->defer.callback(s, s->userdata);
                break;

        case SOURCE_POST:
                r = s->post.callback(s, s->userdata);
                break;

        case SOURCE_EXIT:
                r = s->exit.callback(s, s->userdata);
                break;

        case SOURCE_WATCHDOG:
        case _SOURCE_EVENT_SOURCE_TYPE_MAX:
        case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
                assert_not_reached("Wut? I shouldn't exist.");
        }

        s->dispatching = false;

        if (r < 0)
                log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));

        if (s->n_ref == 0)
                source_free(s);
        else if (r < 0)
                sd_event_source_set_enabled(s, SD_EVENT_OFF);

        return 1;
}

static int event_prepare(sd_event *e) {
        int r;

        assert(e);

        for (;;) {
                sd_event_source *s;

                s = prioq_peek(e->prepare);
                if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
                        break;

                s->prepare_iteration = e->iteration;
                r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
                if (r < 0)
                        return r;

                assert(s->prepare);

                s->dispatching = true;
                r = s->prepare(s, s->userdata);
                s->dispatching = false;

                if (r < 0)
                        log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));

                if (s->n_ref == 0)
                        source_free(s);
                else if (r < 0)
                        sd_event_source_set_enabled(s, SD_EVENT_OFF);
        }

        return 0;
}

static int dispatch_exit(sd_event *e) {
        sd_event_source *p;
        int r;

        assert(e);

        p = prioq_peek(e->exit);
        if (!p || p->enabled == SD_EVENT_OFF) {
                e->state = SD_EVENT_FINISHED;
                return 0;
        }

        sd_event_ref(e);
        e->iteration++;
        e->state = SD_EVENT_EXITING;

        r = source_dispatch(p);

        e->state = SD_EVENT_PASSIVE;
        sd_event_unref(e);

        return r;
}

static sd_event_source* event_next_pending(sd_event *e) {
        sd_event_source *p;

        assert(e);

        p = prioq_peek(e->pending);
        if (!p)
                return NULL;

        if (p->enabled == SD_EVENT_OFF)
                return NULL;

        return p;
}

static int arm_watchdog(sd_event *e) {
        struct itimerspec its = {};
        usec_t t;
        int r;

        assert(e);
        assert(e->watchdog_fd >= 0);

        t = sleep_between(e,
                          e->watchdog_last + (e->watchdog_period / 2),
                          e->watchdog_last + (e->watchdog_period * 3 / 4));

        timespec_store(&its.it_value, t);

        /* Make sure we never set the watchdog to 0, which tells the
         * kernel to disable it. */
        if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
                its.it_value.tv_nsec = 1;

        r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
        if (r < 0)
                return -errno;

        return 0;
}

static int process_watchdog(sd_event *e) {
        assert(e);

        if (!e->watchdog)
                return 0;

        /* Don't notify watchdog too often */
        if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
                return 0;

        sd_notify(false, "WATCHDOG=1");
        e->watchdog_last = e->timestamp.monotonic;

        return arm_watchdog(e);
}
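
/* Timing sketch (made-up period): with WatchdogSec=20s the manager reports a
 * 20s period via sd_watchdog_enabled(); arm_watchdog() then schedules the
 * next ping into the [last+10s, last+15s] window (coalesced via
 * sleep_between()), and the check above suppresses pings spaced closer than
 * 5s, i.e. period/4. */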

_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
        struct epoll_event *ev_queue;
        unsigned ev_queue_max;
        sd_event_source *p;
        int r, i, m;

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        if (e->exit_requested)
                return dispatch_exit(e);

        sd_event_ref(e);
        e->iteration++;
        e->state = SD_EVENT_RUNNING;

        r = event_prepare(e);
        if (r < 0)
                goto finish;

        r = event_arm_timer(e, &e->realtime);
        if (r < 0)
                goto finish;

        r = event_arm_timer(e, &e->boottime);
        if (r < 0)
                goto finish;

        r = event_arm_timer(e, &e->monotonic);
        if (r < 0)
                goto finish;

        r = event_arm_timer(e, &e->realtime_alarm);
        if (r < 0)
                goto finish;

        r = event_arm_timer(e, &e->boottime_alarm);
        if (r < 0)
                goto finish;

        if (event_next_pending(e) || e->need_process_child)
                timeout = 0;

        ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
        ev_queue = newa(struct epoll_event, ev_queue_max);

        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
                       timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
        if (m < 0) {
                r = errno == EAGAIN || errno == EINTR ? 1 : -errno;
                goto finish;
        }

        dual_timestamp_get(&e->timestamp);
        e->timestamp_boottime = now(CLOCK_BOOTTIME);

        for (i = 0; i < m; i++) {

                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
                        r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME))
                        r = flush_timer(e, e->boottime.fd, ev_queue[i].events, &e->boottime.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
                        r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
                        r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
                        r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
                        r = process_signal(e, ev_queue[i].events);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
                else
                        r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);

                if (r < 0)
                        goto finish;
        }

        r = process_watchdog(e);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp_boottime, &e->boottime);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
        if (r < 0)
                goto finish;

        r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);
        if (r < 0)
                goto finish;

        if (e->need_process_child) {
                r = process_child(e);
                if (r < 0)
                        goto finish;
        }

        p = event_next_pending(e);
        if (!p) {
                r = 1;
                goto finish;
        }

        r = source_dispatch(p);

finish:
        e->state = SD_EVENT_PASSIVE;
        sd_event_unref(e);

        return r;
}

_public_ int sd_event_loop(sd_event *e) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        sd_event_ref(e);

        while (e->state != SD_EVENT_FINISHED) {
                r = sd_event_run(e, (uint64_t) -1);
                if (r < 0)
                        goto finish;
        }

        r = e->exit_code;

finish:
        sd_event_unref(e);
        return r;
}
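
/* Minimal program sketch (illustrative; error handling elided, SIGTERM
 * assumed blocked beforehand): obtain the thread's default loop, add a
 * source, then let sd_event_loop() call sd_event_run() until an exit is
 * requested:
 *
 *     sd_event *e = NULL;
 *     sd_event_default(&e);
 *     sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
 *     sd_event_loop(e);
 *     sd_event_unref(e);
 */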

_public_ int sd_event_get_state(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->state;
}

_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
        assert_return(e, -EINVAL);
        assert_return(code, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!e->exit_requested)
                return -ENODATA;

        *code = e->exit_code;
        return 0;
}

_public_ int sd_event_exit(sd_event *e, int code) {
        assert_return(e, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit_requested = true;
        e->exit_code = code;

        return 0;
}

_public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        /* If we haven't run yet, just get the actual time */
        if (!dual_timestamp_is_set(&e->timestamp)) {
                *usec = now(clock);
                return 0;
        }

        switch (clock) {

        case CLOCK_REALTIME:
        case CLOCK_REALTIME_ALARM:
                *usec = e->timestamp.realtime;
                break;

        case CLOCK_MONOTONIC:
                *usec = e->timestamp.monotonic;
                break;

        case CLOCK_BOOTTIME:
        case CLOCK_BOOTTIME_ALARM:
                *usec = e->timestamp_boottime;
                break;

        default:
                return -ENOTSUP;
        }

        return 0;
}

_public_ int sd_event_default(sd_event **ret) {

        static thread_local sd_event *default_event = NULL;
        sd_event *e = NULL;
        int r;

        if (!ret)
                return !!default_event;

        if (default_event) {
                *ret = sd_event_ref(default_event);
                return 0;
        }

        r = sd_event_new(&e);
        if (r < 0)
                return r;

        e->default_event_ptr = &default_event;
        default_event = e;

        *ret = e;
        return 1;
}

_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
        assert_return(e, -EINVAL);
        assert_return(tid, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->tid != 0) {
                *tid = e->tid;
                return 0;
        }

        return -ENXIO;
}

_public_ int sd_event_set_watchdog(sd_event *e, int b) {
        int r;

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->watchdog == !!b)
                return e->watchdog;

        if (b) {
                struct epoll_event ev = {};

                r = sd_watchdog_enabled(false, &e->watchdog_period);
                if (r <= 0)
                        return r;

                /* Issue first ping immediately */
                sd_notify(false, "WATCHDOG=1");
                e->watchdog_last = now(CLOCK_MONOTONIC);

                e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
                if (e->watchdog_fd < 0)
                        return -errno;

                r = arm_watchdog(e);
                if (r < 0)
                        goto fail;

                ev.events = EPOLLIN;
                ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);

                r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
                if (r < 0) {
                        r = -errno;
                        goto fail;
                }

        } else {
                if (e->watchdog_fd >= 0) {
                        epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
                        e->watchdog_fd = safe_close(e->watchdog_fd);
                }
        }

        e->watchdog = !!b;
        return e->watchdog;

fail:
        e->watchdog_fd = safe_close(e->watchdog_fd);
        return r;
}
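
/* Usage note (illustrative): a Type=notify service with WatchdogSec= set
 * calls sd_event_set_watchdog(e, true) once after creating its loop; every
 * iteration then pings the service manager automatically, so a wedged loop
 * stops pinging and the manager's watchdog logic can kill and restart it. */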

_public_ int sd_event_get_watchdog(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        return e->watchdog;
}