/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <sys/epoll.h>
#include <sys/timerfd.h>

#include "sd-daemon.h"

#include "time-util.h"

#define EPOLL_QUEUE_MAX 512U
#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
/* Every event source has exactly one of these types; the five
 * SOURCE_TIME_* values correspond to the five timerfd-backed clocks. */
typedef enum EventSourceType {
        SOURCE_IO,
        SOURCE_TIME_REALTIME,
        SOURCE_TIME_BOOTTIME,
        SOURCE_TIME_MONOTONIC,
        SOURCE_TIME_REALTIME_ALARM,
        SOURCE_TIME_BOOTTIME_ALARM,
        SOURCE_SIGNAL,
        SOURCE_CHILD,
        SOURCE_DEFER,
        SOURCE_POST,
        SOURCE_EXIT,
        SOURCE_WATCHDOG,
        _SOURCE_EVENT_SOURCE_TYPE_MAX,
        _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
} EventSourceType;

/* True for the clock-based source types, i.e. those backed by a timerfd. */
#define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
62 struct sd_event_source {
67 sd_event_handler_t prepare;
69 EventSourceType type:5;
76 unsigned pending_index;
77 unsigned prepare_index;
78 unsigned pending_iteration;
79 unsigned prepare_iteration;
81 LIST_FIELDS(sd_event_source, sources);
85 sd_event_io_handler_t callback;
92 sd_event_time_handler_t callback;
93 usec_t next, accuracy;
94 unsigned earliest_index;
95 unsigned latest_index;
98 sd_event_signal_handler_t callback;
99 struct signalfd_siginfo siginfo;
103 sd_event_child_handler_t callback;
109 sd_event_handler_t callback;
112 sd_event_handler_t callback;
115 sd_event_handler_t callback;
116 unsigned prioq_index;
124 /* For all clocks we maintain two priority queues each, one
125 * ordered for the earliest times the events may be
126 * dispatched, and one ordered by the latest times they must
127 * have been dispatched. The range between the top entries in
128 * the two prioqs is the time window we can freely schedule
148 /* timerfd_create() only supports these five clocks so far. We
149 * can add support for more clocks when the kernel learns to
150 * deal with them, too. */
151 struct clock_data realtime;
152 struct clock_data boottime;
153 struct clock_data monotonic;
154 struct clock_data realtime_alarm;
155 struct clock_data boottime_alarm;
160 sd_event_source **signal_sources;
162 Hashmap *child_sources;
163 unsigned n_enabled_child_sources;
172 dual_timestamp timestamp;
173 usec_t timestamp_boottime;
176 bool exit_requested:1;
177 bool need_process_child:1;
183 sd_event **default_event_ptr;
185 usec_t watchdog_last, watchdog_period;
189 LIST_HEAD(sd_event_source, sources);
192 static void source_disconnect(sd_event_source *s);
194 static int pending_prioq_compare(const void *a, const void *b) {
195 const sd_event_source *x = a, *y = b;
200 /* Enabled ones first */
201 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
203 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
206 /* Lower priority values first */
207 if (x->priority < y->priority)
209 if (x->priority > y->priority)
212 /* Older entries first */
213 if (x->pending_iteration < y->pending_iteration)
215 if (x->pending_iteration > y->pending_iteration)
218 /* Stability for the rest */
227 static int prepare_prioq_compare(const void *a, const void *b) {
228 const sd_event_source *x = a, *y = b;
233 /* Move most recently prepared ones last, so that we can stop
234 * preparing as soon as we hit one that has already been
235 * prepared in the current iteration */
236 if (x->prepare_iteration < y->prepare_iteration)
238 if (x->prepare_iteration > y->prepare_iteration)
241 /* Enabled ones first */
242 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
244 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
247 /* Lower priority values first */
248 if (x->priority < y->priority)
250 if (x->priority > y->priority)
253 /* Stability for the rest */
262 static int earliest_time_prioq_compare(const void *a, const void *b) {
263 const sd_event_source *x = a, *y = b;
265 assert(EVENT_SOURCE_IS_TIME(x->type));
266 assert(x->type == y->type);
268 /* Enabled ones first */
269 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
271 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
274 /* Move the pending ones to the end */
275 if (!x->pending && y->pending)
277 if (x->pending && !y->pending)
281 if (x->time.next < y->time.next)
283 if (x->time.next > y->time.next)
286 /* Stability for the rest */
295 static int latest_time_prioq_compare(const void *a, const void *b) {
296 const sd_event_source *x = a, *y = b;
298 assert(EVENT_SOURCE_IS_TIME(x->type));
299 assert(x->type == y->type);
301 /* Enabled ones first */
302 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
304 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
307 /* Move the pending ones to the end */
308 if (!x->pending && y->pending)
310 if (x->pending && !y->pending)
314 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
316 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
319 /* Stability for the rest */
328 static int exit_prioq_compare(const void *a, const void *b) {
329 const sd_event_source *x = a, *y = b;
331 assert(x->type == SOURCE_EXIT);
332 assert(y->type == SOURCE_EXIT);
334 /* Enabled ones first */
335 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
337 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
340 /* Lower priority values first */
341 if (x->priority < y->priority)
343 if (x->priority > y->priority)
346 /* Stability for the rest */
355 static void free_clock_data(struct clock_data *d) {
359 prioq_free(d->earliest);
360 prioq_free(d->latest);
363 static void event_free(sd_event *e) {
368 while ((s = e->sources)) {
370 source_disconnect(s);
371 sd_event_source_unref(s);
374 assert(e->n_sources == 0);
376 if (e->default_event_ptr)
377 *(e->default_event_ptr) = NULL;
379 safe_close(e->epoll_fd);
380 safe_close(e->signal_fd);
381 safe_close(e->watchdog_fd);
383 free_clock_data(&e->realtime);
384 free_clock_data(&e->boottime);
385 free_clock_data(&e->monotonic);
386 free_clock_data(&e->realtime_alarm);
387 free_clock_data(&e->boottime_alarm);
389 prioq_free(e->pending);
390 prioq_free(e->prepare);
393 free(e->signal_sources);
395 hashmap_free(e->child_sources);
396 set_free(e->post_sources);
400 _public_ int sd_event_new(sd_event** ret) {
404 assert_return(ret, -EINVAL);
406 e = new0(sd_event, 1);
411 e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->boottime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
412 e->realtime.next = e->boottime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = USEC_INFINITY;
413 e->original_pid = getpid();
414 e->perturb = USEC_INFINITY;
416 assert_se(sigemptyset(&e->sigset) == 0);
418 e->pending = prioq_new(pending_prioq_compare);
424 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
425 if (e->epoll_fd < 0) {
438 _public_ sd_event* sd_event_ref(sd_event *e) {
439 assert_return(e, NULL);
441 assert(e->n_ref >= 1);
447 _public_ sd_event* sd_event_unref(sd_event *e) {
452 assert(e->n_ref >= 1);
461 static bool event_pid_changed(sd_event *e) {
464 /* We don't support people creating am event loop and keeping
465 * it around over a fork(). Let's complain. */
467 return e->original_pid != getpid();
470 static int source_io_unregister(sd_event_source *s) {
474 assert(s->type == SOURCE_IO);
476 if (!s->io.registered)
479 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
483 s->io.registered = false;
487 static int source_io_register(
492 struct epoll_event ev = {};
496 assert(s->type == SOURCE_IO);
497 assert(enabled != SD_EVENT_OFF);
502 if (enabled == SD_EVENT_ONESHOT)
503 ev.events |= EPOLLONESHOT;
505 if (s->io.registered)
506 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
508 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
513 s->io.registered = true;
518 static clockid_t event_source_type_to_clock(EventSourceType t) {
522 case SOURCE_TIME_REALTIME:
523 return CLOCK_REALTIME;
525 case SOURCE_TIME_BOOTTIME:
526 return CLOCK_BOOTTIME;
528 case SOURCE_TIME_MONOTONIC:
529 return CLOCK_MONOTONIC;
531 case SOURCE_TIME_REALTIME_ALARM:
532 return CLOCK_REALTIME_ALARM;
534 case SOURCE_TIME_BOOTTIME_ALARM:
535 return CLOCK_BOOTTIME_ALARM;
538 return (clockid_t) -1;
542 static EventSourceType clock_to_event_source_type(clockid_t clock) {
547 return SOURCE_TIME_REALTIME;
550 return SOURCE_TIME_BOOTTIME;
552 case CLOCK_MONOTONIC:
553 return SOURCE_TIME_MONOTONIC;
555 case CLOCK_REALTIME_ALARM:
556 return SOURCE_TIME_REALTIME_ALARM;
558 case CLOCK_BOOTTIME_ALARM:
559 return SOURCE_TIME_BOOTTIME_ALARM;
562 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
566 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
571 case SOURCE_TIME_REALTIME:
574 case SOURCE_TIME_BOOTTIME:
577 case SOURCE_TIME_MONOTONIC:
578 return &e->monotonic;
580 case SOURCE_TIME_REALTIME_ALARM:
581 return &e->realtime_alarm;
583 case SOURCE_TIME_BOOTTIME_ALARM:
584 return &e->boottime_alarm;
591 static void source_disconnect(sd_event_source *s) {
599 assert(s->event->n_sources > 0);
605 source_io_unregister(s);
609 case SOURCE_TIME_REALTIME:
610 case SOURCE_TIME_BOOTTIME:
611 case SOURCE_TIME_MONOTONIC:
612 case SOURCE_TIME_REALTIME_ALARM:
613 case SOURCE_TIME_BOOTTIME_ALARM: {
614 struct clock_data *d;
616 d = event_get_clock_data(s->event, s->type);
619 prioq_remove(d->earliest, s, &s->time.earliest_index);
620 prioq_remove(d->latest, s, &s->time.latest_index);
621 d->needs_rearm = true;
626 if (s->signal.sig > 0) {
627 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
628 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
630 if (s->event->signal_sources)
631 s->event->signal_sources[s->signal.sig] = NULL;
637 if (s->child.pid > 0) {
638 if (s->enabled != SD_EVENT_OFF) {
639 assert(s->event->n_enabled_child_sources > 0);
640 s->event->n_enabled_child_sources--;
643 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
644 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
646 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
656 set_remove(s->event->post_sources, s);
660 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
664 assert_not_reached("Wut? I shouldn't exist.");
668 prioq_remove(s->event->pending, s, &s->pending_index);
671 prioq_remove(s->event->prepare, s, &s->prepare_index);
675 s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
677 LIST_REMOVE(sources, event->sources, s);
681 sd_event_unref(event);
684 static void source_free(sd_event_source *s) {
687 source_disconnect(s);
691 static int source_set_pending(sd_event_source *s, bool b) {
695 assert(s->type != SOURCE_EXIT);
703 s->pending_iteration = s->event->iteration;
705 r = prioq_put(s->event->pending, s, &s->pending_index);
711 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
713 if (EVENT_SOURCE_IS_TIME(s->type)) {
714 struct clock_data *d;
716 d = event_get_clock_data(s->event, s->type);
719 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
720 prioq_reshuffle(d->latest, s, &s->time.latest_index);
721 d->needs_rearm = true;
727 static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
732 s = new0(sd_event_source, 1);
738 s->floating = floating;
740 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
745 LIST_PREPEND(sources, e->sources, s);
751 _public_ int sd_event_add_io(
753 sd_event_source **ret,
756 sd_event_io_handler_t callback,
762 assert_return(e, -EINVAL);
763 assert_return(fd >= 0, -EINVAL);
764 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
765 assert_return(callback, -EINVAL);
766 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
767 assert_return(!event_pid_changed(e), -ECHILD);
769 s = source_new(e, !ret, SOURCE_IO);
774 s->io.events = events;
775 s->io.callback = callback;
776 s->userdata = userdata;
777 s->enabled = SD_EVENT_ON;
779 r = source_io_register(s, s->enabled, events);
791 static void initialize_perturb(sd_event *e) {
792 sd_id128_t bootid = {};
794 /* When we sleep for longer, we try to realign the wakeup to
795 the same time wihtin each minute/second/250ms, so that
796 events all across the system can be coalesced into a single
797 CPU wakeup. However, let's take some system-specific
798 randomness for this value, so that in a network of systems
799 with synced clocks timer events are distributed a
800 bit. Here, we calculate a perturbation usec offset from the
803 if (_likely_(e->perturb != USEC_INFINITY))
806 if (sd_id128_get_boot(&bootid) >= 0)
807 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
810 static int event_setup_timer_fd(
812 struct clock_data *d,
815 struct epoll_event ev = {};
821 if (_likely_(d->fd >= 0))
824 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
829 ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));
831 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
841 _public_ int sd_event_add_time(
843 sd_event_source **ret,
847 sd_event_time_handler_t callback,
850 EventSourceType type;
852 struct clock_data *d;
855 assert_return(e, -EINVAL);
856 assert_return(usec != (uint64_t) -1, -EINVAL);
857 assert_return(accuracy != (uint64_t) -1, -EINVAL);
858 assert_return(callback, -EINVAL);
859 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
860 assert_return(!event_pid_changed(e), -ECHILD);
862 type = clock_to_event_source_type(clock);
863 assert_return(type >= 0, -ENOTSUP);
865 d = event_get_clock_data(e, type);
869 d->earliest = prioq_new(earliest_time_prioq_compare);
875 d->latest = prioq_new(latest_time_prioq_compare);
881 r = event_setup_timer_fd(e, d, clock);
886 s = source_new(e, !ret, type);
891 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
892 s->time.callback = callback;
893 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
894 s->userdata = userdata;
895 s->enabled = SD_EVENT_ONESHOT;
897 r = prioq_put(d->earliest, s, &s->time.earliest_index);
901 r = prioq_put(d->latest, s, &s->time.latest_index);
905 d->needs_rearm = true;
917 static int event_update_signal_fd(sd_event *e) {
918 struct epoll_event ev = {};
924 add_to_epoll = e->signal_fd < 0;
926 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
936 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
938 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
940 e->signal_fd = safe_close(e->signal_fd);
947 static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
950 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
953 _public_ int sd_event_add_signal(
955 sd_event_source **ret,
957 sd_event_signal_handler_t callback,
964 assert_return(e, -EINVAL);
965 assert_return(sig > 0, -EINVAL);
966 assert_return(sig < _NSIG, -EINVAL);
967 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
968 assert_return(!event_pid_changed(e), -ECHILD);
971 callback = signal_exit_callback;
973 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
977 if (!sigismember(&ss, sig))
980 if (!e->signal_sources) {
981 e->signal_sources = new0(sd_event_source*, _NSIG);
982 if (!e->signal_sources)
984 } else if (e->signal_sources[sig])
987 s = source_new(e, !ret, SOURCE_SIGNAL);
992 s->signal.callback = callback;
993 s->userdata = userdata;
994 s->enabled = SD_EVENT_ON;
996 e->signal_sources[sig] = s;
997 assert_se(sigaddset(&e->sigset, sig) == 0);
999 if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
1000 r = event_update_signal_fd(e);
1013 _public_ int sd_event_add_child(
1015 sd_event_source **ret,
1018 sd_event_child_handler_t callback,
1024 assert_return(e, -EINVAL);
1025 assert_return(pid > 1, -EINVAL);
1026 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
1027 assert_return(options != 0, -EINVAL);
1028 assert_return(callback, -EINVAL);
1029 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1030 assert_return(!event_pid_changed(e), -ECHILD);
1032 r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
1036 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
1039 s = source_new(e, !ret, SOURCE_CHILD);
1044 s->child.options = options;
1045 s->child.callback = callback;
1046 s->userdata = userdata;
1047 s->enabled = SD_EVENT_ONESHOT;
1049 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
1055 e->n_enabled_child_sources ++;
1057 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
1059 if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
1060 r = event_update_signal_fd(e);
1067 e->need_process_child = true;
1075 _public_ int sd_event_add_defer(
1077 sd_event_source **ret,
1078 sd_event_handler_t callback,
1084 assert_return(e, -EINVAL);
1085 assert_return(callback, -EINVAL);
1086 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1087 assert_return(!event_pid_changed(e), -ECHILD);
1089 s = source_new(e, !ret, SOURCE_DEFER);
1093 s->defer.callback = callback;
1094 s->userdata = userdata;
1095 s->enabled = SD_EVENT_ONESHOT;
1097 r = source_set_pending(s, true);
1109 _public_ int sd_event_add_post(
1111 sd_event_source **ret,
1112 sd_event_handler_t callback,
1118 assert_return(e, -EINVAL);
1119 assert_return(callback, -EINVAL);
1120 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1121 assert_return(!event_pid_changed(e), -ECHILD);
1123 r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
1127 s = source_new(e, !ret, SOURCE_POST);
1131 s->post.callback = callback;
1132 s->userdata = userdata;
1133 s->enabled = SD_EVENT_ON;
1135 r = set_put(e->post_sources, s);
1147 _public_ int sd_event_add_exit(
1149 sd_event_source **ret,
1150 sd_event_handler_t callback,
1156 assert_return(e, -EINVAL);
1157 assert_return(callback, -EINVAL);
1158 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1159 assert_return(!event_pid_changed(e), -ECHILD);
1162 e->exit = prioq_new(exit_prioq_compare);
1167 s = source_new(e, !ret, SOURCE_EXIT);
1171 s->exit.callback = callback;
1172 s->userdata = userdata;
1173 s->exit.prioq_index = PRIOQ_IDX_NULL;
1174 s->enabled = SD_EVENT_ONESHOT;
1176 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1188 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1189 assert_return(s, NULL);
1191 assert(s->n_ref >= 1);
1197 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1202 assert(s->n_ref >= 1);
1205 if (s->n_ref <= 0) {
1206 /* Here's a special hack: when we are called from a
1207 * dispatch handler we won't free the event source
1208 * immediately, but we will detach the fd from the
1209 * epoll. This way it is safe for the caller to unref
1210 * the event source and immediately close the fd, but
1211 * we still retain a valid event source object after
1214 if (s->dispatching) {
1215 if (s->type == SOURCE_IO)
1216 source_io_unregister(s);
1218 source_disconnect(s);
1226 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1227 assert_return(s, NULL);
1232 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1233 assert_return(s, -EINVAL);
1234 assert_return(s->type != SOURCE_EXIT, -EDOM);
1235 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1236 assert_return(!event_pid_changed(s->event), -ECHILD);
1241 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1242 assert_return(s, -EINVAL);
1243 assert_return(s->type == SOURCE_IO, -EDOM);
1244 assert_return(!event_pid_changed(s->event), -ECHILD);
1249 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1252 assert_return(s, -EINVAL);
1253 assert_return(fd >= 0, -EINVAL);
1254 assert_return(s->type == SOURCE_IO, -EDOM);
1255 assert_return(!event_pid_changed(s->event), -ECHILD);
1260 if (s->enabled == SD_EVENT_OFF) {
1262 s->io.registered = false;
1266 saved_fd = s->io.fd;
1267 assert(s->io.registered);
1270 s->io.registered = false;
1272 r = source_io_register(s, s->enabled, s->io.events);
1274 s->io.fd = saved_fd;
1275 s->io.registered = true;
1279 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
1285 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1286 assert_return(s, -EINVAL);
1287 assert_return(events, -EINVAL);
1288 assert_return(s->type == SOURCE_IO, -EDOM);
1289 assert_return(!event_pid_changed(s->event), -ECHILD);
1291 *events = s->io.events;
1295 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1298 assert_return(s, -EINVAL);
1299 assert_return(s->type == SOURCE_IO, -EDOM);
1300 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1301 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1302 assert_return(!event_pid_changed(s->event), -ECHILD);
1304 /* edge-triggered updates are never skipped, so we can reset edges */
1305 if (s->io.events == events && !(events & EPOLLET))
1308 if (s->enabled != SD_EVENT_OFF) {
1309 r = source_io_register(s, s->enabled, events);
1314 s->io.events = events;
1315 source_set_pending(s, false);
1320 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1321 assert_return(s, -EINVAL);
1322 assert_return(revents, -EINVAL);
1323 assert_return(s->type == SOURCE_IO, -EDOM);
1324 assert_return(s->pending, -ENODATA);
1325 assert_return(!event_pid_changed(s->event), -ECHILD);
1327 *revents = s->io.revents;
1331 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1332 assert_return(s, -EINVAL);
1333 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1334 assert_return(!event_pid_changed(s->event), -ECHILD);
1336 return s->signal.sig;
1339 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1340 assert_return(s, -EINVAL);
1341 assert_return(!event_pid_changed(s->event), -ECHILD);
1346 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1347 assert_return(s, -EINVAL);
1348 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1349 assert_return(!event_pid_changed(s->event), -ECHILD);
1351 if (s->priority == priority)
1354 s->priority = priority;
1357 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1360 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1362 if (s->type == SOURCE_EXIT)
1363 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1368 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1369 assert_return(s, -EINVAL);
1370 assert_return(m, -EINVAL);
1371 assert_return(!event_pid_changed(s->event), -ECHILD);
1377 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1380 assert_return(s, -EINVAL);
1381 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1382 assert_return(!event_pid_changed(s->event), -ECHILD);
1384 /* If we are dead anyway, we are fine with turning off
1385 * sources, but everything else needs to fail. */
1386 if (s->event->state == SD_EVENT_FINISHED)
1387 return m == SD_EVENT_OFF ? 0 : -ESTALE;
1389 if (s->enabled == m)
1392 if (m == SD_EVENT_OFF) {
1397 r = source_io_unregister(s);
1404 case SOURCE_TIME_REALTIME:
1405 case SOURCE_TIME_BOOTTIME:
1406 case SOURCE_TIME_MONOTONIC:
1407 case SOURCE_TIME_REALTIME_ALARM:
1408 case SOURCE_TIME_BOOTTIME_ALARM: {
1409 struct clock_data *d;
1412 d = event_get_clock_data(s->event, s->type);
1415 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1416 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1417 d->needs_rearm = true;
1423 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1424 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1425 event_update_signal_fd(s->event);
1433 assert(s->event->n_enabled_child_sources > 0);
1434 s->event->n_enabled_child_sources--;
1436 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1437 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1438 event_update_signal_fd(s->event);
1445 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1454 assert_not_reached("Wut? I shouldn't exist.");
1461 r = source_io_register(s, m, s->io.events);
1468 case SOURCE_TIME_REALTIME:
1469 case SOURCE_TIME_BOOTTIME:
1470 case SOURCE_TIME_MONOTONIC:
1471 case SOURCE_TIME_REALTIME_ALARM:
1472 case SOURCE_TIME_BOOTTIME_ALARM: {
1473 struct clock_data *d;
1476 d = event_get_clock_data(s->event, s->type);
1479 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1480 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1481 d->needs_rearm = true;
1488 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1489 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1490 event_update_signal_fd(s->event);
1495 if (s->enabled == SD_EVENT_OFF) {
1496 s->event->n_enabled_child_sources++;
1498 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1499 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1500 event_update_signal_fd(s->event);
1509 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1518 assert_not_reached("Wut? I shouldn't exist.");
1523 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1526 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1531 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1532 assert_return(s, -EINVAL);
1533 assert_return(usec, -EINVAL);
1534 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1535 assert_return(!event_pid_changed(s->event), -ECHILD);
1537 *usec = s->time.next;
1541 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1542 struct clock_data *d;
1544 assert_return(s, -EINVAL);
1545 assert_return(usec != (uint64_t) -1, -EINVAL);
1546 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1547 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1548 assert_return(!event_pid_changed(s->event), -ECHILD);
1550 s->time.next = usec;
1552 source_set_pending(s, false);
1554 d = event_get_clock_data(s->event, s->type);
1557 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1558 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1559 d->needs_rearm = true;
1564 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1565 assert_return(s, -EINVAL);
1566 assert_return(usec, -EINVAL);
1567 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1568 assert_return(!event_pid_changed(s->event), -ECHILD);
1570 *usec = s->time.accuracy;
1574 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1575 struct clock_data *d;
1577 assert_return(s, -EINVAL);
1578 assert_return(usec != (uint64_t) -1, -EINVAL);
1579 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1580 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1581 assert_return(!event_pid_changed(s->event), -ECHILD);
1584 usec = DEFAULT_ACCURACY_USEC;
1586 s->time.accuracy = usec;
1588 source_set_pending(s, false);
1590 d = event_get_clock_data(s->event, s->type);
1593 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1594 d->needs_rearm = true;
1599 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
1600 assert_return(s, -EINVAL);
1601 assert_return(clock, -EINVAL);
1602 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1603 assert_return(!event_pid_changed(s->event), -ECHILD);
1605 *clock = event_source_type_to_clock(s->type);
1609 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1610 assert_return(s, -EINVAL);
1611 assert_return(pid, -EINVAL);
1612 assert_return(s->type == SOURCE_CHILD, -EDOM);
1613 assert_return(!event_pid_changed(s->event), -ECHILD);
1615 *pid = s->child.pid;
1619 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1622 assert_return(s, -EINVAL);
1623 assert_return(s->type != SOURCE_EXIT, -EDOM);
1624 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1625 assert_return(!event_pid_changed(s->event), -ECHILD);
1627 if (s->prepare == callback)
1630 if (callback && s->prepare) {
1631 s->prepare = callback;
1635 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1639 s->prepare = callback;
1642 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1646 prioq_remove(s->event->prepare, s, &s->prepare_index);
1651 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1652 assert_return(s, NULL);
1657 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1660 assert_return(s, NULL);
1663 s->userdata = userdata;
1668 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1679 initialize_perturb(e);
1682 Find a good time to wake up again between times a and b. We
1683 have two goals here:
1685 a) We want to wake up as seldom as possible, hence prefer
1686 later times over earlier times.
1688 b) But if we have to wake up, then let's make sure to
1689 dispatch as much as possible on the entire system.
1691 We implement this by waking up everywhere at the same time
1692 within any given minute if we can, synchronised via the
1693 perturbation value determined from the boot ID. If we can't,
1694 then we try to find the same spot in every 10s, then 1s and
1695 then 250ms step. Otherwise, we pick the last possible time
1699 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
1701 if (_unlikely_(c < USEC_PER_MINUTE))
1704 c -= USEC_PER_MINUTE;
1710 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1712 if (_unlikely_(c < USEC_PER_SEC*10))
1715 c -= USEC_PER_SEC*10;
1721 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1723 if (_unlikely_(c < USEC_PER_SEC))
1732 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1734 if (_unlikely_(c < USEC_PER_MSEC*250))
1737 c -= USEC_PER_MSEC*250;
1746 static int event_arm_timer(
1748 struct clock_data *d) {
1750 struct itimerspec its = {};
1751 sd_event_source *a, *b;
1758 if (!d->needs_rearm)
1761 d->needs_rearm = false;
1763 a = prioq_peek(d->earliest);
1764 if (!a || a->enabled == SD_EVENT_OFF) {
1769 if (d->next == USEC_INFINITY)
1773 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1777 d->next = USEC_INFINITY;
1781 b = prioq_peek(d->latest);
1782 assert_se(b && b->enabled != SD_EVENT_OFF);
1784 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1788 assert_se(d->fd >= 0);
1791 /* We don' want to disarm here, just mean some time looooong ago. */
1792 its.it_value.tv_sec = 0;
1793 its.it_value.tv_nsec = 1;
1795 timespec_store(&its.it_value, t);
1797 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1805 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1808 assert(s->type == SOURCE_IO);
1810 /* If the event source was already pending, we just OR in the
1811 * new revents, otherwise we reset the value. The ORing is
1812 * necessary to handle EPOLLONESHOT events properly where
1813 * readability might happen independently of writability, and
1814 * we need to keep track of both */
1817 s->io.revents |= revents;
1819 s->io.revents = revents;
1821 return source_set_pending(s, true);
1824 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
1831 assert_return(events == EPOLLIN, -EIO);
1833 ss = read(fd, &x, sizeof(x));
1835 if (errno == EAGAIN || errno == EINTR)
1841 if (_unlikely_(ss != sizeof(x)))
1845 *next = USEC_INFINITY;
/* Mark every timer source on clock 'd' whose deadline has elapsed as
 * pending, then reshuffle it in both the 'earliest' and 'latest'
 * priority queues (source_set_pending changes its ordering keys).
 * NOTE(review): the surrounding loop and the elapsed-time comparison
 * are on elided lines of this extract. */
1850 static int process_timer(
1853                 struct clock_data *d) {
1862                 s = prioq_peek(d->earliest);
/* Stop conditions (partially elided): no source, source disabled, or
 * deadline not yet reached. */
1865                     s->enabled == SD_EVENT_OFF ||
1869                 r = source_set_pending(s, true);
1873                 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1874                 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Poll every registered child source with waitid(WNOHANG) and mark
 * those with a state change as pending. Children are deliberately not
 * reaped here (WNOWAIT) so the dispatch callback still sees the
 * zombie; reaping happens in source_dispatch(). */
1880 static int process_child(sd_event *e) {
1887         e->need_process_child = false;
1890         So, this is ugly. We iteratively invoke waitid() with P_PID
1891         + WNOHANG for each PID we wait for, instead of using
1892         P_ALL. This is because we only want to get child
1893         information of very specific child processes, and not all
1894         of them. We might not have processed the SIGCHLD event of a
1895         previous invocation and we don't want to maintain an
1896         unbounded *per-child* event queue, hence we really don't
1897         want anything flushed out of the kernel's queue that we
1898         don't care about. Since this is O(n) this means that if you
1899         have a lot of processes you probably want to handle SIGCHLD
1902         We do not reap the children here (by using WNOWAIT), this
1903         is only done after the event source is dispatched so that
1904         the callback still sees the process as a zombie.
1907         HASHMAP_FOREACH(s, e->child_sources, i) {
1908                 assert(s->type == SOURCE_CHILD);
1913                 if (s->enabled == SD_EVENT_OFF)
/* Clear stale siginfo so si_pid == 0 reliably means "no change". */
1916                 zero(s->child.siginfo);
/* WNOWAIT only when the caller asked for WEXITED: keep the zombie
 * around until after dispatch. */
1917                 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1918                            WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
/* si_pid != 0 means waitid() actually reported a state change. */
1922                 if (s->child.siginfo.si_pid != 0) {
1924                                 s->child.siginfo.si_code == CLD_EXITED ||
1925                                 s->child.siginfo.si_code == CLD_KILLED ||
1926                                 s->child.siginfo.si_code == CLD_DUMPED;
1928                         if (!zombie && (s->child.options & WEXITED)) {
1929                                 /* If the child isn't dead then let's
1930                                  * immediately remove the state change
1931                                  * from the queue, since there's no
1932                                  * benefit in leaving it queued */
1934                                 assert(s->child.options & (WSTOPPED|WCONTINUED));
/* Best-effort dequeue of the stop/continue notification; return value
 * deliberately ignored. */
1935                                 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
1938                         r = source_set_pending(s, true);
/* Drain the signalfd: for each queued siginfo record, run SIGCHLD
 * handling via process_child() and mark the matching registered signal
 * source (if any) pending with a copy of the siginfo.
 * NOTE(review): the read loop and several returns are on elided lines. */
1947 static int process_signal(sd_event *e, uint32_t events) {
1948         bool read_one = false;
1953         assert_return(events == EPOLLIN, -EIO);
1956                 struct signalfd_siginfo si;
1958                 sd_event_source *s = NULL;
1960                 ss = read(e->signal_fd, &si, sizeof(si));
/* Queue drained (or interrupted) — not an error. */
1962                         if (errno == EAGAIN || errno == EINTR)
/* Partial siginfo records should never happen on a signalfd. */
1968                 if (_unlikely_(ss != sizeof(si)))
/* SIGCHLD gets special handling so child sources fire even without an
 * explicit SIGCHLD signal source. */
1973                 if (si.ssi_signo == SIGCHLD) {
1974                         r = process_child(e);
1981                 if (e->signal_sources)
1982                         s = e->signal_sources[si.ssi_signo];
/* Hand the siginfo to the source's dispatch callback later. */
1987                 s->signal.siginfo = si;
1988                 r = source_set_pending(s, true);
/* Invoke the callback of one event source according to its type.
 * Side effects before the callback: the pending flag is cleared (for
 * most types), all post sources are marked pending when dispatching a
 * non-post source, and ONESHOT sources are disabled. A callback
 * returning an error gets the source disabled (logged, not fatal).
 * NOTE(review): several error checks and the final return are on
 * elided lines of this extract. */
1994 static int source_dispatch(sd_event_source *s) {
1998         assert(s->pending || s->type == SOURCE_EXIT);
/* DEFER and EXIT sources stay pending across dispatches; everything
 * else is un-pended before its callback runs. */
2000         if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
2001                 r = source_set_pending(s, false);
2006         if (s->type != SOURCE_POST) {
2010                 /* If we execute a non-post source, let's mark all
2011                  * post sources as pending */
2013                 SET_FOREACH(z, s->event->post_sources, i) {
2014                         if (z->enabled == SD_EVENT_OFF)
2017                         r = source_set_pending(z, true);
/* ONESHOT sources are disabled *before* the callback so the callback
 * may re-enable them if it wants another shot. */
2023         if (s->enabled == SD_EVENT_ONESHOT) {
2024                 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Guards against the source being freed while its callback runs. */
2029         s->dispatching = true;
2034                 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
2037         case SOURCE_TIME_REALTIME:
2038         case SOURCE_TIME_BOOTTIME:
2039         case SOURCE_TIME_MONOTONIC:
2040         case SOURCE_TIME_REALTIME_ALARM:
2041         case SOURCE_TIME_BOOTTIME_ALARM:
2042                 r = s->time.callback(s, s->time.next, s->userdata);
2046                 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
2049         case SOURCE_CHILD: {
2052                 zombie = s->child.siginfo.si_code == CLD_EXITED ||
2053                          s->child.siginfo.si_code == CLD_KILLED ||
2054                          s->child.siginfo.si_code == CLD_DUMPED;
2056                 r = s->child.callback(s, &s->child.siginfo, s->userdata);
2058                 /* Now, reap the PID for good. */
2060                         waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
2066                 r = s->defer.callback(s, s->userdata);
2070                 r = s->post.callback(s, s->userdata);
2074                 r = s->exit.callback(s, s->userdata);
2077         case SOURCE_WATCHDOG:
2078         case _SOURCE_EVENT_SOURCE_TYPE_MAX:
2079         case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
/* These types never carry user callbacks, so dispatch is impossible. */
2080                 assert_not_reached("Wut? I shouldn't exist.");
2083         s->dispatching = false;
/* A failing callback disables the source rather than aborting the
 * whole loop. */
2086                 log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));
2091                 sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Run the prepare() callback of each source that has one, at most once
 * per loop iteration (tracked via prepare_iteration). A failing
 * prepare callback disables its source. The surrounding loop and
 * returns are on elided lines of this extract. */
2096 static int event_prepare(sd_event *e) {
2104                 s = prioq_peek(e->prepare);
/* Queue is ordered so already-prepared sources sink; hitting one means
 * everything remaining was handled this iteration. */
2105                 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
2108                 s->prepare_iteration = e->iteration;
2109                 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
2115                 s->dispatching = true;
2116                 r = s->prepare(s, s->userdata);
2117                 s->dispatching = false;
2120                         log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));
2125                         sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Dispatch the highest-priority enabled exit source; when none remain,
 * transition the loop to SD_EVENT_FINISHED. The loop state is EXITING
 * for the duration of the callback and restored to PASSIVE after. */
2131 static int dispatch_exit(sd_event *e) {
2137         p = prioq_peek(e->exit);
2138         if (!p || p->enabled == SD_EVENT_OFF) {
2139                 e->state = SD_EVENT_FINISHED;
2145         e->state = SD_EVENT_EXITING;
2147         r = source_dispatch(p);
2149         e->state = SD_EVENT_PASSIVE;
/* Return the highest-priority pending source that is enabled, or NULL
 * if none (NULL returns are on elided lines of this extract). */
2155 static sd_event_source* event_next_pending(sd_event *e) {
2160         p = prioq_peek(e->pending);
2164         if (p->enabled == SD_EVENT_OFF)
/* Program the watchdog timerfd to fire between 1/2 and 3/4 of the
 * watchdog period after the last ping, jittered via sleep_between(). */
2170 static int arm_watchdog(sd_event *e) {
2171         struct itimerspec its = {};
2176         assert(e->watchdog_fd >= 0);
2178         t = sleep_between(e,
2179                           e->watchdog_last + (e->watchdog_period / 2),
2180                           e->watchdog_last + (e->watchdog_period * 3 / 4));
2182         timespec_store(&its.it_value, t);
2184         /* Make sure we never set the watchdog to 0, which tells the
2185          * kernel to disable it. */
2186         if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
2187                 its.it_value.tv_nsec = 1;
2189         r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
/* Send a keep-alive ping to the service manager (WATCHDOG=1) — but not
 * more often than every period/4 — and re-arm the watchdog timer. */
2196 static int process_watchdog(sd_event *e) {
2202         /* Don't notify watchdog too often */
2203         if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
2206         sd_notify(false, "WATCHDOG=1");
2207         e->watchdog_last = e->timestamp.monotonic;
2209         return arm_watchdog(e);
/* Run a single event loop iteration: run prepare callbacks, arm the
 * five per-clock timerfds, wait on epoll for up to 'timeout' usec
 * ((uint64_t) -1 = wait forever), flush/collect whatever fired, expire
 * timers, process children, then dispatch the next pending source.
 * NOTE(review): error-check lines after most calls, the 'finish:'
 * label and the final return are elided in this extract. */
2212 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
2213         struct epoll_event *ev_queue;
2214         unsigned ev_queue_max;
2219         assert_return(e, -EINVAL);
2220         assert_return(!event_pid_changed(e), -ECHILD);
2221         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2222         assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
/* Once an exit was requested, only exit sources run from here on. */
2224         if (e->exit_requested)
2225                 return dispatch_exit(e);
2229         e->state = SD_EVENT_RUNNING;
2231         r = event_prepare(e);
/* Arm all five clocks so the soonest deadline wakes the epoll wait. */
2235         r = event_arm_timer(e, &e->realtime);
2239         r = event_arm_timer(e, &e->boottime);
2243         r = event_arm_timer(e, &e->monotonic);
2247         r = event_arm_timer(e, &e->realtime_alarm);
2251         r = event_arm_timer(e, &e->boottime_alarm);
/* Work is already queued — presumably the timeout is forced to zero on
 * an elided line so epoll_wait doesn't block; verify in full source. */
2255         if (event_next_pending(e) || e->need_process_child)
/* Stack-allocate the epoll output buffer, bounded by EPOLL_QUEUE_MAX. */
2258         ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
2259         ev_queue = newa(struct epoll_event, ev_queue_max);
/* Round the usec timeout up to whole milliseconds for epoll_wait. */
2261         m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
2262                        timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
2264                 r = errno == EAGAIN || errno == EINTR ? 1 : -errno;
/* Snapshot "now" once per iteration for all timer processing below. */
2270         dual_timestamp_get(&e->timestamp);
2271         e->timestamp_boottime = now(CLOCK_BOOTTIME);
/* Internal fds are tagged with INT_TO_PTR(type) in epoll data; any
 * other data.ptr is an I/O event source pointer. */
2273         for (i = 0; i < m; i++) {
2275                 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
2276                         r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
2277                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME))
2278                         r = flush_timer(e, e->boottime.fd, ev_queue[i].events, &e->boottime.next);
2279                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
2280                         r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
2281                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
2282                         r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
2283                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
2284                         r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
2285                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
2286                         r = process_signal(e, ev_queue[i].events);
2287                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
2288                         r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
2290                         r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
2296         r = process_watchdog(e);
/* Expire elapsed timers on each clock against the snapshot above. */
2300         r = process_timer(e, e->timestamp.realtime, &e->realtime);
2304         r = process_timer(e, e->timestamp_boottime, &e->boottime);
2308         r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
2312         r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
2316         r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);
2320         if (e->need_process_child) {
2321                 r = process_child(e);
/* Dispatch exactly one pending source per sd_event_run() call. */
2326         p = event_next_pending(e);
2332         r = source_dispatch(p);
2335         e->state = SD_EVENT_PASSIVE;
/* Run sd_event_run() iterations with an infinite timeout until the
 * loop reaches SD_EVENT_FINISHED (error handling inside the loop is on
 * elided lines of this extract). */
2341 _public_ int sd_event_loop(sd_event *e) {
2344         assert_return(e, -EINVAL);
2345         assert_return(!event_pid_changed(e), -ECHILD);
2346         assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2350         while (e->state != SD_EVENT_FINISHED) {
2351                 r = sd_event_run(e, (uint64_t) -1);
/* Return the loop's current state (the return statement is on an
 * elided line of this extract). */
2363 _public_ int sd_event_get_state(sd_event *e) {
2364         assert_return(e, -EINVAL);
2365         assert_return(!event_pid_changed(e), -ECHILD);
/* Store the code passed to sd_event_exit() in *code; fails (with an
 * error on an elided line) when no exit has been requested yet. */
2370 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
2371         assert_return(e, -EINVAL);
2372         assert_return(code, -EINVAL);
2373         assert_return(!event_pid_changed(e), -ECHILD);
2375         if (!e->exit_requested)
2378         *code = e->exit_code;
/* Request loop termination with the given exit code; subsequent
 * sd_event_run() calls will dispatch exit sources and then finish. */
2382 _public_ int sd_event_exit(sd_event *e, int code) {
2383         assert_return(e, -EINVAL);
2384         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2385         assert_return(!event_pid_changed(e), -ECHILD);
2387         e->exit_requested = true;
2388         e->exit_code = code;
/* Return the timestamp cached at the start of the current/last loop
 * iteration for the requested clock, so all sources in one iteration
 * see a consistent "now". The fallback path for a loop that has not
 * run yet, and the switch/default lines, are partially elided. */
2393 _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
2394         assert_return(e, -EINVAL);
2395         assert_return(usec, -EINVAL);
2396         assert_return(!event_pid_changed(e), -ECHILD);
2398         /* If we haven't run yet, just get the actual time */
2399         if (!dual_timestamp_is_set(&e->timestamp))
2404         case CLOCK_REALTIME:
2405         case CLOCK_REALTIME_ALARM:
2406                 *usec = e->timestamp.realtime;
2409         case CLOCK_MONOTONIC:
2410                 *usec = e->timestamp.monotonic;
2413         case CLOCK_BOOTTIME:
2414         case CLOCK_BOOTTIME_ALARM:
2415                 *usec = e->timestamp_boottime;
/* Return (creating on first use) the calling thread's default event
 * loop, stored in thread-local storage. With a NULL 'ret' — presumably
 * the elided guard before line 2429 — it merely reports whether a
 * default loop already exists. */
2422 _public_ int sd_event_default(sd_event **ret) {
2424         static thread_local sd_event *default_event = NULL;
2429                 return !!default_event;
2431         if (default_event) {
/* Hand out an extra reference to the existing default loop. */
2432                 *ret = sd_event_ref(default_event);
2436         r = sd_event_new(&e);
/* Lets the event clear this TLS slot when it is freed. */
2440         e->default_event_ptr = &default_event;
/* Report the thread ID the event loop is bound to (the lookup and
 * return are on elided lines of this extract). */
2448 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
2449         assert_return(e, -EINVAL);
2450         assert_return(tid, -EINVAL);
2451         assert_return(!event_pid_changed(e), -ECHILD);
/* Enable or disable service-manager watchdog integration. Enabling
 * queries the period via sd_watchdog_enabled(), pings immediately,
 * creates a CLOCK_MONOTONIC timerfd, arms it, and registers it in the
 * epoll set tagged SOURCE_WATCHDOG. Disabling unregisters and closes
 * the fd. NOTE(review): several error-handling and return lines are
 * elided in this extract; line 2509 appears to be a failure-cleanup
 * path. */
2461 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
2464         assert_return(e, -EINVAL);
2465         assert_return(!event_pid_changed(e), -ECHILD);
/* Already in the requested state — nothing to do. */
2467         if (e->watchdog == !!b)
2471                 struct epoll_event ev = {};
2473                 r = sd_watchdog_enabled(false, &e->watchdog_period);
2477                 /* Issue first ping immediately */
2478                 sd_notify(false, "WATCHDOG=1");
2479                 e->watchdog_last = now(CLOCK_MONOTONIC);
2481                 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
2482                 if (e->watchdog_fd < 0)
2485                 r = arm_watchdog(e);
2489                 ev.events = EPOLLIN;
2490                 ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);
2492                 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
/* Disable path: unregister and close the watchdog timerfd. */
2499                 if (e->watchdog_fd >= 0) {
2500                         epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
2501                         e->watchdog_fd = safe_close(e->watchdog_fd);
2509         e->watchdog_fd = safe_close(e->watchdog_fd);
2513 _public_ int sd_event_get_watchdog(sd_event *e) {
2514 assert_return(e, -EINVAL);
2515 assert_return(!event_pid_changed(e), -ECHILD);