1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2013 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/epoll.h>
23 #include <sys/timerfd.h>
28 #include "sd-daemon.h"
33 #include "time-util.h"
/* Upper bound on the number of epoll events fetched per wait cycle. */
39 #define EPOLL_QUEUE_MAX 512U
/* Scheduling slack applied to timer sources when the caller passes
 * accuracy == 0 (see sd_event_add_time() below). */
40 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
/* Internal discriminator for the per-source union in sd_event_source.
 *
 * NOTE(review): the extraction dropped several enumerators from this
 * enum; the ones below are restored from the source types referenced
 * throughout this file (SOURCE_IO, SOURCE_SIGNAL, SOURCE_CHILD,
 * SOURCE_DEFER, SOURCE_POST, SOURCE_EXIT, SOURCE_WATCHDOG) — confirm
 * the exact order against upstream before relying on the values.
 *
 * Also fixed a typo: _SOUFCE_EVENT_SOURCE_TYPE_MAX ->
 * _SOURCE_EVENT_SOURCE_TYPE_MAX. */
typedef enum EventSourceType {
        SOURCE_IO,
        SOURCE_TIME_REALTIME,
        SOURCE_TIME_MONOTONIC,
        SOURCE_TIME_REALTIME_ALARM,
        SOURCE_TIME_BOOTTIME_ALARM,
        SOURCE_SIGNAL,
        SOURCE_CHILD,
        SOURCE_DEFER,
        SOURCE_POST,
        SOURCE_EXIT,
        SOURCE_WATCHDOG,
        _SOURCE_EVENT_SOURCE_TYPE_MAX,
        _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
} EventSourceType;
/* True for the clock-based source types; these share the clock_data
 * machinery (earliest/latest prioqs plus one timerfd per clock). */
58 #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
/* One registered event source. NOTE(review): the extraction dropped
 * many lines of this struct (n_ref, event back-pointer, enabled state,
 * priority, and the union/struct framing around the per-type members
 * below) — the surviving fields are fragments, confirm layout against
 * upstream. */
60 struct sd_event_source {
/* Optional per-iteration prepare callback, see
 * sd_event_source_set_prepare(). */
65 sd_event_handler_t prepare;
/* 5 bits suffice for all EventSourceType values. */
67 EventSourceType type:5;
/* Positions of this source in the event loop's prioqs. */
73 unsigned pending_index;
74 unsigned prepare_index;
/* Iteration counters used by the comparators for FIFO ordering. */
75 unsigned pending_iteration;
76 unsigned prepare_iteration;
/* Per-type members (originally a union; framing lost in extraction): */
80 sd_event_io_handler_t callback;
87 sd_event_time_handler_t callback;
88 usec_t next, accuracy;
89 unsigned earliest_index;
90 unsigned latest_index;
93 sd_event_signal_handler_t callback;
94 struct signalfd_siginfo siginfo;
98 sd_event_child_handler_t callback;
104 sd_event_handler_t callback;
107 sd_event_handler_t callback;
110 sd_event_handler_t callback;
111 unsigned prioq_index;
/* NOTE(review): interior of struct clock_data and struct sd_event; the
 * struct headers and several members (epoll_fd, signal_fd, sigset,
 * n_sources, state, iteration, exit prioq, post_sources, ...) were lost
 * in extraction — confirm against upstream. */
119 /* For all clocks we maintain two priority queues each, one
120 * ordered for the earliest times the events may be
121 * dispatched, and one ordered by the latest times they must
122 * have been dispatched. The range between the top entries in
123 * the two prioqs is the time window we can freely schedule
141 /* timerfd_create() only supports these four clocks so far. We
142 * can add support for more clocks when the kernel learns to
143 * deal with them, too. */
144 struct clock_data realtime;
145 struct clock_data monotonic;
146 struct clock_data realtime_alarm;
147 struct clock_data boottime_alarm;
/* Indexed by signal number; NULL when no source for that signal. */
152 sd_event_source **signal_sources;
/* pid_t (as pointer key) -> sd_event_source* of type SOURCE_CHILD. */
154 Hashmap *child_sources;
155 unsigned n_enabled_child_sources;
164 dual_timestamp timestamp;
165 usec_t timestamp_boottime;
168 bool exit_requested:1;
169 bool need_process_child:1;
/* If set, points at the static default-loop pointer; cleared on free. */
175 sd_event **default_event_ptr;
177 usec_t watchdog_last, watchdog_period;
182 static int pending_prioq_compare(const void *a, const void *b) {
183 const sd_event_source *x = a, *y = b;
188 /* Enabled ones first */
189 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
191 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
194 /* Lower priority values first */
195 if (x->priority < y->priority)
197 if (x->priority > y->priority)
200 /* Older entries first */
201 if (x->pending_iteration < y->pending_iteration)
203 if (x->pending_iteration > y->pending_iteration)
206 /* Stability for the rest */
215 static int prepare_prioq_compare(const void *a, const void *b) {
216 const sd_event_source *x = a, *y = b;
221 /* Move most recently prepared ones last, so that we can stop
222 * preparing as soon as we hit one that has already been
223 * prepared in the current iteration */
224 if (x->prepare_iteration < y->prepare_iteration)
226 if (x->prepare_iteration > y->prepare_iteration)
229 /* Enabled ones first */
230 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
232 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
235 /* Lower priority values first */
236 if (x->priority < y->priority)
238 if (x->priority > y->priority)
241 /* Stability for the rest */
250 static int earliest_time_prioq_compare(const void *a, const void *b) {
251 const sd_event_source *x = a, *y = b;
253 assert(EVENT_SOURCE_IS_TIME(x->type));
254 assert(x->type == y->type);
256 /* Enabled ones first */
257 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
259 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
262 /* Move the pending ones to the end */
263 if (!x->pending && y->pending)
265 if (x->pending && !y->pending)
269 if (x->time.next < y->time.next)
271 if (x->time.next > y->time.next)
274 /* Stability for the rest */
283 static int latest_time_prioq_compare(const void *a, const void *b) {
284 const sd_event_source *x = a, *y = b;
286 assert(EVENT_SOURCE_IS_TIME(x->type));
287 assert(x->type == y->type);
289 /* Enabled ones first */
290 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
292 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
295 /* Move the pending ones to the end */
296 if (!x->pending && y->pending)
298 if (x->pending && !y->pending)
302 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
304 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
307 /* Stability for the rest */
316 static int exit_prioq_compare(const void *a, const void *b) {
317 const sd_event_source *x = a, *y = b;
319 assert(x->type == SOURCE_EXIT);
320 assert(y->type == SOURCE_EXIT);
322 /* Enabled ones first */
323 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
325 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
328 /* Lower priority values first */
329 if (x->priority < y->priority)
331 if (x->priority > y->priority)
334 /* Stability for the rest */
343 static void free_clock_data(struct clock_data *d) {
347 prioq_free(d->earliest);
348 prioq_free(d->latest);
/* Tears down an sd_event object: clears the default-loop back-pointer,
 * closes all fds, frees all queues/maps. Caller must have released
 * every source first (n_sources == 0). NOTE(review): extraction dropped
 * a few lines here (e.g. the trailing free(e)) — confirm upstream. */
351 static void event_free(sd_event *e) {
353 assert(e->n_sources == 0);
/* If we were installed as the process default loop, reset the static
 * pointer so a later default-loop request allocates a fresh one. */
355 if (e->default_event_ptr)
356 *(e->default_event_ptr) = NULL;
358 safe_close(e->epoll_fd);
359 safe_close(e->signal_fd);
360 safe_close(e->watchdog_fd);
362 free_clock_data(&e->realtime);
363 free_clock_data(&e->monotonic);
364 free_clock_data(&e->realtime_alarm);
365 free_clock_data(&e->boottime_alarm);
367 prioq_free(e->pending);
368 prioq_free(e->prepare);
371 free(e->signal_sources);
373 hashmap_free(e->child_sources);
374 set_free(e->post_sources);
/* Allocates a new event loop. All fds start unset (-1), all clock
 * "next" deadlines unset ((usec_t) -1). NOTE(review): extraction
 * dropped lines here (n_ref init, OOM checks, error-path cleanup and
 * the final *ret assignment) — confirm against upstream. */
378 _public_ int sd_event_new(sd_event** ret) {
382 assert_return(ret, -EINVAL);
384 e = new0(sd_event, 1);
389 e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
390 e->realtime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = (usec_t) -1;
/* Remember the creating PID so use across fork() can be detected. */
391 e->original_pid = getpid();
/* The perturbation offset is computed lazily in initialize_perturb(). */
392 e->perturb = (usec_t) -1;
394 assert_se(sigemptyset(&e->sigset) == 0);
396 e->pending = prioq_new(pending_prioq_compare);
402 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
403 if (e->epoll_fd < 0) {
416 _public_ sd_event* sd_event_ref(sd_event *e) {
417 assert_return(e, NULL);
419 assert(e->n_ref >= 1);
425 _public_ sd_event* sd_event_unref(sd_event *e) {
430 assert(e->n_ref >= 1);
439 static bool event_pid_changed(sd_event *e) {
442 /* We don't support people creating am event loop and keeping
443 * it around over a fork(). Let's complain. */
445 return e->original_pid != getpid();
448 static int source_io_unregister(sd_event_source *s) {
452 assert(s->type == SOURCE_IO);
454 if (!s->io.registered)
457 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
461 s->io.registered = false;
465 static int source_io_register(
470 struct epoll_event ev = {};
474 assert(s->type == SOURCE_IO);
475 assert(enabled != SD_EVENT_OFF);
480 if (enabled == SD_EVENT_ONESHOT)
481 ev.events |= EPOLLONESHOT;
483 if (s->io.registered)
484 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
486 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
491 s->io.registered = true;
496 static clockid_t event_source_type_to_clock(EventSourceType t) {
500 case SOURCE_TIME_REALTIME:
501 return CLOCK_REALTIME;
503 case SOURCE_TIME_MONOTONIC:
504 return CLOCK_MONOTONIC;
506 case SOURCE_TIME_REALTIME_ALARM:
507 return CLOCK_REALTIME_ALARM;
509 case SOURCE_TIME_BOOTTIME_ALARM:
510 return CLOCK_BOOTTIME_ALARM;
513 return (clockid_t) -1;
517 static EventSourceType clock_to_event_source_type(clockid_t clock) {
522 return SOURCE_TIME_REALTIME;
524 case CLOCK_MONOTONIC:
525 return SOURCE_TIME_MONOTONIC;
527 case CLOCK_REALTIME_ALARM:
528 return SOURCE_TIME_REALTIME_ALARM;
530 case CLOCK_BOOTTIME_ALARM:
531 return SOURCE_TIME_BOOTTIME_ALARM;
534 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
538 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
543 case SOURCE_TIME_REALTIME:
546 case SOURCE_TIME_MONOTONIC:
547 return &e->monotonic;
549 case SOURCE_TIME_REALTIME_ALARM:
550 return &e->realtime_alarm;
552 case SOURCE_TIME_BOOTTIME_ALARM:
553 return &e->boottime_alarm;
/* Detaches a source from all loop data structures (epoll, prioqs,
 * signal mask, child map) according to its type, then drops the
 * source's reference on the loop. NOTE(review): extraction dropped the
 * switch framing and several case labels — confirm against upstream. */
560 static void source_free(sd_event_source *s) {
564 assert(s->event->n_sources > 0);
570 source_io_unregister(s);
574 case SOURCE_TIME_REALTIME:
575 case SOURCE_TIME_MONOTONIC:
576 case SOURCE_TIME_REALTIME_ALARM:
577 case SOURCE_TIME_BOOTTIME_ALARM: {
578 struct clock_data *d;
580 d = event_get_clock_data(s->event, s->type);
583 prioq_remove(d->earliest, s, &s->time.earliest_index);
584 prioq_remove(d->latest, s, &s->time.latest_index);
/* Signal sources: drop the signal from the loop's mask unless SIGCHLD
 * is still needed for enabled child sources. */
589 if (s->signal.sig > 0) {
590 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
591 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
593 if (s->event->signal_sources)
594 s->event->signal_sources[s->signal.sig] = NULL;
/* Child sources: fix the enabled counter and possibly drop SIGCHLD. */
600 if (s->child.pid > 0) {
601 if (s->enabled != SD_EVENT_OFF) {
602 assert(s->event->n_enabled_child_sources > 0);
603 s->event->n_enabled_child_sources--;
606 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
607 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
609 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
619 set_remove(s->event->post_sources, s);
623 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
627 assert_not_reached("Wut? I shouldn't exist.");
/* Common cleanup for all types: leave the pending/prepare queues. */
631 prioq_remove(s->event->pending, s, &s->pending_index);
634 prioq_remove(s->event->prepare, s, &s->prepare_index);
636 s->event->n_sources--;
637 sd_event_unref(s->event);
/* Marks a source as pending (b=true) or clears it (b=false), keeping
 * the loop's pending prioq and — for time sources — the per-clock
 * prioqs in sync. NOTE(review): extraction dropped the early-exit for
 * unchanged state and error handling — confirm against upstream. */
643 static int source_set_pending(sd_event_source *s, bool b) {
/* Exit sources are dispatched from their own queue and never "pend". */
647 assert(s->type != SOURCE_EXIT);
/* Record in which loop iteration the source became pending; the
 * pending comparator uses this for FIFO ordering. */
655 s->pending_iteration = s->event->iteration;
657 r = prioq_put(s->event->pending, s, &s->pending_index);
663 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
665 if (EVENT_SOURCE_IS_TIME(s->type)) {
666 struct clock_data *d;
668 d = event_get_clock_data(s->event, s->type);
/* Pending state affects ordering in both time prioqs (pending entries
 * sink to the end), so reshuffle. */
671 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
672 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Allocates a source of the given type with one reference, taking a
 * reference on the loop. NOTE(review): extraction dropped the OOM
 * check, n_ref/type initialization and the n_sources++ — confirm
 * against upstream. */
678 static sd_event_source *source_new(sd_event *e, EventSourceType type) {
683 s = new0(sd_event_source, 1);
688 s->event = sd_event_ref(e);
690 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
/* Public: adds an I/O event source watching fd for the given epoll
 * events; the new source starts enabled (SD_EVENT_ON). NOTE(review):
 * extraction dropped the fd/parameter lines, error cleanup and the
 * final *ret assignment — confirm against upstream. */
697 _public_ int sd_event_add_io(
699 sd_event_source **ret,
702 sd_event_io_handler_t callback,
708 assert_return(e, -EINVAL);
709 assert_return(fd >= 0, -EINVAL);
/* Only these epoll flags are meaningful for callers. */
710 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
711 assert_return(callback, -EINVAL);
712 assert_return(ret, -EINVAL);
713 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
714 assert_return(!event_pid_changed(e), -ECHILD);
716 s = source_new(e, SOURCE_IO);
721 s->io.events = events;
722 s->io.callback = callback;
723 s->userdata = userdata;
724 s->enabled = SD_EVENT_ON;
726 r = source_io_register(s, s->enabled, events);
736 static void initialize_perturb(sd_event *e) {
737 sd_id128_t bootid = {};
739 /* When we sleep for longer, we try to realign the wakeup to
740 the same time wihtin each minute/second/250ms, so that
741 events all across the system can be coalesced into a single
742 CPU wakeup. However, let's take some system-specific
743 randomness for this value, so that in a network of systems
744 with synced clocks timer events are distributed a
745 bit. Here, we calculate a perturbation usec offset from the
748 if (_likely_(e->perturb != (usec_t) -1))
751 if (sd_id128_get_boot(&bootid) >= 0)
752 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
/* Lazily creates the timerfd for one clock and registers it with epoll;
 * idempotent once d->fd is set. The epoll data.ptr carries the
 * EventSourceType (not a source pointer) so the dispatcher can tell
 * timer wakeups apart. NOTE(review): extraction dropped error paths and
 * the d->fd assignment — confirm against upstream. */
755 static int event_setup_timer_fd(
757 struct clock_data *d,
760 struct epoll_event ev = {};
766 if (_likely_(d->fd >= 0))
769 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
774 ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));
776 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
/* Public: adds a one-shot timer source on the given clock. accuracy==0
 * selects DEFAULT_ACCURACY_USEC. Lazily allocates the clock's prioqs
 * and timerfd. NOTE(review): extraction dropped parameters (clock,
 * usec), error cleanup and the final *ret assignment — confirm against
 * upstream. */
786 _public_ int sd_event_add_time(
788 sd_event_source **ret,
792 sd_event_time_handler_t callback,
795 EventSourceType type;
797 struct clock_data *d;
800 assert_return(e, -EINVAL);
801 assert_return(ret, -EINVAL);
802 assert_return(usec != (uint64_t) -1, -EINVAL);
803 assert_return(accuracy != (uint64_t) -1, -EINVAL);
804 assert_return(callback, -EINVAL);
805 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
806 assert_return(!event_pid_changed(e), -ECHILD);
/* Reject clocks we have no timerfd support for. */
808 type = clock_to_event_source_type(clock);
809 assert_return(type >= 0, -ENOTSUP);
811 d = event_get_clock_data(e, type);
815 d->earliest = prioq_new(earliest_time_prioq_compare);
821 d->latest = prioq_new(latest_time_prioq_compare);
827 r = event_setup_timer_fd(e, d, clock);
832 s = source_new(e, type);
837 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
838 s->time.callback = callback;
839 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
840 s->userdata = userdata;
841 s->enabled = SD_EVENT_ONESHOT;
843 r = prioq_put(d->earliest, s, &s->time.earliest_index);
847 r = prioq_put(d->latest, s, &s->time.latest_index);
/* (Re-)creates or updates the loop's signalfd from e->sigset and, on
 * first creation, adds it to epoll with SOURCE_SIGNAL as the data ptr.
 * On epoll failure the fd is closed again. NOTE(review): extraction
 * dropped error checks and the fd assignment — confirm upstream. */
859 static int event_update_signal_fd(sd_event *e) {
860 struct epoll_event ev = {};
/* signalfd(-1, ...) creates; signalfd(fd, ...) just updates the mask,
 * so we only need the epoll ADD on first creation. */
866 add_to_epoll = e->signal_fd < 0;
868 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
878 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
880 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
882 e->signal_fd = safe_close(e->signal_fd);
889 static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
892 return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
/* Public: adds a signal source. Requires the caller to have blocked the
 * signal already (checked via pthread_sigmask); at most one source per
 * signal. NOTE(review): extraction dropped the sig parameter line,
 * several error returns and the final *ret assignment — confirm against
 * upstream. */
895 _public_ int sd_event_add_signal(
897 sd_event_source **ret,
899 sd_event_signal_handler_t callback,
906 assert_return(e, -EINVAL);
907 assert_return(sig > 0, -EINVAL);
908 assert_return(sig < _NSIG, -EINVAL);
909 assert_return(ret, -EINVAL);
910 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
911 assert_return(!event_pid_changed(e), -ECHILD);
/* NULL callback selects the default "exit the loop" handler. */
914 callback = signal_exit_callback;
/* The signal must already be blocked in the calling thread, otherwise
 * the signalfd would race with default signal delivery. */
916 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
920 if (!sigismember(&ss, sig))
923 if (!e->signal_sources) {
924 e->signal_sources = new0(sd_event_source*, _NSIG);
925 if (!e->signal_sources)
927 } else if (e->signal_sources[sig])
930 s = source_new(e, SOURCE_SIGNAL);
935 s->signal.callback = callback;
936 s->userdata = userdata;
937 s->enabled = SD_EVENT_ON;
939 e->signal_sources[sig] = s;
940 assert_se(sigaddset(&e->sigset, sig) == 0);
/* SIGCHLD may already be routed to the signalfd for child sources; only
 * rebuild the signalfd when this actually changes the mask. */
942 if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
943 r = event_update_signal_fd(e);
/* Public: adds a child-process (SIGCHLD/waitid) source for pid; at most
 * one source per pid; starts as ONESHOT. NOTE(review): extraction
 * dropped the pid parameter line, error cleanup and the final *ret
 * assignment — confirm against upstream. */
954 _public_ int sd_event_add_child(
956 sd_event_source **ret,
959 sd_event_child_handler_t callback,
965 assert_return(e, -EINVAL);
966 assert_return(pid > 1, -EINVAL);
967 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
968 assert_return(options != 0, -EINVAL);
969 assert_return(callback, -EINVAL);
970 assert_return(ret, -EINVAL);
971 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
972 assert_return(!event_pid_changed(e), -ECHILD);
974 r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
/* Only one source per child PID. */
978 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
981 s = source_new(e, SOURCE_CHILD);
986 s->child.options = options;
987 s->child.callback = callback;
988 s->userdata = userdata;
989 s->enabled = SD_EVENT_ONESHOT;
991 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
997 e->n_enabled_child_sources ++;
999 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
/* Only rebuild the signalfd if SIGCHLD wasn't already routed there by
 * an explicit signal source. */
1001 if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
1002 r = event_update_signal_fd(e);
/* Force a waitid() sweep on the next iteration in case the child
 * already exited before we registered. */
1009 e->need_process_child = true;
/* Public: adds a defer source, dispatched on the next iteration; starts
 * ONESHOT and immediately pending. NOTE(review): extraction dropped
 * error cleanup and the final *ret assignment — confirm upstream. */
1015 _public_ int sd_event_add_defer(
1017 sd_event_source **ret,
1018 sd_event_handler_t callback,
1024 assert_return(e, -EINVAL);
1025 assert_return(callback, -EINVAL);
1026 assert_return(ret, -EINVAL);
1027 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1028 assert_return(!event_pid_changed(e), -ECHILD);
1030 s = source_new(e, SOURCE_DEFER);
1034 s->defer.callback = callback;
1035 s->userdata = userdata;
1036 s->enabled = SD_EVENT_ONESHOT;
/* Defer sources fire as soon as possible: mark pending right away. */
1038 r = source_set_pending(s, true);
/* Public: adds a post source, which is marked pending whenever any
 * non-post source is dispatched (see source_dispatch()); starts ON.
 * NOTE(review): extraction dropped error cleanup and the final *ret
 * assignment — confirm against upstream. */
1048 _public_ int sd_event_add_post(
1050 sd_event_source **ret,
1051 sd_event_handler_t callback,
1057 assert_return(e, -EINVAL);
1058 assert_return(callback, -EINVAL);
1059 assert_return(ret, -EINVAL);
1060 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1061 assert_return(!event_pid_changed(e), -ECHILD);
1063 r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
1067 s = source_new(e, SOURCE_POST);
1071 s->post.callback = callback;
1072 s->userdata = userdata;
1073 s->enabled = SD_EVENT_ON;
1075 r = set_put(e->post_sources, s);
/* Public: adds an exit source, dispatched from its own prioq when the
 * loop shuts down; starts ONESHOT. NOTE(review): extraction dropped the
 * exit-prioq allocation check, error cleanup and the final *ret
 * assignment — confirm against upstream. */
1085 _public_ int sd_event_add_exit(
1087 sd_event_source **ret,
1088 sd_event_handler_t callback,
1094 assert_return(e, -EINVAL);
1095 assert_return(callback, -EINVAL);
1096 assert_return(ret, -EINVAL);
1097 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1098 assert_return(!event_pid_changed(e), -ECHILD);
/* The exit prioq is allocated lazily on first use. */
1101 e->exit = prioq_new(exit_prioq_compare);
1106 s = source_new(e, SOURCE_EXIT);
1110 s->exit.callback = callback;
1111 s->userdata = userdata;
1112 s->exit.prioq_index = PRIOQ_IDX_NULL;
1113 s->enabled = SD_EVENT_ONESHOT;
1115 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1125 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1126 assert_return(s, NULL);
1128 assert(s->n_ref >= 1);
/* Drops one reference; when it reaches zero the source is freed — but
 * deferred while dispatching (see comment below). Always returns NULL.
 * NOTE(review): extraction dropped the NULL guard, the deferred-free
 * path and the final source_free()/return — confirm upstream. */
1134 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1139 assert(s->n_ref >= 1);
1142 if (s->n_ref <= 0) {
1143 /* Here's a special hack: when we are called from a
1144 * dispatch handler we won't free the event source
1145 * immediately, but we will detach the fd from the
1146 * epoll. This way it is safe for the caller to unref
1147 * the event source and immediately close the fd, but
1148 * we still retain a valid event source object after
1151 if (s->dispatching) {
1152 if (s->type == SOURCE_IO)
1153 source_io_unregister(s);
1161 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1162 assert_return(s, NULL);
1167 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1168 assert_return(s, -EINVAL);
1169 assert_return(s->type != SOURCE_EXIT, -EDOM);
1170 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1171 assert_return(!event_pid_changed(s->event), -ECHILD);
1176 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1177 assert_return(s, -EINVAL);
1178 assert_return(s->type == SOURCE_IO, -EDOM);
1179 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Replaces the fd watched by an I/O source. For a disabled source only
 * the field changes; for an enabled one the new fd is registered first
 * and only then the old registration dropped, so failure leaves the old
 * state intact. NOTE(review): extraction dropped the no-op check, the
 * s->io.fd assignments and the returns — confirm upstream. */
1184 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1187 assert_return(s, -EINVAL);
1188 assert_return(fd >= 0, -EINVAL);
1189 assert_return(s->type == SOURCE_IO, -EDOM);
1190 assert_return(!event_pid_changed(s->event), -ECHILD);
1195 if (s->enabled == SD_EVENT_OFF) {
1197 s->io.registered = false;
/* Enabled: register the new fd before dropping the old one so we can
 * roll back on failure. */
1201 saved_fd = s->io.fd;
1202 assert(s->io.registered);
1205 s->io.registered = false;
1207 r = source_io_register(s, s->enabled, s->io.events);
/* Registration failed: restore the previous fd and state. */
1209 s->io.fd = saved_fd;
1210 s->io.registered = true;
/* Success: silently drop the old fd from epoll. */
1214 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
1220 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1221 assert_return(s, -EINVAL);
1222 assert_return(events, -EINVAL);
1223 assert_return(s->type == SOURCE_IO, -EDOM);
1224 assert_return(!event_pid_changed(s->event), -ECHILD);
1226 *events = s->io.events;
1230 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1233 assert_return(s, -EINVAL);
1234 assert_return(s->type == SOURCE_IO, -EDOM);
1235 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1236 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1237 assert_return(!event_pid_changed(s->event), -ECHILD);
1239 if (s->io.events == events)
1242 if (s->enabled != SD_EVENT_OFF) {
1243 r = source_io_register(s, s->enabled, events);
1248 s->io.events = events;
1249 source_set_pending(s, false);
1254 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1255 assert_return(s, -EINVAL);
1256 assert_return(revents, -EINVAL);
1257 assert_return(s->type == SOURCE_IO, -EDOM);
1258 assert_return(s->pending, -ENODATA);
1259 assert_return(!event_pid_changed(s->event), -ECHILD);
1261 *revents = s->io.revents;
1265 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1266 assert_return(s, -EINVAL);
1267 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1268 assert_return(!event_pid_changed(s->event), -ECHILD);
1270 return s->signal.sig;
1273 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1274 assert_return(s, -EINVAL);
1275 assert_return(!event_pid_changed(s->event), -ECHILD);
1280 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1281 assert_return(s, -EINVAL);
1282 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1283 assert_return(!event_pid_changed(s->event), -ECHILD);
1285 if (s->priority == priority)
1288 s->priority = priority;
1291 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1294 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1296 if (s->type == SOURCE_EXIT)
1297 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1302 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1303 assert_return(s, -EINVAL);
1304 assert_return(m, -EINVAL);
1305 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Changes a source's enable state, updating epoll registration, clock
 * prioqs, the signal mask/fd and the enabled-child counter as needed
 * for the source's type. NOTE(review): extraction dropped the switch
 * framing, the s->enabled assignments, several case labels and error
 * paths — confirm against upstream. */
1311 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1314 assert_return(s, -EINVAL);
1315 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1316 assert_return(!event_pid_changed(s->event), -ECHILD);
1318 /* If we are dead anyway, we are fine with turning off
1319 * sources, but everything else needs to fail. */
1320 if (s->event->state == SD_EVENT_FINISHED)
1321 return m == SD_EVENT_OFF ? 0 : -ESTALE;
1323 if (s->enabled == m)
/* --- Disabling path --- */
1326 if (m == SD_EVENT_OFF) {
1331 r = source_io_unregister(s);
1338 case SOURCE_TIME_REALTIME:
1339 case SOURCE_TIME_MONOTONIC:
1340 case SOURCE_TIME_REALTIME_ALARM:
1341 case SOURCE_TIME_BOOTTIME_ALARM: {
1342 struct clock_data *d;
1345 d = event_get_clock_data(s->event, s->type);
1348 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1349 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Drop the signal from the mask unless SIGCHLD is still needed by
 * enabled child sources. */
1355 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1356 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1357 event_update_signal_fd(s->event);
1365 assert(s->event->n_enabled_child_sources > 0);
1366 s->event->n_enabled_child_sources--;
1368 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1369 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1370 event_update_signal_fd(s->event);
1377 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1386 assert_not_reached("Wut? I shouldn't exist.");
/* --- Enabling path (ON or ONESHOT) --- */
1393 r = source_io_register(s, m, s->io.events);
1400 case SOURCE_TIME_REALTIME:
1401 case SOURCE_TIME_MONOTONIC:
1402 case SOURCE_TIME_REALTIME_ALARM:
1403 case SOURCE_TIME_BOOTTIME_ALARM: {
1404 struct clock_data *d;
1407 d = event_get_clock_data(s->event, s->type);
1410 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1411 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1418 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1419 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1420 event_update_signal_fd(s->event);
/* Child source going from OFF to enabled: bump the counter and make
 * sure SIGCHLD reaches the signalfd. */
1425 if (s->enabled == SD_EVENT_OFF) {
1426 s->event->n_enabled_child_sources++;
1428 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1429 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1430 event_update_signal_fd(s->event);
1439 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1448 assert_not_reached("Wut? I shouldn't exist.");
/* Enable state affects ordering in the pending/prepare queues. */
1453 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1456 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1461 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1462 assert_return(s, -EINVAL);
1463 assert_return(usec, -EINVAL);
1464 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1465 assert_return(!event_pid_changed(s->event), -ECHILD);
1467 *usec = s->time.next;
1471 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1472 struct clock_data *d;
1474 assert_return(s, -EINVAL);
1475 assert_return(usec != (uint64_t) -1, -EINVAL);
1476 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1477 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1478 assert_return(!event_pid_changed(s->event), -ECHILD);
1480 s->time.next = usec;
1482 source_set_pending(s, false);
1484 d = event_get_clock_data(s->event, s->type);
1487 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1488 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1493 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1494 assert_return(s, -EINVAL);
1495 assert_return(usec, -EINVAL);
1496 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1497 assert_return(!event_pid_changed(s->event), -ECHILD);
1499 *usec = s->time.accuracy;
1503 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1504 struct clock_data *d;
1506 assert_return(s, -EINVAL);
1507 assert_return(usec != (uint64_t) -1, -EINVAL);
1508 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1509 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1510 assert_return(!event_pid_changed(s->event), -ECHILD);
1513 usec = DEFAULT_ACCURACY_USEC;
1515 s->time.accuracy = usec;
1517 source_set_pending(s, false);
1519 d = event_get_clock_data(s->event, s->type);
1522 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1527 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
1528 assert_return(s, -EINVAL);
1529 assert_return(clock, -EINVAL);
1530 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1531 assert_return(!event_pid_changed(s->event), -ECHILD);
1533 *clock = event_source_type_to_clock(s->type);
1537 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1538 assert_return(s, -EINVAL);
1539 assert_return(pid, -EINVAL);
1540 assert_return(s->type == SOURCE_CHILD, -EDOM);
1541 assert_return(!event_pid_changed(s->event), -ECHILD);
1543 *pid = s->child.pid;
/* Installs or replaces a per-iteration prepare callback: the callback
 * runs before each poll and typically adjusts the source. Passing NULL
 * removes it. The source is tracked in the prepare prioq only while a
 * callback is set. NOTE(review): extraction dropped the NULL-callback
 * branch, error handling and returns — confirm against upstream. */
1547 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1550 assert_return(s, -EINVAL);
1551 assert_return(s->type != SOURCE_EXIT, -EDOM);
1552 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1553 assert_return(!event_pid_changed(s->event), -ECHILD);
1555 if (s->prepare == callback)
/* Replacing one callback by another: no queue membership change. */
1558 if (callback && s->prepare) {
1559 s->prepare = callback;
1563 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1567 s->prepare = callback;
/* Newly set: enter the prepare queue; cleared: leave it. */
1570 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1574 prioq_remove(s->event->prepare, s, &s->prepare_index);
1579 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1580 assert_return(s, NULL);
1585 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1588 assert_return(s, NULL);
1591 s->userdata = userdata;
/* Picks a wakeup time in [a, b]: the latest point that aligns with the
 * per-machine perturbation offset at minute, then 10s, 1s and 250ms
 * granularity, falling back to b. NOTE(review): extraction dropped the
 * asserts, the "if (c >= a) return c;" checks between the steps and the
 * final return — confirm against upstream. */
1596 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1607 initialize_perturb(e);
1610 Find a good time to wake up again between times a and b. We
1611 have two goals here:
1613 a) We want to wake up as seldom as possible, hence prefer
1614 later times over earlier times.
1616 b) But if we have to wake up, then let's make sure to
1617 dispatch as much as possible on the entire system.
1619 We implement this by waking up everywhere at the same time
1620 within any given minute if we can, synchronised via the
1621 perturbation value determined from the boot ID. If we can't,
1622 then we try to find the same spot in every 10s, then 1s and
1623 then 250ms step. Otherwise, we pick the last possible time
/* Minute-aligned candidate. */
1627 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
1629 if (_unlikely_(c < USEC_PER_MINUTE))
1632 c -= USEC_PER_MINUTE;
/* 10s-aligned candidate. */
1638 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1640 if (_unlikely_(c < USEC_PER_SEC*10))
1643 c -= USEC_PER_SEC*10;
/* 1s-aligned candidate. */
1649 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1651 if (_unlikely_(c < USEC_PER_SEC))
/* 250ms-aligned candidate. */
1660 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1662 if (_unlikely_(c < USEC_PER_MSEC*250))
1665 c -= USEC_PER_MSEC*250;
/* (Re-)arms one clock's timerfd from its prioqs: disarms when no
 * enabled timer exists, otherwise programs an absolute wakeup between
 * the earliest allowed and latest required firing time (via
 * sleep_between()). NOTE(review): extraction dropped the disarm
 * itimerspec setup, several returns and error paths — confirm against
 * upstream. */
1674 static int event_arm_timer(
1676 struct clock_data *d) {
1678 struct itimerspec its = {};
1679 sd_event_source *a, *b;
1686 a = prioq_peek(d->earliest);
/* No armable timer: disarm the timerfd if it was armed. */
1687 if (!a || a->enabled == SD_EVENT_OFF) {
1692 if (d->next == (usec_t) -1)
/* its is zeroed => timerfd_settime() disarms. */
1696 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1700 d->next = (usec_t) -1;
1704 b = prioq_peek(d->latest);
1705 assert_se(b && b->enabled != SD_EVENT_OFF);
1707 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1711 assert_se(d->fd >= 0);
/* t == 0: fire immediately; a zero it_value would disarm, so use 1ns.
 * (Typo in original comment: "don'" for "don't".) */
1714 /* We don' want to disarm here, just mean some time looooong ago. */
1715 its.it_value.tv_sec = 0;
1716 its.it_value.tv_nsec = 1;
1718 timespec_store(&its.it_value, t);
1720 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1728 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1731 assert(s->type == SOURCE_IO);
1733 /* If the event source was already pending, we just OR in the
1734 * new revents, otherwise we reset the value. The ORing is
1735 * necessary to handle EPOLLONESHOT events properly where
1736 * readability might happen independently of writability, and
1737 * we need to keep track of both */
1740 s->io.revents |= revents;
1742 s->io.revents = revents;
1744 return source_set_pending(s, true);
/* Drains a timerfd after an epoll wakeup (reads the 8-byte expiration
 * counter) and invalidates the cached arm time so the timer gets
 * re-armed. NOTE(review): extraction dropped the returns around the
 * read() and the final return — confirm against upstream. */
1747 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
/* timerfd only ever signals readability. */
1754 assert_return(events == EPOLLIN, -EIO);
1756 ss = read(fd, &x, sizeof(x));
/* Spurious wakeup: nothing to flush. */
1758 if (errno == EAGAIN || errno == EINTR)
/* A short read from a timerfd would indicate kernel breakage. */
1764 if (_unlikely_(ss != sizeof(x)))
1768 *next = (usec_t) -1;
/* Marks every timer source of one clock whose time has come as pending,
 * popping them off the top of the "earliest" prioq until the head is
 * unexpired, disabled or already pending. NOTE(review): extraction
 * dropped the loop framing, the "n" comparison and the final
 * return — confirm against upstream. */
1773 static int process_timer(
1776 struct clock_data *d) {
1785 s = prioq_peek(d->earliest);
/* Stop at the first head entry that need not fire now. */
1788 s->enabled == SD_EVENT_OFF ||
1792 r = source_set_pending(s, true);
/* Pending entries sink to the end of both prioqs, exposing the next
 * candidate at the head. */
1796 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1797 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Polls all registered child sources with waitid(WNOHANG|WNOWAIT) and
 * marks those with a state change as pending. See the long comment
 * below for why P_PID per child is used instead of P_ALL. NOTE(review):
 * extraction dropped loop framing, the "already pending" skip, error
 * checks and the final return — confirm against upstream. */
1803 static int process_child(sd_event *e) {
1810 e->need_process_child = false;
1813 So, this is ugly. We iteratively invoke waitid() with P_PID
1814 + WNOHANG for each PID we wait for, instead of using
1815 P_ALL. This is because we only want to get child
1816 information of very specific child processes, and not all
1817 of them. We might not have processed the SIGCHLD even of a
1818 previous invocation and we don't want to maintain a
1819 unbounded *per-child* event queue, hence we really don't
1820 want anything flushed out of the kernel's queue that we
1821 don't care about. Since this is O(n) this means that if you
1822 have a lot of processes you probably want to handle SIGCHLD
1825 We do not reap the children here (by using WNOWAIT), this
1826 is only done after the event source is dispatched so that
1827 the callback still sees the process as a zombie.
1830 HASHMAP_FOREACH(s, e->child_sources, i) {
1831 assert(s->type == SOURCE_CHILD);
1836 if (s->enabled == SD_EVENT_OFF)
1839 zero(s->child.siginfo);
/* WNOWAIT only when the caller asked for WEXITED, so the zombie is
 * left in place for the dispatch callback. */
1840 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1841 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
/* si_pid != 0 <=> a state change was reported. */
1845 if (s->child.siginfo.si_pid != 0) {
1847 s->child.siginfo.si_code == CLD_EXITED ||
1848 s->child.siginfo.si_code == CLD_KILLED ||
1849 s->child.siginfo.si_code == CLD_DUMPED;
1851 if (!zombie && (s->child.options & WEXITED)) {
1852 /* If the child isn't dead then let's
1853 * immediately remove the state change
1854 * from the queue, since there's no
1855 * benefit in leaving it queued */
1857 assert(s->child.options & (WSTOPPED|WCONTINUED));
1858 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
1861 r = source_set_pending(s, true);
/* Drains the signalfd and marks the matching per-signal event sources
 * pending; SIGCHLD additionally triggers child-state collection via
 * process_child(). */
static int process_signal(sd_event *e, uint32_t events) {
        bool read_one = false;

        /* Only called when at least one signal source exists */
        assert(e->signal_sources);

        /* signalfds only ever signal readability */
        assert_return(events == EPOLLIN, -EIO);

                struct signalfd_siginfo si;

                ss = read(e->signal_fd, &si, sizeof(si));
                if (errno == EAGAIN || errno == EINTR)
                if (_unlikely_(ss != sizeof(si)))

                /* signal_sources is indexed by signal number */
                s = e->signal_sources[si.ssi_signo];
                if (si.ssi_signo == SIGCHLD) {
                        r = process_child(e);

                /* Stash the siginfo for the dispatch callback */
                s->signal.siginfo = si;
                r = source_set_pending(s, true);
/* Runs a single pending event source's user callback; a callback
 * returning an error gets the source disabled (with a debug log), it
 * does not abort the loop. */
static int source_dispatch(sd_event_source *s) {

        /* Exit sources are dispatched from the exit prioq and are
         * never marked pending */
        assert(s->pending || s->type == SOURCE_EXIT);

        /* Clear the pending flag before the callback runs so the
         * callback may re-arm the source */
        if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
                r = source_set_pending(s, false);

        if (s->type != SOURCE_POST) {

                /* If we execute a non-post source, let's mark all
                 * post sources as pending */

                SET_FOREACH(z, s->event->post_sources, i) {
                        if (z->enabled == SD_EVENT_OFF)

                        r = source_set_pending(z, true);

        /* One-shot sources are disabled before their single dispatch */
        if (s->enabled == SD_EVENT_ONESHOT) {
                r = sd_event_source_set_enabled(s, SD_EVENT_OFF);

        /* NOTE(review): presumably flags the source as "inside its own
         * callback" for re-entrancy/lifetime handling elsewhere -- the
         * consumers of this flag are not visible here, confirm. */
        s->dispatching = true;

                r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);

        case SOURCE_TIME_REALTIME:
        case SOURCE_TIME_MONOTONIC:
        case SOURCE_TIME_REALTIME_ALARM:
        case SOURCE_TIME_BOOTTIME_ALARM:
                r = s->time.callback(s, s->time.next, s->userdata);

                r = s->signal.callback(s, &s->signal.siginfo, s->userdata);

        case SOURCE_CHILD: {

                /* Dead if the siginfo collected by process_child()
                 * says exited/killed/dumped */
                zombie = s->child.siginfo.si_code == CLD_EXITED ||
                         s->child.siginfo.si_code == CLD_KILLED ||
                         s->child.siginfo.si_code == CLD_DUMPED;

                r = s->child.callback(s, &s->child.siginfo, s->userdata);

                /* Now, reap the PID for good. */

                        waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);

                r = s->defer.callback(s, s->userdata);

                r = s->post.callback(s, s->userdata);

                r = s->exit.callback(s, s->userdata);

        case SOURCE_WATCHDOG:
        /* NOTE(review): "_SOUFCE_" matches the typo'd enum constant
         * declared at the top of this file; if it is ever renamed,
         * both the declaration and this use must change together. */
        case _SOUFCE_EVENT_SOURCE_TYPE_MAX:
        case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
                assert_not_reached("Wut? I shouldn't exist.");

        s->dispatching = false;

                log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));

        sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Runs registered prepare callbacks before polling, at most once per
 * loop iteration per source; a failing prepare callback gets its
 * source disabled (with a debug log). */
static int event_prepare(sd_event *e) {

                s = prioq_peek(e->prepare);
                if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)

                /* Record this iteration so the source is not prepared
                 * twice in the same pass; reshuffling moves it behind
                 * sources not yet prepared this iteration. */
                s->prepare_iteration = e->iteration;
                r = prioq_reshuffle(e->prepare, s, &s->prepare_index);

                s->dispatching = true;
                r = s->prepare(s, s->userdata);
                s->dispatching = false;

                        log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));

                        sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Dispatches the highest-priority enabled exit source; when none is
 * left the loop transitions to SD_EVENT_FINISHED. */
static int dispatch_exit(sd_event *e) {

        p = prioq_peek(e->exit);
        if (!p || p->enabled == SD_EVENT_OFF) {
                /* No exit sources remain: the loop is done */
                e->state = SD_EVENT_FINISHED;

        /* Callbacks can observe the EXITING state while they run */
        e->state = SD_EVENT_EXITING;
        r = source_dispatch(p);
        e->state = SD_EVENT_PASSIVE;
/* Returns the highest-priority pending event source, or NULL when the
 * pending queue is empty or its head is disabled. */
static sd_event_source* event_next_pending(sd_event *e) {

        p = prioq_peek(e->pending);

        if (p->enabled == SD_EVENT_OFF)
/* Programs the internal watchdog timerfd to fire between 1/2 and 3/4
 * of the watchdog period after the last ping (absolute CLOCK_MONOTONIC
 * time). */
static int arm_watchdog(sd_event *e) {
        struct itimerspec its = {};

        assert(e->watchdog_fd >= 0);

        t = sleep_between(e,
                          e->watchdog_last + (e->watchdog_period / 2),
                          e->watchdog_last + (e->watchdog_period * 3 / 4));

        timespec_store(&its.it_value, t);

        /* Make sure we never set the watchdog to 0, which tells the
         * kernel to disable it. */
        if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
                its.it_value.tv_nsec = 1;

        r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
/* Pings the service manager ("WATCHDOG=1" via sd_notify()) and rearms
 * the watchdog timer, rate-limited to at most once per quarter
 * period. */
static int process_watchdog(sd_event *e) {

        /* Don't notify watchdog too often */
        if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)

        sd_notify(false, "WATCHDOG=1");
        /* Uses the loop's cached wakeup timestamp, not a fresh now() */
        e->watchdog_last = e->timestamp.monotonic;

        return arm_watchdog(e);
/* Runs one iteration of the event loop: prepare callbacks, arm the
 * per-clock timerfds, epoll_wait, flush kernel events, then dispatch
 * one pending source.
 *
 * timeout is in usec; (uint64_t) -1 means block indefinitely. */
_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
        struct epoll_event *ev_queue;
        unsigned ev_queue_max;

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        /* Once exit was requested, only exit sources are dispatched */
        if (e->exit_requested)
                return dispatch_exit(e);

        e->state = SD_EVENT_RUNNING;

        r = event_prepare(e);

        /* Reprogram the per-clock timerfds before polling */
        r = event_arm_timer(e, &e->realtime);

        r = event_arm_timer(e, &e->monotonic);

        r = event_arm_timer(e, &e->realtime_alarm);

        r = event_arm_timer(e, &e->boottime_alarm);

        /* Something is dispatchable already, so don't block in poll */
        if (event_next_pending(e) || e->need_process_child)

        /* Result array on the stack, bounded by EPOLL_QUEUE_MAX */
        ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
        ev_queue = newa(struct epoll_event, ev_queue_max);

        /* usec timeout rounded up to the ms granularity epoll takes */
        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
                       timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));

                /* EAGAIN/EINTR are treated as a successful early wakeup */
                r = errno == EAGAIN || errno == EINTR ? 1 : -errno;

        /* Take the timestamps once per wakeup; all timer processing
         * and dispatches below share them */
        dual_timestamp_get(&e->timestamp);
        e->timestamp_boottime = now(CLOCK_BOOTTIME);

        for (i = 0; i < m; i++) {

                /* Internal fds carry an INT_TO_PTR-encoded source type
                 * in data.ptr; anything else is a user I/O source whose
                 * sd_event_source pointer is stored there instead */
                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
                        r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
                        r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
                        r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
                        r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
                        r = process_signal(e, ev_queue[i].events);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);

                        r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);

        r = process_watchdog(e);

        /* Mark elapsed timers on each clock as pending. Note: both
         * CLOCK_REALTIME and CLOCK_REALTIME_ALARM timers are checked
         * against the same realtime timestamp. */
        r = process_timer(e, e->timestamp.realtime, &e->realtime);

        r = process_timer(e, e->timestamp.monotonic, &e->monotonic);

        r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);

        r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);

        if (e->need_process_child) {
                r = process_child(e);

        /* Dispatch exactly one source per sd_event_run() call */
        p = event_next_pending(e);

        r = source_dispatch(p);

        e->state = SD_EVENT_PASSIVE;
/* Runs sd_event_run() repeatedly (blocking indefinitely each time)
 * until the loop reaches SD_EVENT_FINISHED. */
_public_ int sd_event_loop(sd_event *e) {

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        while (e->state != SD_EVENT_FINISHED) {
                r = sd_event_run(e, (uint64_t) -1);
/* Queries the loop's current state (SD_EVENT_PASSIVE, _RUNNING, ...). */
_public_ int sd_event_get_state(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
/* Retrieves the exit code previously set via sd_event_exit(); only
 * meaningful once an exit has actually been requested. */
_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
        assert_return(e, -EINVAL);
        assert_return(code, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        /* No exit requested yet: nothing to report */
        if (!e->exit_requested)

        *code = e->exit_code;
/* Requests the event loop to exit with the given code; subsequent
 * sd_event_run() calls will dispatch exit sources instead of normal
 * work (see the exit_requested check there). */
_public_ int sd_event_exit(sd_event *e, int code) {
        assert_return(e, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit_requested = true;
        e->exit_code = code;
/* Returns the timestamp the event loop cached at its last wakeup for
 * the given clock -- stable for the whole iteration, unlike calling
 * now() repeatedly. */
_public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        /* If we haven't run yet, just get the actual time */
        if (!dual_timestamp_is_set(&e->timestamp))

        /* The realtime timestamp serves both realtime clocks */
        case CLOCK_REALTIME:
        case CLOCK_REALTIME_ALARM:
                *usec = e->timestamp.realtime;

        case CLOCK_MONOTONIC:
                *usec = e->timestamp.monotonic;

        case CLOCK_BOOTTIME_ALARM:
                *usec = e->timestamp_boottime;
/* Returns (creating it lazily) the calling thread's default event
 * loop; the cache is thread_local, so every thread gets its own. */
_public_ int sd_event_default(sd_event **ret) {

        static thread_local sd_event *default_event = NULL;

        return !!default_event;

        /* Already created: hand out another reference */
        if (default_event) {
                *ret = sd_event_ref(default_event);

        r = sd_event_new(&e);

        /* Remember where the thread-local cache lives -- presumably so
         * it can be cleared when this event loop is freed (the freeing
         * code is not visible here; confirm). */
        e->default_event_ptr = &default_event;
/* Reports the thread id this event loop is associated with, if any. */
_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
        assert_return(e, -EINVAL);
        assert_return(tid, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
/* Enables or disables service-manager watchdog support for this loop.
 * When enabled, the loop pings the manager ("WATCHDOG=1") via an
 * internal CLOCK_MONOTONIC timerfd registered in the epoll set. */
_public_ int sd_event_set_watchdog(sd_event *e, int b) {

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        /* Already in the requested state? */
        if (e->watchdog == !!b)

                struct epoll_event ev = {};

                /* Query the watchdog period the manager configured;
                 * "false" = leave the environment variables in place
                 * for child processes (sd-daemon API). */
                r = sd_watchdog_enabled(false, &e->watchdog_period);

                /* Issue first ping immediately */
                sd_notify(false, "WATCHDOG=1");
                e->watchdog_last = now(CLOCK_MONOTONIC);

                e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
                if (e->watchdog_fd < 0)

                r = arm_watchdog(e);

                /* Register the timerfd with the SOURCE_WATCHDOG
                 * sentinel in data.ptr, so the poll loop can tell it
                 * apart from user I/O sources */
                ev.events = EPOLLIN;
                ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);

                r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);

                /* Disable path: unregister and close the timerfd */
                if (e->watchdog_fd >= 0) {
                        epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
                        e->watchdog_fd = safe_close(e->watchdog_fd);

        /* Error path: don't leak the fd */
        e->watchdog_fd = safe_close(e->watchdog_fd);
2419 _public_ int sd_event_get_watchdog(sd_event *e) {
2420 assert_return(e, -EINVAL);
2421 assert_return(!event_pid_changed(e), -ECHILD);