/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <sys/epoll.h>
#include <sys/timerfd.h>

#include "sd-daemon.h"
#include "time-util.h"

#define EPOLL_QUEUE_MAX 512U
#define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)

typedef enum EventSourceType {

struct sd_event_source {
        sd_event_handler_t prepare;

        EventSourceType type:4;

        unsigned pending_index;
        unsigned prepare_index;
        unsigned pending_iteration;
        unsigned prepare_iteration;

        sd_event_io_handler_t callback;

        sd_event_time_handler_t callback;
        usec_t next, accuracy;
        unsigned earliest_index;
        unsigned latest_index;

        sd_event_signal_handler_t callback;
        struct signalfd_siginfo siginfo;

        sd_event_child_handler_t callback;

        sd_event_handler_t callback;

        sd_event_handler_t callback;

        sd_event_handler_t callback;
        unsigned prioq_index;

        /* For both clocks we maintain two priority queues each, one
         * ordered for the earliest times the events may be
         * dispatched, and one ordered by the latest times they must
         * have been dispatched. The range between the top entries in
         * the two prioqs is the time window we can freely schedule
         * wakeups in. */
        Prioq *monotonic_earliest;
        Prioq *monotonic_latest;
        Prioq *realtime_earliest;
        Prioq *realtime_latest;
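        /* Worked example: if the head of the earliest queue may fire
         * at t = 10.00s and the head of the latest queue must have
         * fired by t = 10.25s, any single wakeup in [10.00s, 10.25s]
         * satisfies both sources; sleep_between() below picks a
         * coalescing-friendly point inside that window. */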
        usec_t realtime_next, monotonic_next;

        sd_event_source **signal_sources;

        Hashmap *child_sources;
        unsigned n_enabled_child_sources;

        dual_timestamp timestamp;

        bool exit_requested:1;
        bool need_process_child:1;

        sd_event **default_event_ptr;

        usec_t watchdog_last, watchdog_period;

static int pending_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)

        /* Lower priority values first */
        if (x->priority < y->priority)
        if (x->priority > y->priority)

        /* Older entries first */
        if (x->pending_iteration < y->pending_iteration)
        if (x->pending_iteration > y->pending_iteration)

        /* Stability for the rest */

static int prepare_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        /* Move most recently prepared ones last, so that we can stop
         * preparing as soon as we hit one that has already been
         * prepared in the current iteration */
        if (x->prepare_iteration < y->prepare_iteration)
        if (x->prepare_iteration > y->prepare_iteration)

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)

        /* Lower priority values first */
        if (x->priority < y->priority)
        if (x->priority > y->priority)

        /* Stability for the rest */

static int earliest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(x->type == SOURCE_MONOTONIC || x->type == SOURCE_REALTIME);
        assert(y->type == SOURCE_MONOTONIC || y->type == SOURCE_REALTIME);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
        if (x->pending && !y->pending)

        if (x->time.next < y->time.next)
        if (x->time.next > y->time.next)

        /* Stability for the rest */

static int latest_time_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert((x->type == SOURCE_MONOTONIC && y->type == SOURCE_MONOTONIC) ||
               (x->type == SOURCE_REALTIME && y->type == SOURCE_REALTIME));

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)

        /* Move the pending ones to the end */
        if (!x->pending && y->pending)
        if (x->pending && !y->pending)

        if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
        if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)

        /* Stability for the rest */

static int exit_prioq_compare(const void *a, const void *b) {
        const sd_event_source *x = a, *y = b;

        assert(x->type == SOURCE_EXIT);
        assert(y->type == SOURCE_EXIT);

        /* Enabled ones first */
        if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
        if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)

        /* Lower priority values first */
        if (x->priority < y->priority)
        if (x->priority > y->priority)

        /* Stability for the rest */

static void event_free(sd_event *e) {
        assert(e->n_sources == 0);

        if (e->default_event_ptr)
                *(e->default_event_ptr) = NULL;

        safe_close(e->epoll_fd);
        safe_close(e->signal_fd);
        safe_close(e->realtime_fd);
        safe_close(e->monotonic_fd);
        safe_close(e->watchdog_fd);

        prioq_free(e->pending);
        prioq_free(e->prepare);
        prioq_free(e->monotonic_earliest);
        prioq_free(e->monotonic_latest);
        prioq_free(e->realtime_earliest);
        prioq_free(e->realtime_latest);

        free(e->signal_sources);

        hashmap_free(e->child_sources);
        set_free(e->post_sources);

_public_ int sd_event_new(sd_event** ret) {
        assert_return(ret, -EINVAL);

        e = new0(sd_event, 1);

        e->signal_fd = e->realtime_fd = e->monotonic_fd = e->watchdog_fd = e->epoll_fd = -1;
        e->realtime_next = e->monotonic_next = (usec_t) -1;
        e->original_pid = getpid();

        assert_se(sigemptyset(&e->sigset) == 0);

        e->pending = prioq_new(pending_prioq_compare);

        e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (e->epoll_fd < 0) {

_public_ sd_event* sd_event_ref(sd_event *e) {
        assert_return(e, NULL);

        assert(e->n_ref >= 1);

_public_ sd_event* sd_event_unref(sd_event *e) {

        assert(e->n_ref >= 1);

static bool event_pid_changed(sd_event *e) {

        /* We don't support people creating an event loop and keeping
         * it around over a fork(). Let's complain. */

        return e->original_pid != getpid();

static int source_io_unregister(sd_event_source *s) {

        assert(s->type == SOURCE_IO);

        if (!s->io.registered)

        r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);

        s->io.registered = false;

static int source_io_register(

        struct epoll_event ev = {};

        assert(s->type == SOURCE_IO);
        assert(enabled != SD_EVENT_OFF);

        if (enabled == SD_EVENT_ONESHOT)
                ev.events |= EPOLLONESHOT;

        if (s->io.registered)
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
        else
                r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);

        s->io.registered = true;

static void source_free(sd_event_source *s) {
        assert(s->event->n_sources > 0);

                source_io_unregister(s);

        case SOURCE_MONOTONIC:
                prioq_remove(s->event->monotonic_earliest, s, &s->time.earliest_index);
                prioq_remove(s->event->monotonic_latest, s, &s->time.latest_index);

        case SOURCE_REALTIME:
                prioq_remove(s->event->realtime_earliest, s, &s->time.earliest_index);
                prioq_remove(s->event->realtime_latest, s, &s->time.latest_index);

                if (s->signal.sig > 0) {
                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
                                assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);

                        if (s->event->signal_sources)
                                s->event->signal_sources[s->signal.sig] = NULL;

                if (s->child.pid > 0) {
                        if (s->enabled != SD_EVENT_OFF) {
                                assert(s->event->n_enabled_child_sources > 0);
                                s->event->n_enabled_child_sources--;

                        if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
                                assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);

                        hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));

                set_remove(s->event->post_sources, s);

                prioq_remove(s->event->exit, s, &s->exit.prioq_index);

        case SOURCE_WATCHDOG:
                assert_not_reached("Wut? I shouldn't exist.");

        prioq_remove(s->event->pending, s, &s->pending_index);

        prioq_remove(s->event->prepare, s, &s->prepare_index);

        s->event->n_sources--;
        sd_event_unref(s->event);

static int source_set_pending(sd_event_source *s, bool b) {

        assert(s->type != SOURCE_EXIT);

        s->pending_iteration = s->event->iteration;

        r = prioq_put(s->event->pending, s, &s->pending_index);

        assert_se(prioq_remove(s->event->pending, s, &s->pending_index));

        if (s->type == SOURCE_REALTIME) {
                prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
                prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
        } else if (s->type == SOURCE_MONOTONIC) {
                prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
                prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);

static sd_event_source *source_new(sd_event *e, EventSourceType type) {

        s = new0(sd_event_source, 1);

        s->event = sd_event_ref(e);

        s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;

_public_ int sd_event_add_io(
                sd_event_source **ret,
                sd_event_io_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(fd >= 0, -EINVAL);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, SOURCE_IO);

        s->io.events = events;
        s->io.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = source_io_register(s, s->enabled, events);
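/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): watch a non-blocking socket fd for input. The example_*
 * names are assumptions for illustration only. */
#if 0
static int example_on_input(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        char buf[256];
        ssize_t n;

        /* Drain what is currently readable; with plain EPOLLIN
         * (level-triggered) we are called again if data remains. */
        n = read(fd, buf, sizeof(buf));
        if (n < 0 && errno != EAGAIN)
                return -errno;

        return 0;
}

static int example_watch_fd(sd_event *e, int fd) {
        sd_event_source *s = NULL;
        return sd_event_add_io(e, &s, fd, EPOLLIN, example_on_input, NULL);
}
#endif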
static int event_setup_timer_fd(
                EventSourceType type,

        sd_id128_t bootid = {};
        struct epoll_event ev = {};

        if (_likely_(*timer_fd >= 0))

        fd = timerfd_create(id, TFD_NONBLOCK|TFD_CLOEXEC);

        ev.data.ptr = INT_TO_PTR(type);

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);

        /* When we sleep for longer, we try to realign the wakeup to
           the same time within each minute/second/250ms, so that
           events all across the system can be coalesced into a single
           CPU wakeup. However, let's take some system-specific
           randomness for this value, so that in a network of systems
           with synced clocks timer events are distributed a
           bit. Here, we calculate a perturbation usec offset from the
           boot ID. */

        if (sd_id128_get_boot(&bootid) >= 0)
                e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
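        /* Example: with perturb == 17300000 (17.3s into the minute), a
         * timer that may fire anywhere within a given minute is aligned
         * to second 17.3 of that minute on this machine, while other
         * machines derive different offsets from their own boot IDs, so
         * a fleet with synced clocks does not wake up in lockstep. */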
static int event_add_time_internal(
                sd_event_source **ret,
                EventSourceType type,
                sd_event_time_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(accuracy != (uint64_t) -1, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        *earliest = prioq_new(earliest_time_prioq_compare);

        *latest = prioq_new(latest_time_prioq_compare);

        r = event_setup_timer_fd(e, type, timer_fd, id);

        s = source_new(e, type);

        s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
        s->time.callback = callback;
        s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(*earliest, s, &s->time.earliest_index);

        r = prioq_put(*latest, s, &s->time.latest_index);

_public_ int sd_event_add_monotonic(sd_event *e,
                                    sd_event_source **ret,
                                    sd_event_time_handler_t callback,

        return event_add_time_internal(e, ret, SOURCE_MONOTONIC, &e->monotonic_fd, CLOCK_MONOTONIC, &e->monotonic_earliest, &e->monotonic_latest, usec, accuracy, callback, userdata);

_public_ int sd_event_add_realtime(sd_event *e,
                                   sd_event_source **ret,
                                   sd_event_time_handler_t callback,

        return event_add_time_internal(e, ret, SOURCE_REALTIME, &e->realtime_fd, CLOCK_REALTIME, &e->realtime_earliest, &e->realtime_latest, usec, accuracy, callback, userdata);
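/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): a repeating 5s monotonic timer with 100ms accuracy.
 * One-shot sources are disabled after dispatch, so the callback
 * re-arms by pushing the deadline forward and enabling again. The
 * example_* names are assumptions. */
#if 0
static int example_tick(sd_event_source *s, uint64_t usec, void *userdata) {
        /* Re-arm for another 5s from the deadline that just fired. */
        sd_event_source_set_time(s, usec + 5 * USEC_PER_SEC);
        sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
        return 0;
}

static int example_add_tick(sd_event *e) {
        sd_event_source *s = NULL;
        return sd_event_add_monotonic(e, &s, now(CLOCK_MONOTONIC) + 5 * USEC_PER_SEC,
                                      100 * USEC_PER_MSEC, example_tick, NULL);
}
#endif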
static int event_update_signal_fd(sd_event *e) {
        struct epoll_event ev = {};

        add_to_epoll = e->signal_fd < 0;

        r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);

        ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);

        r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
                e->signal_fd = safe_close(e->signal_fd);

_public_ int sd_event_add_signal(
                sd_event_source **ret,
                sd_event_signal_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(sig > 0, -EINVAL);
        assert_return(sig < _NSIG, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = pthread_sigmask(SIG_SETMASK, NULL, &ss);

        if (!sigismember(&ss, sig))

        if (!e->signal_sources) {
                e->signal_sources = new0(sd_event_source*, _NSIG);
                if (!e->signal_sources)
        } else if (e->signal_sources[sig])

        s = source_new(e, SOURCE_SIGNAL);

        s->signal.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        e->signal_sources[sig] = s;
        assert_se(sigaddset(&e->sigset, sig) == 0);

        if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
                r = event_update_signal_fd(e);
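/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): sd_event_add_signal() insists the signal is already blocked
 * (see the sigismember() check above), so block it first. The
 * example_* names are assumptions. */
#if 0
static int example_on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

static int example_watch_sigterm(sd_event *e) {
        sigset_t ss;
        sd_event_source *s = NULL;

        sigemptyset(&ss);
        sigaddset(&ss, SIGTERM);
        if (sigprocmask(SIG_BLOCK, &ss, NULL) < 0)
                return -errno;

        return sd_event_add_signal(e, &s, SIGTERM, example_on_sigterm, NULL);
}
#endif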
_public_ int sd_event_add_child(
                sd_event_source **ret,
                sd_event_child_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(pid > 1, -EINVAL);
        assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
        assert_return(options != 0, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);

        if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))

        s = source_new(e, SOURCE_CHILD);

        s->child.options = options;
        s->child.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);

        e->n_enabled_child_sources++;

        assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);

        if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
                r = event_update_signal_fd(e);

        e->need_process_child = true;
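/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): watch a forked child. The handler sees the exit status in
 * the siginfo_t while the process is still a zombie; the loop reaps
 * it right after the callback returns. The example_* names are
 * assumptions. */
#if 0
static int example_on_child(sd_event_source *s, const siginfo_t *si, void *userdata) {
        if (si->si_code == CLD_EXITED)
                log_debug("child %i exited with status %i", si->si_pid, si->si_status);
        return 0;
}

static int example_watch_child(sd_event *e, pid_t pid) {
        sd_event_source *s = NULL;
        return sd_event_add_child(e, &s, pid, WEXITED, example_on_child, NULL);
}
#endif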
_public_ int sd_event_add_defer(
                sd_event_source **ret,
                sd_event_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        s = source_new(e, SOURCE_DEFER);

        s->defer.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ONESHOT;

        r = source_set_pending(s, true);

_public_ int sd_event_add_post(
                sd_event_source **ret,
                sd_event_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);

        s = source_new(e, SOURCE_POST);

        s->post.callback = callback;
        s->userdata = userdata;
        s->enabled = SD_EVENT_ON;

        r = set_put(e->post_sources, s);

_public_ int sd_event_add_exit(
                sd_event_source **ret,
                sd_event_handler_t callback,

        assert_return(e, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit = prioq_new(exit_prioq_compare);

        s = source_new(e, SOURCE_EXIT);

        s->exit.callback = callback;
        s->userdata = userdata;
        s->exit.prioq_index = PRIOQ_IDX_NULL;
        s->enabled = SD_EVENT_ONESHOT;

        r = prioq_put(s->event->exit, s, &s->exit.prioq_index);

_public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
        assert_return(s, NULL);

        assert(s->n_ref >= 1);

_public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {

        assert(s->n_ref >= 1);

        if (s->n_ref <= 0) {
                /* Here's a special hack: when we are called from a
                 * dispatch handler we won't free the event source
                 * immediately, but we will detach the fd from the
                 * epoll. This way it is safe for the caller to unref
                 * the event source and immediately close the fd, but
                 * we still retain a valid event source object after
                 * the callback. */

                if (s->dispatching) {
                        if (s->type == SOURCE_IO)
                                source_io_unregister(s);
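                        /* Example: an io callback may thus safely do
                         *
                         *     sd_event_source_unref(s);
                         *     close(fd);
                         *     return 0;
                         *
                         * the fd is detached from the epoll right
                         * here, and the source object itself is freed
                         * only once dispatching has finished. */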
_public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
        assert_return(s, NULL);

_public_ int sd_event_source_get_pending(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

_public_ int sd_event_source_get_io_fd(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

_public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {

        assert_return(s, -EINVAL);
        assert_return(fd >= 0, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->enabled == SD_EVENT_OFF) {
                s->io.registered = false;

                saved_fd = s->io.fd;
                assert(s->io.registered);

                s->io.registered = false;

                r = source_io_register(s, s->enabled, s->io.events);
                        s->io.fd = saved_fd;
                        s->io.registered = true;

                epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);

_public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
        assert_return(s, -EINVAL);
        assert_return(events, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *events = s->io.events;

_public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {

        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->io.events == events)

        if (s->enabled != SD_EVENT_OFF) {
                r = source_io_register(s, s->enabled, events);

        s->io.events = events;
        source_set_pending(s, false);

_public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
        assert_return(s, -EINVAL);
        assert_return(revents, -EINVAL);
        assert_return(s->type == SOURCE_IO, -EDOM);
        assert_return(s->pending, -ENODATA);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *revents = s->io.revents;

_public_ int sd_event_source_get_signal(sd_event_source *s) {
        assert_return(s, -EINVAL);
        assert_return(s->type == SOURCE_SIGNAL, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        return s->signal.sig;

_public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
        assert_return(s, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

_public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
        assert_return(s, -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->priority == priority)

        s->priority = priority;

        prioq_reshuffle(s->event->pending, s, &s->pending_index);

        prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

        if (s->type == SOURCE_EXIT)
                prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);

_public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
        assert_return(s, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(!event_pid_changed(s->event), -ECHILD);

_public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {

        assert_return(s, -EINVAL);
        assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->enabled == m)

        if (m == SD_EVENT_OFF) {

                        r = source_io_unregister(s);

                case SOURCE_MONOTONIC:
                        prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);

                case SOURCE_REALTIME:
                        prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);

                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
                                assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
                                event_update_signal_fd(s->event);

                        assert(s->event->n_enabled_child_sources > 0);
                        s->event->n_enabled_child_sources--;

                        if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
                                assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
                                event_update_signal_fd(s->event);

                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);

                case SOURCE_WATCHDOG:
                        assert_not_reached("Wut? I shouldn't exist.");

                        r = source_io_register(s, m, s->io.events);

                case SOURCE_MONOTONIC:
                        prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);

                case SOURCE_REALTIME:
                        prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
                        prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);

                        if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
                                assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
                                event_update_signal_fd(s->event);

                        if (s->enabled == SD_EVENT_OFF) {
                                s->event->n_enabled_child_sources++;

                                if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
                                        assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
                                        event_update_signal_fd(s->event);

                        prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);

                case SOURCE_WATCHDOG:
                        assert_not_reached("Wut? I shouldn't exist.");

        prioq_reshuffle(s->event->pending, s, &s->pending_index);

        prioq_reshuffle(s->event->prepare, s, &s->prepare_index);

_public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.next;

_public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
        assert_return(s, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        s->time.next = usec;

        source_set_pending(s, false);

        if (s->type == SOURCE_REALTIME) {
                prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
                prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
        } else {
                prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
                prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);

_public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
        assert_return(s, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *usec = s->time.accuracy;

_public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
        assert_return(s, -EINVAL);
        assert_return(usec != (uint64_t) -1, -EINVAL);
        assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

                usec = DEFAULT_ACCURACY_USEC;

        s->time.accuracy = usec;

        source_set_pending(s, false);

        if (s->type == SOURCE_REALTIME)
                prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
        else
                prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);

_public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
        assert_return(s, -EINVAL);
        assert_return(pid, -EINVAL);
        assert_return(s->type == SOURCE_CHILD, -EDOM);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        *pid = s->child.pid;

_public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {

        assert_return(s, -EINVAL);
        assert_return(s->type != SOURCE_EXIT, -EDOM);
        assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(s->event), -ECHILD);

        if (s->prepare == callback)

        if (callback && s->prepare) {
                s->prepare = callback;

        r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);

        s->prepare = callback;

                r = prioq_put(s->event->prepare, s, &s->prepare_index);

                prioq_remove(s->event->prepare, s, &s->prepare_index);

_public_ void* sd_event_source_get_userdata(sd_event_source *s) {
        assert_return(s, NULL);

_public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {

        assert_return(s, NULL);

        s->userdata = userdata;

static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {

        /*
          Find a good time to wake up again between times a and b. We
          have two goals here:

          a) We want to wake up as seldom as possible, hence prefer
             later times over earlier times.

          b) But if we have to wake up, then let's make sure to
             dispatch as much as possible on the entire system.

          We implement this by waking up everywhere at the same time
          within any given minute if we can, synchronised via the
          perturbation value determined from the boot ID. If we can't,
          then we try to find the same spot in every 10s, then 1s and
          then 250ms steps. Otherwise, we pick the last possible time
          to wake up.
        */

        c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;

                if (_unlikely_(c < USEC_PER_MINUTE))

                c -= USEC_PER_MINUTE;

        c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));

                if (_unlikely_(c < USEC_PER_SEC*10))

                c -= USEC_PER_SEC*10;

        c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);

                if (_unlikely_(c < USEC_PER_SEC))

        c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));

                if (_unlikely_(c < USEC_PER_MSEC*250))

                c -= USEC_PER_MSEC*250;
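        /* Worked example: with a = 12.10s, b = 70.00s and
         * perturb = 17.30s, the minute-aligned candidate is
         * 60.00s + 17.30s = 77.30s, which lies past b, so we step
         * back one minute to 17.30s; that is >= a, so we wake at
         * 17.30s, the same within-the-minute spot every other timer
         * on this machine aims for. */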
static int event_arm_timer(

        struct itimerspec its = {};
        sd_event_source *a, *b;

        a = prioq_peek(earliest);
        if (!a || a->enabled == SD_EVENT_OFF) {

                if (*next == (usec_t) -1)

                r = timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &its, NULL);

                *next = (usec_t) -1;

        b = prioq_peek(latest);
        assert_se(b && b->enabled != SD_EVENT_OFF);

        t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);

        assert_se(timer_fd >= 0);

                /* We don't want to disarm here, just set some time looooong ago. */
                its.it_value.tv_sec = 0;
                its.it_value.tv_nsec = 1;

                timespec_store(&its.it_value, t);

        r = timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &its, NULL);

static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {

        assert(s->type == SOURCE_IO);

        /* If the event source was already pending, we just OR in the
         * new revents, otherwise we reset the value. The ORing is
         * necessary to handle EPOLLONESHOT events properly where
         * readability might happen independently of writability, and
         * we need to keep track of both */

                s->io.revents |= revents;

                s->io.revents = revents;

        return source_set_pending(s, true);

static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {

        assert_return(events == EPOLLIN, -EIO);

        ss = read(fd, &x, sizeof(x));

                if (errno == EAGAIN || errno == EINTR)

        if (_unlikely_(ss != sizeof(x)))

                *next = (usec_t) -1;

static int process_timer(

                s = prioq_peek(earliest);
                    s->enabled == SD_EVENT_OFF ||

                r = source_set_pending(s, true);

                prioq_reshuffle(earliest, s, &s->time.earliest_index);
                prioq_reshuffle(latest, s, &s->time.latest_index);

static int process_child(sd_event *e) {

        e->need_process_child = false;

        /*
          So, this is ugly. We iteratively invoke waitid() with P_PID
          + WNOHANG for each PID we wait for, instead of using
          P_ALL. This is because we only want to get child
          information of very specific child processes, and not all
          of them. We might not have processed the SIGCHLD event of a
          previous invocation and we don't want to maintain an
          unbounded *per-child* event queue, hence we really don't
          want anything flushed out of the kernel's queue that we
          don't care about. Since this is O(n) this means that if you
          have a lot of processes you probably want to handle SIGCHLD
          yourself.

          We do not reap the children here (we pass WNOWAIT); that is
          done only after the event source has been dispatched, so
          that the callback still sees the process as a zombie.
        */
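        /* Example: for a child that has exited, waitid() below fills
         * in si_code == CLD_EXITED and si_status, but because of
         * WNOWAIT the zombie stays in place; source_dispatch() later
         * reaps it with a second waitid() without WNOWAIT. */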
        HASHMAP_FOREACH(s, e->child_sources, i) {
                assert(s->type == SOURCE_CHILD);

                if (s->enabled == SD_EVENT_OFF)

                zero(s->child.siginfo);
                r = waitid(P_PID, s->child.pid, &s->child.siginfo,
                           WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);

                if (s->child.siginfo.si_pid != 0) {
                                s->child.siginfo.si_code == CLD_EXITED ||
                                s->child.siginfo.si_code == CLD_KILLED ||
                                s->child.siginfo.si_code == CLD_DUMPED;

                        if (!zombie && (s->child.options & WEXITED)) {
                                /* If the child isn't dead then let's
                                 * immediately remove the state change
                                 * from the queue, since there's no
                                 * benefit in leaving it queued */

                                assert(s->child.options & (WSTOPPED|WCONTINUED));
                                waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));

                        r = source_set_pending(s, true);

static int process_signal(sd_event *e, uint32_t events) {
        bool read_one = false;

        assert(e->signal_sources);

        assert_return(events == EPOLLIN, -EIO);

                struct signalfd_siginfo si;

                ss = read(e->signal_fd, &si, sizeof(si));

                        if (errno == EAGAIN || errno == EINTR)

                if (_unlikely_(ss != sizeof(si)))

                s = e->signal_sources[si.ssi_signo];
                if (si.ssi_signo == SIGCHLD) {
                        r = process_child(e);

                s->signal.siginfo = si;
                r = source_set_pending(s, true);

static int source_dispatch(sd_event_source *s) {

        assert(s->pending || s->type == SOURCE_EXIT);

        if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
                r = source_set_pending(s, false);

        if (s->type != SOURCE_POST) {

                /* If we execute a non-post source, let's mark all
                 * post sources as pending */

                SET_FOREACH(z, s->event->post_sources, i) {
                        if (z->enabled == SD_EVENT_OFF)

                        r = source_set_pending(z, true);

        if (s->enabled == SD_EVENT_ONESHOT) {
                r = sd_event_source_set_enabled(s, SD_EVENT_OFF);

        s->dispatching = true;

                r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);

        case SOURCE_MONOTONIC:
                r = s->time.callback(s, s->time.next, s->userdata);

        case SOURCE_REALTIME:
                r = s->time.callback(s, s->time.next, s->userdata);

                r = s->signal.callback(s, &s->signal.siginfo, s->userdata);

        case SOURCE_CHILD: {

                zombie = s->child.siginfo.si_code == CLD_EXITED ||
                         s->child.siginfo.si_code == CLD_KILLED ||
                         s->child.siginfo.si_code == CLD_DUMPED;

                r = s->child.callback(s, &s->child.siginfo, s->userdata);

                /* Now, reap the PID for good. */
                        waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);

                r = s->defer.callback(s, s->userdata);

                r = s->post.callback(s, s->userdata);

                r = s->exit.callback(s, s->userdata);

        case SOURCE_WATCHDOG:
                assert_not_reached("Wut? I shouldn't exist.");

        s->dispatching = false;

                log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));

                sd_event_source_set_enabled(s, SD_EVENT_OFF);

static int event_prepare(sd_event *e) {

                s = prioq_peek(e->prepare);
                if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)

                s->prepare_iteration = e->iteration;
                r = prioq_reshuffle(e->prepare, s, &s->prepare_index);

                s->dispatching = true;
                r = s->prepare(s, s->userdata);
                s->dispatching = false;

                        log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));

                        sd_event_source_set_enabled(s, SD_EVENT_OFF);

static int dispatch_exit(sd_event *e) {

        p = prioq_peek(e->exit);
        if (!p || p->enabled == SD_EVENT_OFF) {
                e->state = SD_EVENT_FINISHED;

        e->state = SD_EVENT_EXITING;

        r = source_dispatch(p);

        e->state = SD_EVENT_PASSIVE;

static sd_event_source* event_next_pending(sd_event *e) {

        p = prioq_peek(e->pending);

        if (p->enabled == SD_EVENT_OFF)

static int arm_watchdog(sd_event *e) {
        struct itimerspec its = {};

        assert(e->watchdog_fd >= 0);

        t = sleep_between(e,
                          e->watchdog_last + (e->watchdog_period / 2),
                          e->watchdog_last + (e->watchdog_period * 3 / 4));

        timespec_store(&its.it_value, t);

        r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);

static int process_watchdog(sd_event *e) {

        /* Don't notify watchdog too often */
        if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)

        sd_notify(false, "WATCHDOG=1");
        e->watchdog_last = e->timestamp.monotonic;

        return arm_watchdog(e);
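/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): under a service unit with WatchdogSec= set, the period is
 * picked up from the environment via sd_watchdog_enabled() (see
 * sd_event_set_watchdog() below), and the loop pings the manager
 * automatically from then on. */
#if 0
static int example_enable_watchdog(sd_event *e) {
        int r;

        r = sd_event_set_watchdog(e, true);
        if (r < 0)
                return r;

        /* Assumption: a zero return means the manager requested no
         * watchdog, mirroring sd_watchdog_enabled(). */
        log_debug("Watchdog %s.", r > 0 ? "enabled" : "not requested");
        return 0;
}
#endif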
_public_ int sd_event_run(sd_event *e, uint64_t timeout) {
        struct epoll_event *ev_queue;
        unsigned ev_queue_max;

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        if (e->exit_requested)
                return dispatch_exit(e);

        e->state = SD_EVENT_RUNNING;

        r = event_prepare(e);

        r = event_arm_timer(e, e->monotonic_fd, e->monotonic_earliest, e->monotonic_latest, &e->monotonic_next);

        r = event_arm_timer(e, e->realtime_fd, e->realtime_earliest, e->realtime_latest, &e->realtime_next);

        if (event_next_pending(e) || e->need_process_child)

        ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
        ev_queue = newa(struct epoll_event, ev_queue_max);

        m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
                       timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));

                r = errno == EAGAIN || errno == EINTR ? 1 : -errno;

        dual_timestamp_get(&e->timestamp);

        for (i = 0; i < m; i++) {

                if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_MONOTONIC))
                        r = flush_timer(e, e->monotonic_fd, ev_queue[i].events, &e->monotonic_next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_REALTIME))
                        r = flush_timer(e, e->realtime_fd, ev_queue[i].events, &e->realtime_next);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
                        r = process_signal(e, ev_queue[i].events);
                else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
                        r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
                else
                        r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);

        r = process_watchdog(e);

        r = process_timer(e, e->timestamp.monotonic, e->monotonic_earliest, e->monotonic_latest);

        r = process_timer(e, e->timestamp.realtime, e->realtime_earliest, e->realtime_latest);

        if (e->need_process_child) {
                r = process_child(e);

        p = event_next_pending(e);

                r = source_dispatch(p);

        e->state = SD_EVENT_PASSIVE;

_public_ int sd_event_loop(sd_event *e) {

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);
        assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);

        while (e->state != SD_EVENT_FINISHED) {
                r = sd_event_run(e, (uint64_t) -1);
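/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): the typical life cycle of an event loop. */
#if 0
static int example_main_loop(void) {
        sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return r;

        /* ... add io/timer/signal/child sources here ... */

        /* Blocks, iterating sd_event_run(), until sd_event_exit(). */
        r = sd_event_loop(e);

        sd_event_unref(e);
        return r;
}
#endif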
_public_ int sd_event_get_state(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

_public_ int sd_event_get_exit_code(sd_event *e, int *code) {
        assert_return(e, -EINVAL);
        assert_return(code, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (!e->exit_requested)

        *code = e->exit_code;

_public_ int sd_event_exit(sd_event *e, int code) {
        assert_return(e, -EINVAL);
        assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
        assert_return(!event_pid_changed(e), -ECHILD);

        e->exit_requested = true;
        e->exit_code = code;

_public_ int sd_event_get_now_realtime(sd_event *e, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(dual_timestamp_is_set(&e->timestamp), -ENODATA);
        assert_return(!event_pid_changed(e), -ECHILD);

        *usec = e->timestamp.realtime;

_public_ int sd_event_get_now_monotonic(sd_event *e, uint64_t *usec) {
        assert_return(e, -EINVAL);
        assert_return(usec, -EINVAL);
        assert_return(dual_timestamp_is_set(&e->timestamp), -ENODATA);
        assert_return(!event_pid_changed(e), -ECHILD);

        *usec = e->timestamp.monotonic;

_public_ int sd_event_default(sd_event **ret) {

        static thread_local sd_event *default_event = NULL;

                return !!default_event;

        if (default_event) {
                *ret = sd_event_ref(default_event);

        r = sd_event_new(&e);

        e->default_event_ptr = &default_event;

_public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
        assert_return(e, -EINVAL);
        assert_return(tid, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

_public_ int sd_event_set_watchdog(sd_event *e, int b) {

        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);

        if (e->watchdog == !!b)

                struct epoll_event ev = {};

                r = sd_watchdog_enabled(false, &e->watchdog_period);

                /* Issue first ping immediately */
                sd_notify(false, "WATCHDOG=1");
                e->watchdog_last = now(CLOCK_MONOTONIC);

                e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
                if (e->watchdog_fd < 0)

                r = arm_watchdog(e);

                ev.events = EPOLLIN;
                ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);

                r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);

                if (e->watchdog_fd >= 0) {
                        epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
                        e->watchdog_fd = safe_close(e->watchdog_fd);

        e->watchdog_fd = safe_close(e->watchdog_fd);

_public_ int sd_event_get_watchdog(sd_event *e) {
        assert_return(e, -EINVAL);
        assert_return(!event_pid_changed(e), -ECHILD);