/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/wait.h>
#include <pthread.h>

#include "sd-id128.h"
#include "sd-daemon.h"
#include "sd-event.h"

#include "macro.h"
#include "prioq.h"
#include "hashmap.h"
#include "set.h"
#include "util.h"
#include "time-util.h"
#include "missing.h"
39 #define EPOLL_QUEUE_MAX 512U
40 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
42 typedef enum EventSourceType {
54 struct sd_event_source {
59 sd_event_handler_t prepare;
61 EventSourceType type:4;
67 unsigned pending_index;
68 unsigned prepare_index;
69 unsigned pending_iteration;
70 unsigned prepare_iteration;
74 sd_event_io_handler_t callback;
81 sd_event_time_handler_t callback;
82 usec_t next, accuracy;
83 unsigned earliest_index;
84 unsigned latest_index;
87 sd_event_signal_handler_t callback;
88 struct signalfd_siginfo siginfo;
92 sd_event_child_handler_t callback;
98 sd_event_handler_t callback;
101 sd_event_handler_t callback;
104 sd_event_handler_t callback;
105 unsigned prioq_index;
122 /* For both clocks we maintain two priority queues each, one
123 * ordered for the earliest times the events may be
124 * dispatched, and one ordered by the latest times they must
125 * have been dispatched. The range between the top entries in
126 * the two prioqs is the time window we can freely schedule
128 Prioq *monotonic_earliest;
129 Prioq *monotonic_latest;
130 Prioq *realtime_earliest;
131 Prioq *realtime_latest;
133 usec_t realtime_next, monotonic_next;
137 sd_event_source **signal_sources;
139 Hashmap *child_sources;
140 unsigned n_enabled_child_sources;
149 dual_timestamp timestamp;
152 bool exit_requested:1;
153 bool need_process_child:1;
159 sd_event **default_event_ptr;
161 usec_t watchdog_last, watchdog_period;
166 static int pending_prioq_compare(const void *a, const void *b) {
167 const sd_event_source *x = a, *y = b;
172 /* Enabled ones first */
173 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
175 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
178 /* Lower priority values first */
179 if (x->priority < y->priority)
181 if (x->priority > y->priority)
184 /* Older entries first */
185 if (x->pending_iteration < y->pending_iteration)
187 if (x->pending_iteration > y->pending_iteration)
190 /* Stability for the rest */
199 static int prepare_prioq_compare(const void *a, const void *b) {
200 const sd_event_source *x = a, *y = b;
205 /* Move most recently prepared ones last, so that we can stop
206 * preparing as soon as we hit one that has already been
207 * prepared in the current iteration */
208 if (x->prepare_iteration < y->prepare_iteration)
210 if (x->prepare_iteration > y->prepare_iteration)
213 /* Enabled ones first */
214 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
216 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
219 /* Lower priority values first */
220 if (x->priority < y->priority)
222 if (x->priority > y->priority)
225 /* Stability for the rest */
234 static int earliest_time_prioq_compare(const void *a, const void *b) {
235 const sd_event_source *x = a, *y = b;
237 assert(x->type == SOURCE_MONOTONIC || x->type == SOURCE_REALTIME);
238 assert(y->type == SOURCE_MONOTONIC || y->type == SOURCE_REALTIME);
240 /* Enabled ones first */
241 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
243 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
246 /* Move the pending ones to the end */
247 if (!x->pending && y->pending)
249 if (x->pending && !y->pending)
253 if (x->time.next < y->time.next)
255 if (x->time.next > y->time.next)
258 /* Stability for the rest */
267 static int latest_time_prioq_compare(const void *a, const void *b) {
268 const sd_event_source *x = a, *y = b;
270 assert((x->type == SOURCE_MONOTONIC && y->type == SOURCE_MONOTONIC) ||
271 (x->type == SOURCE_REALTIME && y->type == SOURCE_REALTIME));
273 /* Enabled ones first */
274 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
276 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
279 /* Move the pending ones to the end */
280 if (!x->pending && y->pending)
282 if (x->pending && !y->pending)
286 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
288 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
291 /* Stability for the rest */
300 static int exit_prioq_compare(const void *a, const void *b) {
301 const sd_event_source *x = a, *y = b;
303 assert(x->type == SOURCE_EXIT);
304 assert(y->type == SOURCE_EXIT);
306 /* Enabled ones first */
307 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
309 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
312 /* Lower priority values first */
313 if (x->priority < y->priority)
315 if (x->priority > y->priority)
318 /* Stability for the rest */
327 static void event_free(sd_event *e) {
329 assert(e->n_sources == 0);
331 if (e->default_event_ptr)
332 *(e->default_event_ptr) = NULL;
334 if (e->epoll_fd >= 0)
335 close_nointr_nofail(e->epoll_fd);
337 if (e->signal_fd >= 0)
338 close_nointr_nofail(e->signal_fd);
340 if (e->realtime_fd >= 0)
341 close_nointr_nofail(e->realtime_fd);
343 if (e->monotonic_fd >= 0)
344 close_nointr_nofail(e->monotonic_fd);
346 if (e->watchdog_fd >= 0)
347 close_nointr_nofail(e->watchdog_fd);
349 prioq_free(e->pending);
350 prioq_free(e->prepare);
351 prioq_free(e->monotonic_earliest);
352 prioq_free(e->monotonic_latest);
353 prioq_free(e->realtime_earliest);
354 prioq_free(e->realtime_latest);
357 free(e->signal_sources);
359 hashmap_free(e->child_sources);
360 set_free(e->post_sources);
364 _public_ int sd_event_new(sd_event** ret) {
368 assert_return(ret, -EINVAL);
370 e = new0(sd_event, 1);
375 e->signal_fd = e->realtime_fd = e->monotonic_fd = e->watchdog_fd = e->epoll_fd = -1;
376 e->realtime_next = e->monotonic_next = (usec_t) -1;
377 e->original_pid = getpid();
379 assert_se(sigemptyset(&e->sigset) == 0);
381 e->pending = prioq_new(pending_prioq_compare);
387 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
388 if (e->epoll_fd < 0) {
401 _public_ sd_event* sd_event_ref(sd_event *e) {
402 assert_return(e, NULL);
404 assert(e->n_ref >= 1);
410 _public_ sd_event* sd_event_unref(sd_event *e) {
415 assert(e->n_ref >= 1);
424 static bool event_pid_changed(sd_event *e) {
427 /* We don't support people creating am event loop and keeping
428 * it around over a fork(). Let's complain. */
430 return e->original_pid != getpid();
433 static int source_io_unregister(sd_event_source *s) {
437 assert(s->type == SOURCE_IO);
439 if (!s->io.registered)
442 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
446 s->io.registered = false;
450 static int source_io_register(
455 struct epoll_event ev = {};
459 assert(s->type == SOURCE_IO);
460 assert(enabled != SD_EVENT_OFF);
465 if (enabled == SD_EVENT_ONESHOT)
466 ev.events |= EPOLLONESHOT;
468 if (s->io.registered)
469 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
471 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
476 s->io.registered = true;
481 static void source_free(sd_event_source *s) {
485 assert(s->event->n_sources > 0);
491 source_io_unregister(s);
495 case SOURCE_MONOTONIC:
496 prioq_remove(s->event->monotonic_earliest, s, &s->time.earliest_index);
497 prioq_remove(s->event->monotonic_latest, s, &s->time.latest_index);
500 case SOURCE_REALTIME:
501 prioq_remove(s->event->realtime_earliest, s, &s->time.earliest_index);
502 prioq_remove(s->event->realtime_latest, s, &s->time.latest_index);
506 if (s->signal.sig > 0) {
507 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
508 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
510 if (s->event->signal_sources)
511 s->event->signal_sources[s->signal.sig] = NULL;
517 if (s->child.pid > 0) {
518 if (s->enabled != SD_EVENT_OFF) {
519 assert(s->event->n_enabled_child_sources > 0);
520 s->event->n_enabled_child_sources--;
523 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
524 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
526 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
536 set_remove(s->event->post_sources, s);
540 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
543 case SOURCE_WATCHDOG:
544 assert_not_reached("Wut? I shouldn't exist.");
548 prioq_remove(s->event->pending, s, &s->pending_index);
551 prioq_remove(s->event->prepare, s, &s->prepare_index);
553 s->event->n_sources--;
554 sd_event_unref(s->event);
560 static int source_set_pending(sd_event_source *s, bool b) {
564 assert(s->type != SOURCE_EXIT);
572 s->pending_iteration = s->event->iteration;
574 r = prioq_put(s->event->pending, s, &s->pending_index);
580 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
582 if (s->type == SOURCE_REALTIME) {
583 prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
584 prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
585 } else if (s->type == SOURCE_MONOTONIC) {
586 prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
587 prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);
593 static sd_event_source *source_new(sd_event *e, EventSourceType type) {
598 s = new0(sd_event_source, 1);
603 s->event = sd_event_ref(e);
605 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
612 _public_ int sd_event_add_io(
614 sd_event_source **ret,
617 sd_event_io_handler_t callback,
623 assert_return(e, -EINVAL);
624 assert_return(fd >= 0, -EINVAL);
625 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
626 assert_return(callback, -EINVAL);
627 assert_return(ret, -EINVAL);
628 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
629 assert_return(!event_pid_changed(e), -ECHILD);
631 s = source_new(e, SOURCE_IO);
636 s->io.events = events;
637 s->io.callback = callback;
638 s->userdata = userdata;
639 s->enabled = SD_EVENT_ON;
641 r = source_io_register(s, s->enabled, events);
651 static int event_setup_timer_fd(
653 EventSourceType type,
657 sd_id128_t bootid = {};
658 struct epoll_event ev = {};
664 if (_likely_(*timer_fd >= 0))
667 fd = timerfd_create(id, TFD_NONBLOCK|TFD_CLOEXEC);
672 ev.data.ptr = INT_TO_PTR(type);
674 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
676 close_nointr_nofail(fd);
680 /* When we sleep for longer, we try to realign the wakeup to
681 the same time wihtin each minute/second/250ms, so that
682 events all across the system can be coalesced into a single
683 CPU wakeup. However, let's take some system-specific
684 randomness for this value, so that in a network of systems
685 with synced clocks timer events are distributed a
686 bit. Here, we calculate a perturbation usec offset from the
689 if (sd_id128_get_boot(&bootid) >= 0)
690 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
696 static int event_add_time_internal(
698 sd_event_source **ret,
699 EventSourceType type,
706 sd_event_time_handler_t callback,
712 assert_return(e, -EINVAL);
713 assert_return(callback, -EINVAL);
714 assert_return(ret, -EINVAL);
715 assert_return(usec != (uint64_t) -1, -EINVAL);
716 assert_return(accuracy != (uint64_t) -1, -EINVAL);
717 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
718 assert_return(!event_pid_changed(e), -ECHILD);
725 *earliest = prioq_new(earliest_time_prioq_compare);
731 *latest = prioq_new(latest_time_prioq_compare);
737 r = event_setup_timer_fd(e, type, timer_fd, id);
742 s = source_new(e, type);
747 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
748 s->time.callback = callback;
749 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
750 s->userdata = userdata;
751 s->enabled = SD_EVENT_ONESHOT;
753 r = prioq_put(*earliest, s, &s->time.earliest_index);
757 r = prioq_put(*latest, s, &s->time.latest_index);
769 _public_ int sd_event_add_monotonic(sd_event *e,
770 sd_event_source **ret,
773 sd_event_time_handler_t callback,
776 return event_add_time_internal(e, ret, SOURCE_MONOTONIC, &e->monotonic_fd, CLOCK_MONOTONIC, &e->monotonic_earliest, &e->monotonic_latest, usec, accuracy, callback, userdata);
779 _public_ int sd_event_add_realtime(sd_event *e,
780 sd_event_source **ret,
783 sd_event_time_handler_t callback,
786 return event_add_time_internal(e, ret, SOURCE_REALTIME, &e->realtime_fd, CLOCK_REALTIME, &e->realtime_earliest, &e->realtime_latest, usec, accuracy, callback, userdata);
789 static int event_update_signal_fd(sd_event *e) {
790 struct epoll_event ev = {};
796 add_to_epoll = e->signal_fd < 0;
798 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
808 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
810 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
812 close_nointr_nofail(e->signal_fd);
821 _public_ int sd_event_add_signal(
823 sd_event_source **ret,
825 sd_event_signal_handler_t callback,
832 assert_return(e, -EINVAL);
833 assert_return(sig > 0, -EINVAL);
834 assert_return(sig < _NSIG, -EINVAL);
835 assert_return(callback, -EINVAL);
836 assert_return(ret, -EINVAL);
837 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
838 assert_return(!event_pid_changed(e), -ECHILD);
840 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
844 if (!sigismember(&ss, sig))
847 if (!e->signal_sources) {
848 e->signal_sources = new0(sd_event_source*, _NSIG);
849 if (!e->signal_sources)
851 } else if (e->signal_sources[sig])
854 s = source_new(e, SOURCE_SIGNAL);
859 s->signal.callback = callback;
860 s->userdata = userdata;
861 s->enabled = SD_EVENT_ON;
863 e->signal_sources[sig] = s;
864 assert_se(sigaddset(&e->sigset, sig) == 0);
866 if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
867 r = event_update_signal_fd(e);
878 _public_ int sd_event_add_child(
880 sd_event_source **ret,
883 sd_event_child_handler_t callback,
889 assert_return(e, -EINVAL);
890 assert_return(pid > 1, -EINVAL);
891 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
892 assert_return(options != 0, -EINVAL);
893 assert_return(callback, -EINVAL);
894 assert_return(ret, -EINVAL);
895 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
896 assert_return(!event_pid_changed(e), -ECHILD);
898 r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
902 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
905 s = source_new(e, SOURCE_CHILD);
910 s->child.options = options;
911 s->child.callback = callback;
912 s->userdata = userdata;
913 s->enabled = SD_EVENT_ONESHOT;
915 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
921 e->n_enabled_child_sources ++;
923 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
925 if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
926 r = event_update_signal_fd(e);
933 e->need_process_child = true;
939 _public_ int sd_event_add_defer(
941 sd_event_source **ret,
942 sd_event_handler_t callback,
948 assert_return(e, -EINVAL);
949 assert_return(callback, -EINVAL);
950 assert_return(ret, -EINVAL);
951 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
952 assert_return(!event_pid_changed(e), -ECHILD);
954 s = source_new(e, SOURCE_DEFER);
958 s->defer.callback = callback;
959 s->userdata = userdata;
960 s->enabled = SD_EVENT_ONESHOT;
962 r = source_set_pending(s, true);
972 _public_ int sd_event_add_post(
974 sd_event_source **ret,
975 sd_event_handler_t callback,
981 assert_return(e, -EINVAL);
982 assert_return(callback, -EINVAL);
983 assert_return(ret, -EINVAL);
984 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
985 assert_return(!event_pid_changed(e), -ECHILD);
987 r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
991 s = source_new(e, SOURCE_POST);
995 s->post.callback = callback;
996 s->userdata = userdata;
997 s->enabled = SD_EVENT_ON;
999 r = set_put(e->post_sources, s);
1009 _public_ int sd_event_add_exit(
1011 sd_event_source **ret,
1012 sd_event_handler_t callback,
1018 assert_return(e, -EINVAL);
1019 assert_return(callback, -EINVAL);
1020 assert_return(ret, -EINVAL);
1021 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1022 assert_return(!event_pid_changed(e), -ECHILD);
1025 e->exit = prioq_new(exit_prioq_compare);
1030 s = source_new(e, SOURCE_EXIT);
1034 s->exit.callback = callback;
1035 s->userdata = userdata;
1036 s->exit.prioq_index = PRIOQ_IDX_NULL;
1037 s->enabled = SD_EVENT_ONESHOT;
1039 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1049 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1050 assert_return(s, NULL);
1052 assert(s->n_ref >= 1);
1058 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1063 assert(s->n_ref >= 1);
1066 if (s->n_ref <= 0) {
1067 /* Here's a special hack: when we are called from a
1068 * dispatch handler we won't free the event source
1069 * immediately, but we will detach the fd from the
1070 * epoll. This way it is safe for the caller to unref
1071 * the event source and immediately close the fd, but
1072 * we still retain a valid event source object after
1075 if (s->dispatching) {
1076 if (s->type == SOURCE_IO)
1077 source_io_unregister(s);
1085 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1086 assert_return(s, NULL);
1091 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1092 assert_return(s, -EINVAL);
1093 assert_return(s->type != SOURCE_EXIT, -EDOM);
1094 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1095 assert_return(!event_pid_changed(s->event), -ECHILD);
1100 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1101 assert_return(s, -EINVAL);
1102 assert_return(s->type == SOURCE_IO, -EDOM);
1103 assert_return(!event_pid_changed(s->event), -ECHILD);
1108 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1111 assert_return(s, -EINVAL);
1112 assert_return(fd >= 0, -EINVAL);
1113 assert_return(s->type == SOURCE_IO, -EDOM);
1114 assert_return(!event_pid_changed(s->event), -ECHILD);
1119 if (s->enabled == SD_EVENT_OFF) {
1121 s->io.registered = false;
1125 saved_fd = s->io.fd;
1126 assert(s->io.registered);
1129 s->io.registered = false;
1131 r = source_io_register(s, s->enabled, s->io.events);
1133 s->io.fd = saved_fd;
1134 s->io.registered = true;
1138 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
1144 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1145 assert_return(s, -EINVAL);
1146 assert_return(events, -EINVAL);
1147 assert_return(s->type == SOURCE_IO, -EDOM);
1148 assert_return(!event_pid_changed(s->event), -ECHILD);
1150 *events = s->io.events;
1154 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1157 assert_return(s, -EINVAL);
1158 assert_return(s->type == SOURCE_IO, -EDOM);
1159 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1160 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1161 assert_return(!event_pid_changed(s->event), -ECHILD);
1163 if (s->io.events == events)
1166 if (s->enabled != SD_EVENT_OFF) {
1167 r = source_io_register(s, s->enabled, events);
1172 s->io.events = events;
1173 source_set_pending(s, false);
1178 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1179 assert_return(s, -EINVAL);
1180 assert_return(revents, -EINVAL);
1181 assert_return(s->type == SOURCE_IO, -EDOM);
1182 assert_return(s->pending, -ENODATA);
1183 assert_return(!event_pid_changed(s->event), -ECHILD);
1185 *revents = s->io.revents;
1189 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1190 assert_return(s, -EINVAL);
1191 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1192 assert_return(!event_pid_changed(s->event), -ECHILD);
1194 return s->signal.sig;
1197 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1198 assert_return(s, -EINVAL);
1199 assert_return(!event_pid_changed(s->event), -ECHILD);
1204 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1205 assert_return(s, -EINVAL);
1206 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1207 assert_return(!event_pid_changed(s->event), -ECHILD);
1209 if (s->priority == priority)
1212 s->priority = priority;
1215 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1218 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1220 if (s->type == SOURCE_EXIT)
1221 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1226 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1227 assert_return(s, -EINVAL);
1228 assert_return(m, -EINVAL);
1229 assert_return(!event_pid_changed(s->event), -ECHILD);
1235 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1238 assert_return(s, -EINVAL);
1239 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1240 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1241 assert_return(!event_pid_changed(s->event), -ECHILD);
1243 if (s->enabled == m)
1246 if (m == SD_EVENT_OFF) {
1251 r = source_io_unregister(s);
1258 case SOURCE_MONOTONIC:
1260 prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
1261 prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);
1264 case SOURCE_REALTIME:
1266 prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
1267 prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
1272 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1273 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1274 event_update_signal_fd(s->event);
1282 assert(s->event->n_enabled_child_sources > 0);
1283 s->event->n_enabled_child_sources--;
1285 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1286 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1287 event_update_signal_fd(s->event);
1294 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1302 case SOURCE_WATCHDOG:
1303 assert_not_reached("Wut? I shouldn't exist.");
1310 r = source_io_register(s, m, s->io.events);
1317 case SOURCE_MONOTONIC:
1319 prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
1320 prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);
1323 case SOURCE_REALTIME:
1325 prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
1326 prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
1332 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1333 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1334 event_update_signal_fd(s->event);
1339 if (s->enabled == SD_EVENT_OFF) {
1340 s->event->n_enabled_child_sources++;
1342 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1343 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1344 event_update_signal_fd(s->event);
1353 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1361 case SOURCE_WATCHDOG:
1362 assert_not_reached("Wut? I shouldn't exist.");
1367 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1370 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1375 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1376 assert_return(s, -EINVAL);
1377 assert_return(usec, -EINVAL);
1378 assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
1379 assert_return(!event_pid_changed(s->event), -ECHILD);
1381 *usec = s->time.next;
1385 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1386 assert_return(s, -EINVAL);
1387 assert_return(usec != (uint64_t) -1, -EINVAL);
1388 assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
1389 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1390 assert_return(!event_pid_changed(s->event), -ECHILD);
1392 s->time.next = usec;
1394 source_set_pending(s, false);
1396 if (s->type == SOURCE_REALTIME) {
1397 prioq_reshuffle(s->event->realtime_earliest, s, &s->time.earliest_index);
1398 prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
1400 prioq_reshuffle(s->event->monotonic_earliest, s, &s->time.earliest_index);
1401 prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);
1407 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1408 assert_return(s, -EINVAL);
1409 assert_return(usec, -EINVAL);
1410 assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
1411 assert_return(!event_pid_changed(s->event), -ECHILD);
1413 *usec = s->time.accuracy;
1417 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1418 assert_return(s, -EINVAL);
1419 assert_return(usec != (uint64_t) -1, -EINVAL);
1420 assert_return(s->type == SOURCE_REALTIME || s->type == SOURCE_MONOTONIC, -EDOM);
1421 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1422 assert_return(!event_pid_changed(s->event), -ECHILD);
1425 usec = DEFAULT_ACCURACY_USEC;
1427 s->time.accuracy = usec;
1429 source_set_pending(s, false);
1431 if (s->type == SOURCE_REALTIME)
1432 prioq_reshuffle(s->event->realtime_latest, s, &s->time.latest_index);
1434 prioq_reshuffle(s->event->monotonic_latest, s, &s->time.latest_index);
1439 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1440 assert_return(s, -EINVAL);
1441 assert_return(pid, -EINVAL);
1442 assert_return(s->type == SOURCE_CHILD, -EDOM);
1443 assert_return(!event_pid_changed(s->event), -ECHILD);
1445 *pid = s->child.pid;
1449 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1452 assert_return(s, -EINVAL);
1453 assert_return(s->type != SOURCE_EXIT, -EDOM);
1454 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1455 assert_return(!event_pid_changed(s->event), -ECHILD);
1457 if (s->prepare == callback)
1460 if (callback && s->prepare) {
1461 s->prepare = callback;
1465 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1469 s->prepare = callback;
1472 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1476 prioq_remove(s->event->prepare, s, &s->prepare_index);
1481 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1482 assert_return(s, NULL);
1487 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1490 assert_return(s, NULL);
1493 s->userdata = userdata;
1498 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1510 Find a good time to wake up again between times a and b. We
1511 have two goals here:
1513 a) We want to wake up as seldom as possible, hence prefer
1514 later times over earlier times.
1516 b) But if we have to wake up, then let's make sure to
1517 dispatch as much as possible on the entire system.
1519 We implement this by waking up everywhere at the same time
1520 within any given minute if we can, synchronised via the
1521 perturbation value determined from the boot ID. If we can't,
1522 then we try to find the same spot in every 10s, then 1s and
1523 then 250ms step. Otherwise, we pick the last possible time
1527 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
1529 if (_unlikely_(c < USEC_PER_MINUTE))
1532 c -= USEC_PER_MINUTE;
1538 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1540 if (_unlikely_(c < USEC_PER_SEC*10))
1543 c -= USEC_PER_SEC*10;
1549 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1551 if (_unlikely_(c < USEC_PER_SEC))
1560 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1562 if (_unlikely_(c < USEC_PER_MSEC*250))
1565 c -= USEC_PER_MSEC*250;
1574 static int event_arm_timer(
1581 struct itimerspec its = {};
1582 sd_event_source *a, *b;
1589 a = prioq_peek(earliest);
1590 if (!a || a->enabled == SD_EVENT_OFF) {
1595 if (*next == (usec_t) -1)
1599 r = timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &its, NULL);
1603 *next = (usec_t) -1;
1608 b = prioq_peek(latest);
1609 assert_se(b && b->enabled != SD_EVENT_OFF);
1611 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1615 assert_se(timer_fd >= 0);
1618 /* We don' want to disarm here, just mean some time looooong ago. */
1619 its.it_value.tv_sec = 0;
1620 its.it_value.tv_nsec = 1;
1622 timespec_store(&its.it_value, t);
1624 r = timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &its, NULL);
1632 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1635 assert(s->type == SOURCE_IO);
1637 /* If the event source was already pending, we just OR in the
1638 * new revents, otherwise we reset the value. The ORing is
1639 * necessary to handle EPOLLONESHOT events properly where
1640 * readability might happen independently of writability, and
1641 * we need to keep track of both */
1644 s->io.revents |= revents;
1646 s->io.revents = revents;
1648 return source_set_pending(s, true);
1651 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
1658 assert_return(events == EPOLLIN, -EIO);
1660 ss = read(fd, &x, sizeof(x));
1662 if (errno == EAGAIN || errno == EINTR)
1668 if (_unlikely_(ss != sizeof(x)))
1672 *next = (usec_t) -1;
1677 static int process_timer(
1689 s = prioq_peek(earliest);
1692 s->enabled == SD_EVENT_OFF ||
1696 r = source_set_pending(s, true);
1700 prioq_reshuffle(earliest, s, &s->time.earliest_index);
1701 prioq_reshuffle(latest, s, &s->time.latest_index);
1707 static int process_child(sd_event *e) {
1714 e->need_process_child = false;
1717 So, this is ugly. We iteratively invoke waitid() with P_PID
1718 + WNOHANG for each PID we wait for, instead of using
1719 P_ALL. This is because we only want to get child
1720 information of very specific child processes, and not all
1721 of them. We might not have processed the SIGCHLD even of a
1722 previous invocation and we don't want to maintain a
1723 unbounded *per-child* event queue, hence we really don't
1724 want anything flushed out of the kernel's queue that we
1725 don't care about. Since this is O(n) this means that if you
1726 have a lot of processes you probably want to handle SIGCHLD
1729 We do not reap the children here (by using WNOWAIT), this
1730 is only done after the event source is dispatched so that
1731 the callback still sees the process as a zombie.
1734 HASHMAP_FOREACH(s, e->child_sources, i) {
1735 assert(s->type == SOURCE_CHILD);
1740 if (s->enabled == SD_EVENT_OFF)
1743 zero(s->child.siginfo);
1744 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1745 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
1749 if (s->child.siginfo.si_pid != 0) {
1751 s->child.siginfo.si_code == CLD_EXITED ||
1752 s->child.siginfo.si_code == CLD_KILLED ||
1753 s->child.siginfo.si_code == CLD_DUMPED;
1755 if (!zombie && (s->child.options & WEXITED)) {
1756 /* If the child isn't dead then let's
1757 * immediately remove the state change
1758 * from the queue, since there's no
1759 * benefit in leaving it queued */
1761 assert(s->child.options & (WSTOPPED|WCONTINUED));
1762 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
1765 r = source_set_pending(s, true);
1774 static int process_signal(sd_event *e, uint32_t events) {
/* Drain the signalfd and mark the matching SOURCE_SIGNAL sources
 * pending. Only EPOLLIN is a valid event mask for the signalfd. */
1775 bool read_one = false;
1779 assert(e->signal_sources);
1781 assert_return(events == EPOLLIN, -EIO);
1784 struct signalfd_siginfo si;
/* signalfd delivers whole signalfd_siginfo structures; a short read
 * would indicate a kernel-level problem. */
1788 ss = read(e->signal_fd, &si, sizeof(si));
1790 if (errno == EAGAIN || errno == EINTR)
1796 if (_unlikely_(ss != sizeof(si)))
1801 s = e->signal_sources[si.ssi_signo];
/* SIGCHLD is special: it additionally triggers the per-child
 * waitid() scan, independently of any user SIGCHLD source. */
1802 if (si.ssi_signo == SIGCHLD) {
1803 r = process_child(e);
/* Stash the siginfo so the dispatched callback can inspect it. */
1812 s->signal.siginfo = si;
1813 r = source_set_pending(s, true);
1819 static int source_dispatch(sd_event_source *s) {
/* Invoke the user callback of a pending event source. A failing
 * callback disables the source rather than failing the event loop. */
1823 assert(s->pending || s->type == SOURCE_EXIT);
/* Defer and exit sources stay pending until disabled; all other types
 * are unmarked before their callback runs. */
1825 if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
1826 r = source_set_pending(s, false);
1831 if (s->type != SOURCE_POST) {
1835 /* If we execute a non-post source, let's mark all
1836 * post sources as pending */
1838 SET_FOREACH(z, s->event->post_sources, i) {
1839 if (z->enabled == SD_EVENT_OFF)
1842 r = source_set_pending(z, true);
/* ONESHOT sources are turned off before the callback, so the callback
 * itself may re-enable them if desired. */
1848 if (s->enabled == SD_EVENT_ONESHOT) {
1849 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Flag that we are inside the user callback (guards re-entrant
 * operations on the source). */
1854 s->dispatching = true;
1859 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
1862 case SOURCE_MONOTONIC:
1863 r = s->time.callback(s, s->time.next, s->userdata);
1866 case SOURCE_REALTIME:
1867 r = s->time.callback(s, s->time.next, s->userdata);
1871 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
1874 case SOURCE_CHILD: {
/* Determine whether the child actually died (vs. was stopped or
 * continued); only then is there a zombie left to reap below. */
1877 zombie = s->child.siginfo.si_code == CLD_EXITED ||
1878 s->child.siginfo.si_code == CLD_KILLED ||
1879 s->child.siginfo.si_code == CLD_DUMPED;
1881 r = s->child.callback(s, &s->child.siginfo, s->userdata);
1883 /* Now, reap the PID for good. */
1885 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
1891 r = s->defer.callback(s, s->userdata);
1895 r = s->post.callback(s, s->userdata);
1899 r = s->exit.callback(s, s->userdata);
1902 case SOURCE_WATCHDOG:
/* Watchdog events are handled internally and never become a
 * dispatchable source. */
1903 assert_not_reached("Wut? I shouldn't exist.");
1906 s->dispatching = false;
/* Error from the callback: log and disable the offending source. */
1909 log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));
1914 sd_event_source_set_enabled(s, SD_EVENT_OFF);
1919 static int event_prepare(sd_event *e) {
/* Run the prepare callbacks of sources that registered one, at most
 * once per loop iteration each (tracked via prepare_iteration). */
1927 s = prioq_peek(e->prepare);
/* Stop at the first source that was already prepared this iteration
 * or is disabled; NOTE(review): presumably the prepare prioq orders
 * such sources after the unprepared ones — verify the comparator. */
1928 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
1931 s->prepare_iteration = e->iteration;
1932 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
/* Same dispatching guard as in source_dispatch(). */
1938 s->dispatching = true;
1939 r = s->prepare(s, s->userdata);
1940 s->dispatching = false;
/* A failing prepare callback disables the source, like a failing
 * dispatch callback does. */
1943 log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));
1948 sd_event_source_set_enabled(s, SD_EVENT_OFF);
1954 static int dispatch_exit(sd_event *e) {
/* Dispatch the highest-priority enabled exit source. When none is
 * left, the loop transitions to SD_EVENT_FINISHED. */
1960 p = prioq_peek(e->exit);
1961 if (!p || p->enabled == SD_EVENT_OFF) {
1962 e->state = SD_EVENT_FINISHED;
/* Callbacks dispatched from here observe the EXITING state. */
1968 e->state = SD_EVENT_EXITING;
1970 r = source_dispatch(p);
1972 e->state = SD_EVENT_PASSIVE;
1978 static sd_event_source* event_next_pending(sd_event *e) {
/* Return the highest-priority pending source, or NULL when the queue
 * is empty or its top entry is disabled. */
1983 p = prioq_peek(e->pending);
1987 if (p->enabled == SD_EVENT_OFF)
1993 static int arm_watchdog(sd_event *e) {
/* Program the watchdog timerfd to fire between 1/2 and 3/4 of the
 * watchdog period after the last ping, letting sleep_between() pick a
 * convenient point inside that window. */
1994 struct itimerspec its = {};
1999 assert(e->watchdog_fd >= 0);
2001 t = sleep_between(e,
2002 e->watchdog_last + (e->watchdog_period / 2),
2003 e->watchdog_last + (e->watchdog_period * 3 / 4));
2005 timespec_store(&its.it_value, t);
/* TFD_TIMER_ABSTIME: t is an absolute timestamp on the clock the fd
 * was created with (CLOCK_MONOTONIC, see sd_event_set_watchdog()). */
2007 r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
2014 static int process_watchdog(sd_event *e) {
/* Ping the service manager's watchdog if at least a quarter of the
 * watchdog period elapsed since the last ping, then re-arm the timer. */
2020 /* Don't notify watchdog too often */
2021 if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
2024 sd_notify(false, "WATCHDOG=1");
2025 e->watchdog_last = e->timestamp.monotonic;
2027 return arm_watchdog(e);
2030 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
/* Run a single event loop iteration: prepare sources, arm the clock
 * timerfds, wait up to 'timeout' usec ((uint64_t) -1 = forever) for
 * kernel events, then dispatch one pending source. Must be called
 * with the loop in the PASSIVE state. */
2031 struct epoll_event *ev_queue;
2032 unsigned ev_queue_max;
2036 assert_return(e, -EINVAL);
2037 assert_return(!event_pid_changed(e), -ECHILD);
2038 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2039 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
/* Once an exit was requested, only exit sources are dispatched. */
2041 if (e->exit_requested)
2042 return dispatch_exit(e);
2046 e->state = SD_EVENT_RUNNING;
2048 r = event_prepare(e);
/* Re-arm both clock timerfds from their earliest/latest prioqs. */
2052 r = event_arm_timer(e, e->monotonic_fd, e->monotonic_earliest, e->monotonic_latest, &e->monotonic_next);
2056 r = event_arm_timer(e, e->realtime_fd, e->realtime_earliest, e->realtime_latest, &e->realtime_next);
/* If something is pending already, do not block in epoll_wait(). */
2060 if (event_next_pending(e) || e->need_process_child)
/* Size the epoll result buffer by the source count, capped at
 * EPOLL_QUEUE_MAX; newa() allocates it on the stack. */
2062 ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
2063 ev_queue = newa(struct epoll_event, ev_queue_max);
/* Convert the usec timeout to ms, rounding up so we never wake early. */
2065 m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
2066 timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
/* EAGAIN/EINTR are not errors, just an empty iteration. */
2068 r = errno == EAGAIN || errno == EINTR ? 1 : -errno;
/* Snapshot the timestamps that all callbacks of this iteration (and
 * sd_event_get_now_*()) will observe. */
2072 dual_timestamp_get(&e->timestamp);
/* Internal fds carry INT_TO_PTR(source type) as epoll data; anything
 * else is a user I/O source pointer. */
2074 for (i = 0; i < m; i++) {
2076 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_MONOTONIC))
2077 r = flush_timer(e, e->monotonic_fd, ev_queue[i].events, &e->monotonic_next);
2078 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_REALTIME))
2079 r = flush_timer(e, e->realtime_fd, ev_queue[i].events, &e->realtime_next);
2080 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
2081 r = process_signal(e, ev_queue[i].events);
2082 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
2083 r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
2085 r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
2091 r = process_watchdog(e);
/* Mark elapsed timer sources pending for both clocks. */
2095 r = process_timer(e, e->timestamp.monotonic, e->monotonic_earliest, e->monotonic_latest);
2099 r = process_timer(e, e->timestamp.realtime, e->realtime_earliest, e->realtime_latest);
2103 if (e->need_process_child) {
2104 r = process_child(e);
/* Dispatch at most one pending source per iteration. */
2109 p = event_next_pending(e);
2115 r = source_dispatch(p);
2118 e->state = SD_EVENT_PASSIVE;
2124 _public_ int sd_event_loop(sd_event *e) {
/* Convenience wrapper: run sd_event_run() with an infinite timeout
 * until the loop reaches SD_EVENT_FINISHED. */
2127 assert_return(e, -EINVAL);
2128 assert_return(!event_pid_changed(e), -ECHILD);
2129 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2133 while (e->state != SD_EVENT_FINISHED) {
2134 r = sd_event_run(e, (uint64_t) -1);
2146 _public_ int sd_event_get_state(sd_event *e) {
/* Return the loop's current SD_EVENT_* state. */
2147 assert_return(e, -EINVAL);
2148 assert_return(!event_pid_changed(e), -ECHILD);
2153 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
/* Retrieve the code passed to sd_event_exit(); fails when no exit has
 * been requested yet. */
2154 assert_return(e, -EINVAL);
2155 assert_return(code, -EINVAL);
2156 assert_return(!event_pid_changed(e), -ECHILD);
2158 if (!e->exit_requested)
2161 *code = e->exit_code;
2165 _public_ int sd_event_exit(sd_event *e, int code) {
/* Request loop termination: subsequent iterations run only exit
 * sources (see sd_event_run()) and the code is reported via
 * sd_event_get_exit_code(). */
2166 assert_return(e, -EINVAL);
2167 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2168 assert_return(!event_pid_changed(e), -ECHILD);
2170 e->exit_requested = true;
2171 e->exit_code = code;
2176 _public_ int sd_event_get_now_realtime(sd_event *e, uint64_t *usec) {
/* Return the CLOCK_REALTIME timestamp cached by sd_event_run() for
 * the current/last iteration — not the current wall-clock time.
 * Fails with -ENODATA before the first iteration took a timestamp. */
2177 assert_return(e, -EINVAL);
2178 assert_return(usec, -EINVAL);
2179 assert_return(dual_timestamp_is_set(&e->timestamp), -ENODATA);
2180 assert_return(!event_pid_changed(e), -ECHILD);
2182 *usec = e->timestamp.realtime;
2186 _public_ int sd_event_get_now_monotonic(sd_event *e, uint64_t *usec) {
/* Return the CLOCK_MONOTONIC timestamp cached by sd_event_run() for
 * the current/last iteration — not the current time.
 * Fails with -ENODATA before the first iteration took a timestamp. */
2187 assert_return(e, -EINVAL);
2188 assert_return(usec, -EINVAL);
2189 assert_return(dual_timestamp_is_set(&e->timestamp), -ENODATA);
2190 assert_return(!event_pid_changed(e), -ECHILD);
2192 *usec = e->timestamp.monotonic;
2196 _public_ int sd_event_default(sd_event **ret) {
/* Return (a new reference to) the calling thread's default event
 * loop, allocating it on first use. With ret == NULL this only
 * reports whether a default loop already exists. */
2198 static thread_local sd_event *default_event = NULL;
2203 return !!default_event;
2205 if (default_event) {
2206 *ret = sd_event_ref(default_event);
2210 r = sd_event_new(&e);
/* Remember where the thread-local default pointer lives.
 * NOTE(review): presumably cleared again when the loop is freed —
 * verify against the free path (not visible here). */
2214 e->default_event_ptr = &default_event;
2222 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
/* NOTE(review): presumably reports the thread id this loop is bound
 * to (set when created as a thread default) — the remainder of the
 * body is not visible here, confirm against the full source. */
2223 assert_return(e, -EINVAL);
2224 assert_return(tid, -EINVAL);
2225 assert_return(!event_pid_changed(e), -ECHILD);
2235 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
/* Enable/disable automatic watchdog pinging: when enabled and the
 * service manager configured a watchdog period, the loop periodically
 * sends "WATCHDOG=1" via an internal CLOCK_MONOTONIC timerfd. */
2238 assert_return(e, -EINVAL);
2239 assert_return(!event_pid_changed(e), -ECHILD);
/* Nothing to do if the requested state matches the current one. */
2241 if (e->watchdog == !!b)
2245 struct epoll_event ev = {};
/* Query the watchdog period from the environment; the 'false' keeps
 * WATCHDOG_USEC set for other consumers (see sd-daemon(3)). */
2247 r = sd_watchdog_enabled(false, &e->watchdog_period);
2251 /* Issue first ping immediately */
2252 sd_notify(false, "WATCHDOG=1");
2253 e->watchdog_last = now(CLOCK_MONOTONIC);
2255 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
2256 if (e->watchdog_fd < 0)
2259 r = arm_watchdog(e);
/* Watch the timerfd via epoll, tagged as the internal watchdog
 * pseudo-source (consumed in sd_event_run(), never dispatched). */
2263 ev.events = EPOLLIN;
2264 ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);
2266 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
/* Disable path: unregister and close the timerfd again. */
2273 if (e->watchdog_fd >= 0) {
2274 epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
2275 close_nointr_nofail(e->watchdog_fd);
2276 e->watchdog_fd = -1;
/* Error path: release the fd allocated above. */
2284 close_nointr_nofail(e->watchdog_fd);
2285 e->watchdog_fd = -1;
2289 _public_ int sd_event_get_watchdog(sd_event *e) {
2290 assert_return(e, -EINVAL);
2291 assert_return(!event_pid_changed(e), -ECHILD);