1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2013 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/epoll.h>
23 #include <sys/timerfd.h>
28 #include "sd-daemon.h"
33 #include "time-util.h"
39 #define EPOLL_QUEUE_MAX 512U
40 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
42 typedef enum EventSourceType {
45 SOURCE_TIME_MONOTONIC,
46 SOURCE_TIME_REALTIME_ALARM,
47 SOURCE_TIME_BOOTTIME_ALARM,
54 _SOURCE_EVENT_SOURCE_TYPE_MAX,
55 _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
58 #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
60 struct sd_event_source {
65 sd_event_handler_t prepare;
67 EventSourceType type:5;
73 unsigned pending_index;
74 unsigned prepare_index;
75 unsigned pending_iteration;
76 unsigned prepare_iteration;
80 sd_event_io_handler_t callback;
87 sd_event_time_handler_t callback;
88 usec_t next, accuracy;
89 unsigned earliest_index;
90 unsigned latest_index;
93 sd_event_signal_handler_t callback;
94 struct signalfd_siginfo siginfo;
98 sd_event_child_handler_t callback;
104 sd_event_handler_t callback;
107 sd_event_handler_t callback;
110 sd_event_handler_t callback;
111 unsigned prioq_index;
119 /* For all clocks we maintain two priority queues each, one
120 * ordered for the earliest times the events may be
121 * dispatched, and one ordered by the latest times they must
122 * have been dispatched. The range between the top entries in
123 * the two prioqs is the time window we can freely schedule
141 /* timerfd_create() only supports these four clocks so far. We
142 * can add support for more clocks when the kernel learns to
143 * deal with them, too. */
144 struct clock_data realtime;
145 struct clock_data monotonic;
146 struct clock_data realtime_alarm;
147 struct clock_data boottime_alarm;
152 sd_event_source **signal_sources;
154 Hashmap *child_sources;
155 unsigned n_enabled_child_sources;
164 dual_timestamp timestamp;
165 usec_t timestamp_boottime;
168 bool exit_requested:1;
169 bool need_process_child:1;
175 sd_event **default_event_ptr;
177 usec_t watchdog_last, watchdog_period;
182 static int pending_prioq_compare(const void *a, const void *b) {
183 const sd_event_source *x = a, *y = b;
188 /* Enabled ones first */
189 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
191 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
194 /* Lower priority values first */
195 if (x->priority < y->priority)
197 if (x->priority > y->priority)
200 /* Older entries first */
201 if (x->pending_iteration < y->pending_iteration)
203 if (x->pending_iteration > y->pending_iteration)
206 /* Stability for the rest */
215 static int prepare_prioq_compare(const void *a, const void *b) {
216 const sd_event_source *x = a, *y = b;
221 /* Move most recently prepared ones last, so that we can stop
222 * preparing as soon as we hit one that has already been
223 * prepared in the current iteration */
224 if (x->prepare_iteration < y->prepare_iteration)
226 if (x->prepare_iteration > y->prepare_iteration)
229 /* Enabled ones first */
230 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
232 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
235 /* Lower priority values first */
236 if (x->priority < y->priority)
238 if (x->priority > y->priority)
241 /* Stability for the rest */
250 static int earliest_time_prioq_compare(const void *a, const void *b) {
251 const sd_event_source *x = a, *y = b;
253 assert(EVENT_SOURCE_IS_TIME(x->type));
254 assert(x->type == y->type);
256 /* Enabled ones first */
257 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
259 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
262 /* Move the pending ones to the end */
263 if (!x->pending && y->pending)
265 if (x->pending && !y->pending)
269 if (x->time.next < y->time.next)
271 if (x->time.next > y->time.next)
274 /* Stability for the rest */
283 static int latest_time_prioq_compare(const void *a, const void *b) {
284 const sd_event_source *x = a, *y = b;
286 assert(EVENT_SOURCE_IS_TIME(x->type));
287 assert(x->type == y->type);
289 /* Enabled ones first */
290 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
292 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
295 /* Move the pending ones to the end */
296 if (!x->pending && y->pending)
298 if (x->pending && !y->pending)
302 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
304 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
307 /* Stability for the rest */
316 static int exit_prioq_compare(const void *a, const void *b) {
317 const sd_event_source *x = a, *y = b;
319 assert(x->type == SOURCE_EXIT);
320 assert(y->type == SOURCE_EXIT);
322 /* Enabled ones first */
323 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
325 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
328 /* Lower priority values first */
329 if (x->priority < y->priority)
331 if (x->priority > y->priority)
334 /* Stability for the rest */
343 static void free_clock_data(struct clock_data *d) {
347 prioq_free(d->earliest);
348 prioq_free(d->latest);
351 static void event_free(sd_event *e) {
353 assert(e->n_sources == 0);
355 if (e->default_event_ptr)
356 *(e->default_event_ptr) = NULL;
358 safe_close(e->epoll_fd);
359 safe_close(e->signal_fd);
360 safe_close(e->watchdog_fd);
362 free_clock_data(&e->realtime);
363 free_clock_data(&e->monotonic);
364 free_clock_data(&e->realtime_alarm);
365 free_clock_data(&e->boottime_alarm);
367 prioq_free(e->pending);
368 prioq_free(e->prepare);
371 free(e->signal_sources);
373 hashmap_free(e->child_sources);
374 set_free(e->post_sources);
378 _public_ int sd_event_new(sd_event** ret) {
382 assert_return(ret, -EINVAL);
384 e = new0(sd_event, 1);
389 e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
390 e->realtime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = (usec_t) -1;
391 e->original_pid = getpid();
392 e->perturb = (usec_t) -1;
394 assert_se(sigemptyset(&e->sigset) == 0);
396 e->pending = prioq_new(pending_prioq_compare);
402 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
403 if (e->epoll_fd < 0) {
416 _public_ sd_event* sd_event_ref(sd_event *e) {
417 assert_return(e, NULL);
419 assert(e->n_ref >= 1);
425 _public_ sd_event* sd_event_unref(sd_event *e) {
430 assert(e->n_ref >= 1);
439 static bool event_pid_changed(sd_event *e) {
442 /* We don't support people creating an event loop and keeping
443 * it around over a fork(). Let's complain. */
445 return e->original_pid != getpid();
448 static int source_io_unregister(sd_event_source *s) {
452 assert(s->type == SOURCE_IO);
454 if (!s->io.registered)
457 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
461 s->io.registered = false;
465 static int source_io_register(
470 struct epoll_event ev = {};
474 assert(s->type == SOURCE_IO);
475 assert(enabled != SD_EVENT_OFF);
480 if (enabled == SD_EVENT_ONESHOT)
481 ev.events |= EPOLLONESHOT;
483 if (s->io.registered)
484 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
486 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
491 s->io.registered = true;
496 static clockid_t event_source_type_to_clock(EventSourceType t) {
500 case SOURCE_TIME_REALTIME:
501 return CLOCK_REALTIME;
503 case SOURCE_TIME_MONOTONIC:
504 return CLOCK_MONOTONIC;
506 case SOURCE_TIME_REALTIME_ALARM:
507 return CLOCK_REALTIME_ALARM;
509 case SOURCE_TIME_BOOTTIME_ALARM:
510 return CLOCK_BOOTTIME_ALARM;
513 return (clockid_t) -1;
517 static EventSourceType clock_to_event_source_type(clockid_t clock) {
522 return SOURCE_TIME_REALTIME;
524 case CLOCK_MONOTONIC:
525 return SOURCE_TIME_MONOTONIC;
527 case CLOCK_REALTIME_ALARM:
528 return SOURCE_TIME_REALTIME_ALARM;
530 case CLOCK_BOOTTIME_ALARM:
531 return SOURCE_TIME_BOOTTIME_ALARM;
534 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
538 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
543 case SOURCE_TIME_REALTIME:
546 case SOURCE_TIME_MONOTONIC:
547 return &e->monotonic;
549 case SOURCE_TIME_REALTIME_ALARM:
550 return &e->realtime_alarm;
552 case SOURCE_TIME_BOOTTIME_ALARM:
553 return &e->boottime_alarm;
560 static void source_free(sd_event_source *s) {
564 assert(s->event->n_sources > 0);
570 source_io_unregister(s);
574 case SOURCE_TIME_REALTIME:
575 case SOURCE_TIME_MONOTONIC:
576 case SOURCE_TIME_REALTIME_ALARM:
577 case SOURCE_TIME_BOOTTIME_ALARM: {
578 struct clock_data *d;
580 d = event_get_clock_data(s->event, s->type);
583 prioq_remove(d->earliest, s, &s->time.earliest_index);
584 prioq_remove(d->latest, s, &s->time.latest_index);
589 if (s->signal.sig > 0) {
590 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
591 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
593 if (s->event->signal_sources)
594 s->event->signal_sources[s->signal.sig] = NULL;
600 if (s->child.pid > 0) {
601 if (s->enabled != SD_EVENT_OFF) {
602 assert(s->event->n_enabled_child_sources > 0);
603 s->event->n_enabled_child_sources--;
606 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
607 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
609 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
619 set_remove(s->event->post_sources, s);
623 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
627 assert_not_reached("Wut? I shouldn't exist.");
631 prioq_remove(s->event->pending, s, &s->pending_index);
634 prioq_remove(s->event->prepare, s, &s->prepare_index);
636 s->event->n_sources--;
637 sd_event_unref(s->event);
643 static int source_set_pending(sd_event_source *s, bool b) {
647 assert(s->type != SOURCE_EXIT);
655 s->pending_iteration = s->event->iteration;
657 r = prioq_put(s->event->pending, s, &s->pending_index);
663 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
665 if (EVENT_SOURCE_IS_TIME(s->type)) {
666 struct clock_data *d;
668 d = event_get_clock_data(s->event, s->type);
671 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
672 prioq_reshuffle(d->latest, s, &s->time.latest_index);
678 static sd_event_source *source_new(sd_event *e, EventSourceType type) {
683 s = new0(sd_event_source, 1);
688 s->event = sd_event_ref(e);
690 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
697 _public_ int sd_event_add_io(
699 sd_event_source **ret,
702 sd_event_io_handler_t callback,
708 assert_return(e, -EINVAL);
709 assert_return(fd >= 0, -EINVAL);
710 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
711 assert_return(callback, -EINVAL);
712 assert_return(ret, -EINVAL);
713 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
714 assert_return(!event_pid_changed(e), -ECHILD);
716 s = source_new(e, SOURCE_IO);
721 s->io.events = events;
722 s->io.callback = callback;
723 s->userdata = userdata;
724 s->enabled = SD_EVENT_ON;
726 r = source_io_register(s, s->enabled, events);
736 static void initialize_perturb(sd_event *e) {
737 sd_id128_t bootid = {};
739 /* When we sleep for longer, we try to realign the wakeup to
740 the same time within each minute/second/250ms, so that
741 events all across the system can be coalesced into a single
742 CPU wakeup. However, let's take some system-specific
743 randomness for this value, so that in a network of systems
744 with synced clocks timer events are distributed a
745 bit. Here, we calculate a perturbation usec offset from the
748 if (_likely_(e->perturb != (usec_t) -1))
751 if (sd_id128_get_boot(&bootid) >= 0)
752 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
755 static int event_setup_timer_fd(
757 struct clock_data *d,
760 struct epoll_event ev = {};
766 if (_likely_(d->fd >= 0))
769 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
774 ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));
776 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
786 _public_ int sd_event_add_time(
788 sd_event_source **ret,
792 sd_event_time_handler_t callback,
795 EventSourceType type;
797 struct clock_data *d;
800 assert_return(e, -EINVAL);
801 assert_return(ret, -EINVAL);
802 assert_return(usec != (uint64_t) -1, -EINVAL);
803 assert_return(accuracy != (uint64_t) -1, -EINVAL);
804 assert_return(callback, -EINVAL);
805 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
806 assert_return(!event_pid_changed(e), -ECHILD);
808 type = clock_to_event_source_type(clock);
809 assert_return(type >= 0, -ENOTSUP);
811 d = event_get_clock_data(e, type);
815 d->earliest = prioq_new(earliest_time_prioq_compare);
821 d->latest = prioq_new(latest_time_prioq_compare);
827 r = event_setup_timer_fd(e, d, clock);
832 s = source_new(e, type);
837 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
838 s->time.callback = callback;
839 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
840 s->userdata = userdata;
841 s->enabled = SD_EVENT_ONESHOT;
843 r = prioq_put(d->earliest, s, &s->time.earliest_index);
847 r = prioq_put(d->latest, s, &s->time.latest_index);
859 static int event_update_signal_fd(sd_event *e) {
860 struct epoll_event ev = {};
866 add_to_epoll = e->signal_fd < 0;
868 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
878 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
880 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
882 e->signal_fd = safe_close(e->signal_fd);
889 _public_ int sd_event_add_signal(
891 sd_event_source **ret,
893 sd_event_signal_handler_t callback,
900 assert_return(e, -EINVAL);
901 assert_return(sig > 0, -EINVAL);
902 assert_return(sig < _NSIG, -EINVAL);
903 assert_return(callback, -EINVAL);
904 assert_return(ret, -EINVAL);
905 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
906 assert_return(!event_pid_changed(e), -ECHILD);
908 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
912 if (!sigismember(&ss, sig))
915 if (!e->signal_sources) {
916 e->signal_sources = new0(sd_event_source*, _NSIG);
917 if (!e->signal_sources)
919 } else if (e->signal_sources[sig])
922 s = source_new(e, SOURCE_SIGNAL);
927 s->signal.callback = callback;
928 s->userdata = userdata;
929 s->enabled = SD_EVENT_ON;
931 e->signal_sources[sig] = s;
932 assert_se(sigaddset(&e->sigset, sig) == 0);
934 if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
935 r = event_update_signal_fd(e);
946 _public_ int sd_event_add_child(
948 sd_event_source **ret,
951 sd_event_child_handler_t callback,
957 assert_return(e, -EINVAL);
958 assert_return(pid > 1, -EINVAL);
959 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
960 assert_return(options != 0, -EINVAL);
961 assert_return(callback, -EINVAL);
962 assert_return(ret, -EINVAL);
963 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
964 assert_return(!event_pid_changed(e), -ECHILD);
966 r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
970 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
973 s = source_new(e, SOURCE_CHILD);
978 s->child.options = options;
979 s->child.callback = callback;
980 s->userdata = userdata;
981 s->enabled = SD_EVENT_ONESHOT;
983 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
989 e->n_enabled_child_sources ++;
991 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
993 if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
994 r = event_update_signal_fd(e);
1001 e->need_process_child = true;
1007 _public_ int sd_event_add_defer(
1009 sd_event_source **ret,
1010 sd_event_handler_t callback,
1016 assert_return(e, -EINVAL);
1017 assert_return(callback, -EINVAL);
1018 assert_return(ret, -EINVAL);
1019 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1020 assert_return(!event_pid_changed(e), -ECHILD);
1022 s = source_new(e, SOURCE_DEFER);
1026 s->defer.callback = callback;
1027 s->userdata = userdata;
1028 s->enabled = SD_EVENT_ONESHOT;
1030 r = source_set_pending(s, true);
1040 _public_ int sd_event_add_post(
1042 sd_event_source **ret,
1043 sd_event_handler_t callback,
1049 assert_return(e, -EINVAL);
1050 assert_return(callback, -EINVAL);
1051 assert_return(ret, -EINVAL);
1052 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1053 assert_return(!event_pid_changed(e), -ECHILD);
1055 r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
1059 s = source_new(e, SOURCE_POST);
1063 s->post.callback = callback;
1064 s->userdata = userdata;
1065 s->enabled = SD_EVENT_ON;
1067 r = set_put(e->post_sources, s);
1077 _public_ int sd_event_add_exit(
1079 sd_event_source **ret,
1080 sd_event_handler_t callback,
1086 assert_return(e, -EINVAL);
1087 assert_return(callback, -EINVAL);
1088 assert_return(ret, -EINVAL);
1089 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1090 assert_return(!event_pid_changed(e), -ECHILD);
1093 e->exit = prioq_new(exit_prioq_compare);
1098 s = source_new(e, SOURCE_EXIT);
1102 s->exit.callback = callback;
1103 s->userdata = userdata;
1104 s->exit.prioq_index = PRIOQ_IDX_NULL;
1105 s->enabled = SD_EVENT_ONESHOT;
1107 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
1117 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1118 assert_return(s, NULL);
1120 assert(s->n_ref >= 1);
1126 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1131 assert(s->n_ref >= 1);
1134 if (s->n_ref <= 0) {
1135 /* Here's a special hack: when we are called from a
1136 * dispatch handler we won't free the event source
1137 * immediately, but we will detach the fd from the
1138 * epoll. This way it is safe for the caller to unref
1139 * the event source and immediately close the fd, but
1140 * we still retain a valid event source object after
1143 if (s->dispatching) {
1144 if (s->type == SOURCE_IO)
1145 source_io_unregister(s);
1153 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1154 assert_return(s, NULL);
1159 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1160 assert_return(s, -EINVAL);
1161 assert_return(s->type != SOURCE_EXIT, -EDOM);
1162 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1163 assert_return(!event_pid_changed(s->event), -ECHILD);
1168 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1169 assert_return(s, -EINVAL);
1170 assert_return(s->type == SOURCE_IO, -EDOM);
1171 assert_return(!event_pid_changed(s->event), -ECHILD);
1176 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1179 assert_return(s, -EINVAL);
1180 assert_return(fd >= 0, -EINVAL);
1181 assert_return(s->type == SOURCE_IO, -EDOM);
1182 assert_return(!event_pid_changed(s->event), -ECHILD);
1187 if (s->enabled == SD_EVENT_OFF) {
1189 s->io.registered = false;
1193 saved_fd = s->io.fd;
1194 assert(s->io.registered);
1197 s->io.registered = false;
1199 r = source_io_register(s, s->enabled, s->io.events);
1201 s->io.fd = saved_fd;
1202 s->io.registered = true;
1206 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
1212 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1213 assert_return(s, -EINVAL);
1214 assert_return(events, -EINVAL);
1215 assert_return(s->type == SOURCE_IO, -EDOM);
1216 assert_return(!event_pid_changed(s->event), -ECHILD);
1218 *events = s->io.events;
1222 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1225 assert_return(s, -EINVAL);
1226 assert_return(s->type == SOURCE_IO, -EDOM);
1227 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1228 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1229 assert_return(!event_pid_changed(s->event), -ECHILD);
1231 if (s->io.events == events)
1234 if (s->enabled != SD_EVENT_OFF) {
1235 r = source_io_register(s, s->enabled, events);
1240 s->io.events = events;
1241 source_set_pending(s, false);
1246 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1247 assert_return(s, -EINVAL);
1248 assert_return(revents, -EINVAL);
1249 assert_return(s->type == SOURCE_IO, -EDOM);
1250 assert_return(s->pending, -ENODATA);
1251 assert_return(!event_pid_changed(s->event), -ECHILD);
1253 *revents = s->io.revents;
1257 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1258 assert_return(s, -EINVAL);
1259 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1260 assert_return(!event_pid_changed(s->event), -ECHILD);
1262 return s->signal.sig;
1265 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1266 assert_return(s, -EINVAL);
1267 assert_return(!event_pid_changed(s->event), -ECHILD);
1272 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1273 assert_return(s, -EINVAL);
1274 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1275 assert_return(!event_pid_changed(s->event), -ECHILD);
1277 if (s->priority == priority)
1280 s->priority = priority;
1283 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1286 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1288 if (s->type == SOURCE_EXIT)
1289 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1294 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1295 assert_return(s, -EINVAL);
1296 assert_return(m, -EINVAL);
1297 assert_return(!event_pid_changed(s->event), -ECHILD);
1303 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1306 assert_return(s, -EINVAL);
1307 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1308 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1309 assert_return(!event_pid_changed(s->event), -ECHILD);
1311 if (s->enabled == m)
1314 if (m == SD_EVENT_OFF) {
1319 r = source_io_unregister(s);
1326 case SOURCE_TIME_REALTIME:
1327 case SOURCE_TIME_MONOTONIC:
1328 case SOURCE_TIME_REALTIME_ALARM:
1329 case SOURCE_TIME_BOOTTIME_ALARM: {
1330 struct clock_data *d;
1333 d = event_get_clock_data(s->event, s->type);
1336 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1337 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1343 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1344 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1345 event_update_signal_fd(s->event);
1353 assert(s->event->n_enabled_child_sources > 0);
1354 s->event->n_enabled_child_sources--;
1356 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1357 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1358 event_update_signal_fd(s->event);
1365 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1374 assert_not_reached("Wut? I shouldn't exist.");
1381 r = source_io_register(s, m, s->io.events);
1388 case SOURCE_TIME_REALTIME:
1389 case SOURCE_TIME_MONOTONIC:
1390 case SOURCE_TIME_REALTIME_ALARM:
1391 case SOURCE_TIME_BOOTTIME_ALARM: {
1392 struct clock_data *d;
1395 d = event_get_clock_data(s->event, s->type);
1398 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1399 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1406 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1407 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1408 event_update_signal_fd(s->event);
1413 if (s->enabled == SD_EVENT_OFF) {
1414 s->event->n_enabled_child_sources++;
1416 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1417 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1418 event_update_signal_fd(s->event);
1427 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1436 assert_not_reached("Wut? I shouldn't exist.");
1441 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1444 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1449 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1450 assert_return(s, -EINVAL);
1451 assert_return(usec, -EINVAL);
1452 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1453 assert_return(!event_pid_changed(s->event), -ECHILD);
1455 *usec = s->time.next;
1459 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1460 struct clock_data *d;
1462 assert_return(s, -EINVAL);
1463 assert_return(usec != (uint64_t) -1, -EINVAL);
1464 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1465 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1466 assert_return(!event_pid_changed(s->event), -ECHILD);
1468 s->time.next = usec;
1470 source_set_pending(s, false);
1472 d = event_get_clock_data(s->event, s->type);
1475 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1476 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1481 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1482 assert_return(s, -EINVAL);
1483 assert_return(usec, -EINVAL);
1484 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1485 assert_return(!event_pid_changed(s->event), -ECHILD);
1487 *usec = s->time.accuracy;
1491 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1492 struct clock_data *d;
1494 assert_return(s, -EINVAL);
1495 assert_return(usec != (uint64_t) -1, -EINVAL);
1496 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1497 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1498 assert_return(!event_pid_changed(s->event), -ECHILD);
1501 usec = DEFAULT_ACCURACY_USEC;
1503 s->time.accuracy = usec;
1505 source_set_pending(s, false);
1507 d = event_get_clock_data(s->event, s->type);
1510 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1515 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
1516 assert_return(s, -EINVAL);
1517 assert_return(clock, -EINVAL);
1518 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1519 assert_return(!event_pid_changed(s->event), -ECHILD);
1521 *clock = event_source_type_to_clock(s->type);
1525 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1526 assert_return(s, -EINVAL);
1527 assert_return(pid, -EINVAL);
1528 assert_return(s->type == SOURCE_CHILD, -EDOM);
1529 assert_return(!event_pid_changed(s->event), -ECHILD);
1531 *pid = s->child.pid;
1535 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1538 assert_return(s, -EINVAL);
1539 assert_return(s->type != SOURCE_EXIT, -EDOM);
1540 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1541 assert_return(!event_pid_changed(s->event), -ECHILD);
1543 if (s->prepare == callback)
1546 if (callback && s->prepare) {
1547 s->prepare = callback;
1551 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1555 s->prepare = callback;
1558 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1562 prioq_remove(s->event->prepare, s, &s->prepare_index);
1567 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1568 assert_return(s, NULL);
1573 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1576 assert_return(s, NULL);
1579 s->userdata = userdata;
1584 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1595 initialize_perturb(e);
1598 Find a good time to wake up again between times a and b. We
1599 have two goals here:
1601 a) We want to wake up as seldom as possible, hence prefer
1602 later times over earlier times.
1604 b) But if we have to wake up, then let's make sure to
1605 dispatch as much as possible on the entire system.
1607 We implement this by waking up everywhere at the same time
1608 within any given minute if we can, synchronised via the
1609 perturbation value determined from the boot ID. If we can't,
1610 then we try to find the same spot in every 10s, then 1s and
1611 then 250ms step. Otherwise, we pick the last possible time
1615 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
1617 if (_unlikely_(c < USEC_PER_MINUTE))
1620 c -= USEC_PER_MINUTE;
1626 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1628 if (_unlikely_(c < USEC_PER_SEC*10))
1631 c -= USEC_PER_SEC*10;
1637 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1639 if (_unlikely_(c < USEC_PER_SEC))
1648 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1650 if (_unlikely_(c < USEC_PER_MSEC*250))
1653 c -= USEC_PER_MSEC*250;
/* Programs the per-clock timerfd: picks a wakeup time between the
 * earliest admissible dispatch time (top of d->earliest) and the latest
 * allowed deadline (top of d->latest, i.e. next + accuracy) via
 * sleep_between(), then arms d->fd with TFD_TIMER_ABSTIME. */
1662 static int event_arm_timer(
1664 struct clock_data *d) {
1666 struct itimerspec its = {};
1667 sd_event_source *a, *b;
1674 a = prioq_peek(d->earliest);
1675 if (!a || a->enabled == SD_EVENT_OFF) {
1680 if (d->next == (usec_t) -1)
1684 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1688 d->next = (usec_t) -1;
1692 b = prioq_peek(d->latest);
1693 assert_se(b && b->enabled != SD_EVENT_OFF);
1695 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1699 assert_se(d->fd >= 0);
1702 /* We don't want to disarm here, just mean some time looooong ago. */
1703 its.it_value.tv_sec = 0;
1704 its.it_value.tv_nsec = 1;
1706 timespec_store(&its.it_value, t);
1708 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1716 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1719 assert(s->type == SOURCE_IO);
1721 /* If the event source was already pending, we just OR in the
1722 * new revents, otherwise we reset the value. The ORing is
1723 * necessary to handle EPOLLONESHOT events properly where
1724 * readability might happen independently of writability, and
1725 * we need to keep track of both */
1728 s->io.revents |= revents;
1730 s->io.revents = revents;
1732 return source_set_pending(s, true);
1735 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
1742 assert_return(events == EPOLLIN, -EIO);
1744 ss = read(fd, &x, sizeof(x));
1746 if (errno == EAGAIN || errno == EINTR)
1752 if (_unlikely_(ss != sizeof(x)))
1756 *next = (usec_t) -1;
1761 static int process_timer(
1764 struct clock_data *d) {
1773 s = prioq_peek(d->earliest);
1776 s->enabled == SD_EVENT_OFF ||
1780 r = source_set_pending(s, true);
1784 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1785 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Check every registered child source for a state change via waitid()
 * and mark those with news as pending.  Children are deliberately not
 * reaped here (WNOWAIT) so the dispatch callback still sees the zombie.
 * NOTE(review): some interior lines are elided in this sampled view. */
1791 static int process_child(sd_event *e) {
/* Reset the flag first; a SIGCHLD arriving during the scan re-sets it. */
1798 e->need_process_child = false;
1801 So, this is ugly. We iteratively invoke waitid() with P_PID
1802 + WNOHANG for each PID we wait for, instead of using
1803 P_ALL. This is because we only want to get child
1804 information of very specific child processes, and not all
1805 of them. We might not have processed the SIGCHLD even of a
1806 previous invocation and we don't want to maintain a
1807 unbounded *per-child* event queue, hence we really don't
1808 want anything flushed out of the kernel's queue that we
1809 don't care about. Since this is O(n) this means that if you
1810 have a lot of processes you probably want to handle SIGCHLD
1813 We do not reap the children here (by using WNOWAIT), this
1814 is only done after the event source is dispatched so that
1815 the callback still sees the process as a zombie.
1818 HASHMAP_FOREACH(s, e->child_sources, i) {
1819 assert(s->type == SOURCE_CHILD);
/* Disabled sources are skipped entirely. */
1824 if (s->enabled == SD_EVENT_OFF)
/* siginfo must be zeroed: si_pid stays 0 if there is no news. */
1827 zero(s->child.siginfo);
/* WNOWAIT only when the caller asked for WEXITED, so exit status
 * remains queryable by the dispatch-time reap. */
1828 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1829 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
/* si_pid != 0 means waitid() actually delivered a state change. */
1833 if (s->child.siginfo.si_pid != 0) {
1835 s->child.siginfo.si_code == CLD_EXITED ||
1836 s->child.siginfo.si_code == CLD_KILLED ||
1837 s->child.siginfo.si_code == CLD_DUMPED;
1839 if (!zombie && (s->child.options & WEXITED)) {
1840 /* If the child isn't dead then let's
1841 * immediately remove the state change
1842 * from the queue, since there's no
1843 * benefit in leaving it queued */
1845 assert(s->child.options & (WSTOPPED|WCONTINUED));
/* Re-query without WNOWAIT to consume the stop/continue event. */
1846 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
1849 r = source_set_pending(s, true);
/* Drain the signalfd and mark the matching signal sources pending.
 * SIGCHLD additionally triggers the child-source scan.  NOTE(review):
 * the read loop's surrounding lines are elided in this sampled view. */
1858 static int process_signal(sd_event *e, uint32_t events) {
1859 bool read_one = false;
1863 assert(e->signal_sources);
/* A signalfd may only ever signal readability. */
1865 assert_return(events == EPOLLIN, -EIO);
1868 struct signalfd_siginfo si;
/* Each read yields exactly one struct signalfd_siginfo. */
1872 ss = read(e->signal_fd, &si, sizeof(si));
/* Queue drained, or interrupted: stop reading. */
1874 if (errno == EAGAIN || errno == EINTR)
/* Partial siginfo reads should never happen. */
1880 if (_unlikely_(ss != sizeof(si)))
1885 s = e->signal_sources[si.ssi_signo];
/* SIGCHLD is special: scan all child sources for state changes. */
1886 if (si.ssi_signo == SIGCHLD) {
1887 r = process_child(e);
/* Stash the siginfo for the dispatch callback, then queue it. */
1896 s->signal.siginfo = si;
1897 r = source_set_pending(s, true);
/* Invoke a pending event source's user callback, with per-type argument
 * marshalling.  Also propagates pending state to post sources, honours
 * ONESHOT, and disables any source whose callback fails.  NOTE(review):
 * interior lines are elided in this sampled view. */
1903 static int source_dispatch(sd_event_source *s) {
/* Exit sources are dispatched from the exit queue without being pending. */
1907 assert(s->pending || s->type == SOURCE_EXIT);
1909 if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
/* Consume the pending state before running the callback, so the
 * callback may re-arm the source. */
1910 r = source_set_pending(s, false);
1915 if (s->type != SOURCE_POST) {
1919 /* If we execute a non-post source, let's mark all
1920 * post sources as pending */
1922 SET_FOREACH(z, s->event->post_sources, i) {
1923 if (z->enabled == SD_EVENT_OFF)
1926 r = source_set_pending(z, true);
/* ONESHOT sources are switched off before their single dispatch. */
1932 if (s->enabled == SD_EVENT_ONESHOT) {
1933 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Guard flag: lets unref during the callback be deferred safely. */
1938 s->dispatching = true;
1943 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
1946 case SOURCE_TIME_REALTIME:
1947 case SOURCE_TIME_MONOTONIC:
1948 case SOURCE_TIME_REALTIME_ALARM:
1949 case SOURCE_TIME_BOOTTIME_ALARM:
/* All clock types share the same callback signature. */
1950 r = s->time.callback(s, s->time.next, s->userdata);
1954 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
1957 case SOURCE_CHILD: {
/* Determine whether the child actually died (vs. stop/continue). */
1960 zombie = s->child.siginfo.si_code == CLD_EXITED ||
1961 s->child.siginfo.si_code == CLD_KILLED ||
1962 s->child.siginfo.si_code == CLD_DUMPED;
1964 r = s->child.callback(s, &s->child.siginfo, s->userdata);
1966 /* Now, reap the PID for good. */
1968 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
1974 r = s->defer.callback(s, s->userdata);
1978 r = s->post.callback(s, s->userdata);
1982 r = s->exit.callback(s, s->userdata);
1985 case SOURCE_WATCHDOG:
/* Watchdog wakeups are handled internally, never via a user source. */
1986 assert_not_reached("Wut? I shouldn't exist.");
1989 s->dispatching = false;
/* A failing callback disables its source rather than killing the loop. */
1992 log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));
1997 sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Run the prepare callbacks of all sources that have one, at most once
 * per loop iteration each.  A failing prepare callback disables its
 * source.  NOTE(review): loop structure lines are elided in this view. */
2002 static int event_prepare(sd_event *e) {
/* Queue is ordered so already-prepared sources sink to the back. */
2010 s = prioq_peek(e->prepare);
2011 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
/* Stamp with the current iteration so we never prepare twice. */
2014 s->prepare_iteration = e->iteration;
2015 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
2021 s->dispatching = true;
2022 r = s->prepare(s, s->userdata);
2023 s->dispatching = false;
2026 log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));
2031 sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Dispatch the highest-priority enabled exit source, or finish the loop
 * if none remain.  Runs once per sd_event_run() call while exiting. */
2037 static int dispatch_exit(sd_event *e) {
2043 p = prioq_peek(e->exit);
/* No (enabled) exit sources left: the loop is done. */
2044 if (!p || p->enabled == SD_EVENT_OFF) {
2045 e->state = SD_EVENT_FINISHED;
2051 e->state = SD_EVENT_EXITING;
2053 r = source_dispatch(p);
/* Return to PASSIVE so the caller may invoke sd_event_run() again. */
2055 e->state = SD_EVENT_PASSIVE;
/* Return the highest-priority pending source that is enabled, or NULL.
 * NOTE(review): the NULL-check line is elided in this sampled view. */
2061 static sd_event_source* event_next_pending(sd_event *e) {
2066 p = prioq_peek(e->pending);
/* Disabled sources sort last in the pending queue, so if the head is
 * disabled there is nothing dispatchable. */
2070 if (p->enabled == SD_EVENT_OFF)
/* Program the watchdog timerfd to fire somewhere between 1/2 and 3/4 of
 * the watchdog period after the last ping, so we keep sd_notify()ing the
 * service manager in time. */
2076 static int arm_watchdog(sd_event *e) {
2077 struct itimerspec its = {};
2082 assert(e->watchdog_fd >= 0);
/* Pick a wakeup time in the [period/2, period*3/4] window, presumably
 * coalesced with other wakeups by sleep_between() — elided from view. */
2084 t = sleep_between(e,
2085 e->watchdog_last + (e->watchdog_period / 2),
2086 e->watchdog_last + (e->watchdog_period * 3 / 4));
2088 timespec_store(&its.it_value, t);
2090 /* Make sure we never set the watchdog to 0, which tells the
2091 * kernel to disable it. */
2092 if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
2093 its.it_value.tv_nsec = 1;
/* Absolute arming against CLOCK_MONOTONIC (fd created that way). */
2095 r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
/* Send a keep-alive ping to the service manager if at least a quarter of
 * the watchdog period has passed, then re-arm the watchdog timer. */
2102 static int process_watchdog(sd_event *e) {
2108 /* Don't notify watchdog too often */
2109 if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
2112 sd_notify(false, "WATCHDOG=1");
/* Record the ping time; arm_watchdog() schedules relative to it. */
2113 e->watchdog_last = e->timestamp.monotonic;
2115 return arm_watchdog(e);
/* Run one iteration of the event loop: prepare sources, arm the per-clock
 * timerfds, wait on epoll (up to `timeout` usec, (uint64_t)-1 = forever),
 * flush what fired, then dispatch one pending source.  NOTE(review):
 * several interior lines are elided in this sampled view. */
2118 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
2119 struct epoll_event *ev_queue;
2120 unsigned ev_queue_max;
2124 assert_return(e, -EINVAL);
2125 assert_return(!event_pid_changed(e), -ECHILD);
2126 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
/* Not re-entrant: only callable while the loop is idle. */
2127 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
/* Once an exit was requested, only exit sources run. */
2129 if (e->exit_requested)
2130 return dispatch_exit(e);
2134 e->state = SD_EVENT_RUNNING;
2136 r = event_prepare(e);
/* Arm one timerfd per clock with its nearest coalesced deadline. */
2140 r = event_arm_timer(e, &e->realtime);
2144 r = event_arm_timer(e, &e->monotonic);
2148 r = event_arm_timer(e, &e->realtime_alarm);
2152 r = event_arm_timer(e, &e->boottime_alarm);
/* Something is already dispatchable: poll without blocking (the
 * timeout override itself is elided from this view). */
2156 if (event_next_pending(e) || e->need_process_child)
/* One epoll slot per source, clamped to a sane fixed maximum. */
2159 ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
2160 ev_queue = newa(struct epoll_event, ev_queue_max);
/* Round the usec timeout up to whole milliseconds for epoll_wait(). */
2162 m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
2163 timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
/* EINTR/EAGAIN are not errors; report "something may have happened". */
2165 r = errno == EAGAIN || errno == EINTR ? 1 : -errno;
/* Take one consistent set of timestamps for this whole iteration. */
2169 dual_timestamp_get(&e->timestamp);
2170 e->timestamp_boottime = now(CLOCK_BOOTTIME);
/* Route each epoll event by the sentinel pointer it was registered
 * with; anything else is a regular I/O source pointer. */
2172 for (i = 0; i < m; i++) {
2174 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
2175 r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
2176 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
2177 r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
2178 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
2179 r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
2180 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
2181 r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
2182 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
2183 r = process_signal(e, ev_queue[i].events);
2184 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
2185 r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
2187 r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
2193 r = process_watchdog(e);
/* Mark elapsed timer sources pending, one pass per clock; alarm
 * clocks compare against the matching wall/boottime timestamps. */
2197 r = process_timer(e, e->timestamp.realtime, &e->realtime);
2201 r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
2205 r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
2209 r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);
/* SIGCHLD arrived at some point: rescan the child sources. */
2213 if (e->need_process_child) {
2214 r = process_child(e);
/* Dispatch exactly one source per iteration (if any is pending). */
2219 p = event_next_pending(e);
2225 r = source_dispatch(p);
2228 e->state = SD_EVENT_PASSIVE;
/* Convenience wrapper: run sd_event_run() with an infinite timeout until
 * the loop reaches FINISHED.  NOTE(review): the error-propagation and
 * return lines are elided in this sampled view. */
2234 _public_ int sd_event_loop(sd_event *e) {
2237 assert_return(e, -EINVAL);
2238 assert_return(!event_pid_changed(e), -ECHILD);
2239 assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2243 while (e->state != SD_EVENT_FINISHED) {
/* (uint64_t) -1 means "block indefinitely". */
2244 r = sd_event_run(e, (uint64_t) -1);
/* Accessor: report the loop's current SD_EVENT_* state (the return
 * statement is elided in this sampled view). */
2256 _public_ int sd_event_get_state(sd_event *e) {
2257 assert_return(e, -EINVAL);
2258 assert_return(!event_pid_changed(e), -ECHILD);
/* Return the exit code passed to sd_event_exit() via *code; fails (with
 * an elided error return) if no exit was requested yet. */
2263 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
2264 assert_return(e, -EINVAL);
2265 assert_return(code, -EINVAL);
2266 assert_return(!event_pid_changed(e), -ECHILD);
2268 if (!e->exit_requested)
2271 *code = e->exit_code;
/* Request loop termination with the given exit code; subsequent
 * sd_event_run() calls will dispatch exit sources and then finish. */
2275 _public_ int sd_event_exit(sd_event *e, int code) {
2276 assert_return(e, -EINVAL);
2277 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2278 assert_return(!event_pid_changed(e), -ECHILD);
2280 e->exit_requested = true;
2281 e->exit_code = code;
/* Return the timestamp of the current loop iteration for the requested
 * clock, so callbacks get a consistent notion of "now".  Falls back to
 * reading the clock directly (elided) before the first iteration. */
2286 _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
2287 assert_return(e, -EINVAL);
2288 assert_return(usec, -EINVAL);
2289 assert_return(!event_pid_changed(e), -ECHILD);
2291 /* If we haven't run yet, just get the actual time */
2292 if (!dual_timestamp_is_set(&e->timestamp))
/* Both realtime clocks share the wall-clock timestamp. */
2297 case CLOCK_REALTIME:
2298 case CLOCK_REALTIME_ALARM:
2299 *usec = e->timestamp.realtime;
2302 case CLOCK_MONOTONIC:
2303 *usec = e->timestamp.monotonic;
/* Boottime is sampled separately each iteration. */
2306 case CLOCK_BOOTTIME_ALARM:
2307 *usec = e->timestamp_boottime;
/* Return (and lazily create) the calling thread's default event loop.
 * NOTE(review): ref-count handling and the assignment of the new loop
 * to default_event are elided in this sampled view. */
2314 _public_ int sd_event_default(sd_event **ret) {
/* One default loop per thread. */
2316 static thread_local sd_event *default_event = NULL;
/* ret == NULL is a pure existence query. */
2321 return !!default_event;
2323 if (default_event) {
2324 *ret = sd_event_ref(default_event);
2328 r = sd_event_new(&e);
/* Lets the loop clear the thread-local slot when it is freed. */
2332 e->default_event_ptr = &default_event;
/* Report the thread id the loop is bound to (lookup and return lines
 * are elided in this sampled view). */
2340 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
2341 assert_return(e, -EINVAL);
2342 assert_return(tid, -EINVAL);
2343 assert_return(!event_pid_changed(e), -ECHILD);
/* Enable or disable sd_notify()-based watchdog pinging for this loop.
 * On enable: query the period from the environment, ping once, create
 * and arm a CLOCK_MONOTONIC timerfd, and register it with epoll.  On
 * disable (or error — the cleanup path): deregister and close the fd.
 * NOTE(review): interior lines are elided in this sampled view. */
2353 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
2356 assert_return(e, -EINVAL);
2357 assert_return(!event_pid_changed(e), -ECHILD);
/* Already in the requested state: nothing to do. */
2359 if (e->watchdog == !!b)
2363 struct epoll_event ev = {};
/* Reads WATCHDOG_USEC from the environment without unsetting it. */
2365 r = sd_watchdog_enabled(false, &e->watchdog_period);
2369 /* Issue first ping immediately */
2370 sd_notify(false, "WATCHDOG=1");
2371 e->watchdog_last = now(CLOCK_MONOTONIC);
2373 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
2374 if (e->watchdog_fd < 0)
2377 r = arm_watchdog(e);
2381 ev.events = EPOLLIN;
/* Sentinel pointer lets sd_event_run() recognize watchdog wakeups. */
2382 ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);
2384 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
/* Disable path: tear the watchdog timer down if it exists. */
2391 if (e->watchdog_fd >= 0) {
2392 epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
2393 e->watchdog_fd = safe_close(e->watchdog_fd);
/* Error cleanup: close the fd we may have just created. */
2401 e->watchdog_fd = safe_close(e->watchdog_fd);
2405 _public_ int sd_event_get_watchdog(sd_event *e) {
2406 assert_return(e, -EINVAL);
2407 assert_return(!event_pid_changed(e), -ECHILD);