1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2013 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/epoll.h>
23 #include <sys/timerfd.h>
28 #include "sd-daemon.h"
33 #include "time-util.h"
39 #define EPOLL_QUEUE_MAX 512U
40 #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
/* Enumeration of all event-source kinds handled by the loop. (Excerpt:
 * several enumerators, e.g. SOURCE_IO and SOURCE_TIME_REALTIME, are not
 * visible in this fragment but are referenced by code below.) */
42 typedef enum EventSourceType {
45 SOURCE_TIME_MONOTONIC,
46 SOURCE_TIME_REALTIME_ALARM,
47 SOURCE_TIME_BOOTTIME_ALARM,
/* NOTE(review): "_SOUFCE_" looks like a typo for "_SOURCE_"; other
 * references to this sentinel are not visible here, so renaming it must be
 * done file-wide in one step — confirm before fixing. */
54 _SOUFCE_EVENT_SOURCE_TYPE_MAX,
55 _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1
/* True for the four timer-based source types, i.e. those backed by a
 * clock_data/timerfd below. */
58 #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
/* One registered event source. The repeated "callback" members below are
 * presumably alternatives of a per-type union — TODO confirm against the
 * full struct definition (not fully visible in this excerpt). */
60 struct sd_event_source {
/* Optional per-source prepare hook, run before polling (see set_prepare()). */
65 sd_event_handler_t prepare;
/* Source kind, packed into 5 bits to keep the struct small. */
67 EventSourceType type:5;
/* Positions of this source inside the event loop's priority queues;
 * PRIOQ_IDX_NULL when not enqueued. */
73 unsigned pending_index;
74 unsigned prepare_index;
/* Loop-iteration counters used by the prioq comparators for ordering. */
75 unsigned pending_iteration;
76 unsigned prepare_iteration;
/* SOURCE_IO callback. */
80 sd_event_io_handler_t callback;
/* Timer sources: callback, absolute trigger time, allowed slack, and
 * positions in the per-clock earliest/latest prioqs. */
87 sd_event_time_handler_t callback;
88 usec_t next, accuracy;
89 unsigned earliest_index;
90 unsigned latest_index;
/* SOURCE_SIGNAL callback plus the siginfo captured from the signalfd. */
93 sd_event_signal_handler_t callback;
94 struct signalfd_siginfo siginfo;
/* SOURCE_CHILD callback. */
98 sd_event_child_handler_t callback;
/* Generic callbacks for defer/post/exit sources. */
104 sd_event_handler_t callback;
107 sd_event_handler_t callback;
110 sd_event_handler_t callback;
/* SOURCE_EXIT position in the exit prioq. */
111 unsigned prioq_index;
119 /* For all clocks we maintain two priority queues each, one
120 * ordered for the earliest times the events may be
121 * dispatched, and one ordered by the latest times they must
122 * have been dispatched. The range between the top entries in
123 * the two prioqs is the time window we can freely schedule
141 /* timerfd_create() only supports these four clocks so far. We
142 * can add support for more clocks when the kernel learns to
143 * deal with them, too. */
144 struct clock_data realtime;
145 struct clock_data monotonic;
146 struct clock_data realtime_alarm;
147 struct clock_data boottime_alarm;
/* Signal handling: one slot per signal number (_NSIG entries), allocated
 * lazily in sd_event_add_signal(). */
152 sd_event_source **signal_sources;
/* Child-watch sources, keyed by pid; n_enabled_child_sources gates whether
 * SIGCHLD stays in the signalfd mask. */
154 Hashmap *child_sources;
155 unsigned n_enabled_child_sources;
/* Timestamps taken per loop iteration (realtime/monotonic pair plus a
 * separate boottime reading). */
164 dual_timestamp timestamp;
165 usec_t timestamp_boottime;
168 bool exit_requested:1;
169 bool need_process_child:1;
/* Points at the static default-event pointer, cleared in event_free(). */
175 sd_event **default_event_ptr;
/* Watchdog keep-alive bookkeeping (sd_notify-based). */
177 usec_t watchdog_last, watchdog_period;
/* Prioq comparator for the "pending" queue: enabled before disabled, then
 * lower priority value, then older pending_iteration, then a stable
 * tie-break. Must implement a total order for prioq correctness. */
182 static int pending_prioq_compare(const void *a, const void *b) {
183 const sd_event_source *x = a, *y = b;
188 /* Enabled ones first */
189 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
191 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
194 /* Lower priority values first */
195 if (x->priority < y->priority)
197 if (x->priority > y->priority)
200 /* Older entries first */
201 if (x->pending_iteration < y->pending_iteration)
203 if (x->pending_iteration > y->pending_iteration)
206 /* Stability for the rest */
/* Prioq comparator for the "prepare" queue. Iteration count is compared
 * FIRST (unlike pending_prioq_compare) so already-prepared sources sink to
 * the back and iteration can stop early. */
215 static int prepare_prioq_compare(const void *a, const void *b) {
216 const sd_event_source *x = a, *y = b;
221 /* Move most recently prepared ones last, so that we can stop
222 * preparing as soon as we hit one that has already been
223 * prepared in the current iteration */
224 if (x->prepare_iteration < y->prepare_iteration)
226 if (x->prepare_iteration > y->prepare_iteration)
229 /* Enabled ones first */
230 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
232 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
235 /* Lower priority values first */
236 if (x->priority < y->priority)
238 if (x->priority > y->priority)
241 /* Stability for the rest */
/* Comparator for a clock's "earliest" prioq: orders timer sources by the
 * earliest time they may fire (time.next). Disabled and already-pending
 * entries are pushed to the back so prioq_peek() yields the next armable
 * timer. Both entries must belong to the same clock. */
250 static int earliest_time_prioq_compare(const void *a, const void *b) {
251 const sd_event_source *x = a, *y = b;
253 assert(EVENT_SOURCE_IS_TIME(x->type));
254 assert(x->type == y->type);
256 /* Enabled ones first */
257 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
259 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
262 /* Move the pending ones to the end */
263 if (!x->pending && y->pending)
265 if (x->pending && !y->pending)
269 if (x->time.next < y->time.next)
271 if (x->time.next > y->time.next)
274 /* Stability for the rest */
/* Comparator for a clock's "latest" prioq: orders timer sources by the
 * deadline next+accuracy, i.e. the latest moment dispatch is still within
 * the allowed slack. */
283 static int latest_time_prioq_compare(const void *a, const void *b) {
284 const sd_event_source *x = a, *y = b;
286 assert(EVENT_SOURCE_IS_TIME(x->type));
287 assert(x->type == y->type);
289 /* Enabled ones first */
290 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
292 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
295 /* Move the pending ones to the end */
296 if (!x->pending && y->pending)
298 if (x->pending && !y->pending)
/* NOTE(review): next + accuracy can wrap for values near (usec_t) -1 —
 * confirm callers clamp before relying on this ordering. */
302 if (x->time.next + x->time.accuracy < y->time.next + y->time.accuracy)
304 if (x->time.next + x->time.accuracy > y->time.next + y->time.accuracy)
307 /* Stability for the rest */
/* Comparator for the exit-source prioq: enabled before disabled, then lower
 * priority value, then stable tie-break. Only SOURCE_EXIT entries allowed. */
316 static int exit_prioq_compare(const void *a, const void *b) {
317 const sd_event_source *x = a, *y = b;
319 assert(x->type == SOURCE_EXIT);
320 assert(y->type == SOURCE_EXIT);
322 /* Enabled ones first */
323 if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
325 if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
328 /* Lower priority values first */
329 if (x->priority < y->priority)
331 if (x->priority > y->priority)
334 /* Stability for the rest */
/* Release the two prioqs of one per-clock clock_data. (The timerfd close is
 * not visible in this excerpt — presumably handled here too; confirm.) */
343 static void free_clock_data(struct clock_data *d) {
347 prioq_free(d->earliest);
348 prioq_free(d->latest);
/* Tear down a fully unreferenced event loop: must only run once all
 * sources are gone (n_sources == 0). Clears the default-event pointer so a
 * later sd_event_default() allocates a fresh loop. */
351 static void event_free(sd_event *e) {
353 assert(e->n_sources == 0);
355 if (e->default_event_ptr)
356 *(e->default_event_ptr) = NULL;
/* safe_close() tolerates -1 fds, so unopened descriptors are fine here. */
358 safe_close(e->epoll_fd);
359 safe_close(e->signal_fd);
360 safe_close(e->watchdog_fd);
362 free_clock_data(&e->realtime);
363 free_clock_data(&e->monotonic);
364 free_clock_data(&e->realtime_alarm);
365 free_clock_data(&e->boottime_alarm);
367 prioq_free(e->pending);
368 prioq_free(e->prepare);
371 free(e->signal_sources);
373 hashmap_free(e->child_sources);
374 set_free(e->post_sources);
/* Allocate a new event loop object with refcount 1. All fds start at -1
 * (lazily opened), all timer deadlines at (usec_t) -1 ("unset"). Returns 0
 * on success, negative errno on failure. */
378 _public_ int sd_event_new(sd_event** ret) {
382 assert_return(ret, -EINVAL);
384 e = new0(sd_event, 1);
389 e->signal_fd = e->watchdog_fd = e->epoll_fd = e->realtime.fd = e->monotonic.fd = e->realtime_alarm.fd = e->boottime_alarm.fd = -1;
390 e->realtime.next = e->monotonic.next = e->realtime_alarm.next = e->boottime_alarm.next = (usec_t) -1;
/* Remember the creating pid so use across fork() can be detected. */
391 e->original_pid = getpid();
392 e->perturb = (usec_t) -1;
394 assert_se(sigemptyset(&e->sigset) == 0);
396 e->pending = prioq_new(pending_prioq_compare);
402 e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
403 if (e->epoll_fd < 0) {
/* Increment the loop's reference count; returns e unchanged. */
416 _public_ sd_event* sd_event_ref(sd_event *e) {
417 assert_return(e, NULL);
419 assert(e->n_ref >= 1);
/* Drop a reference; frees the loop when the count reaches zero. NULL-safe. */
425 _public_ sd_event* sd_event_unref(sd_event *e) {
430 assert(e->n_ref >= 1);
/* Returns true if the loop is being used from a different process than the
 * one that created it. */
439 static bool event_pid_changed(sd_event *e) {
442 /* We don't support people creating an event loop and keeping
443 * it around over a fork(). Let's complain. */
445 return e->original_pid != getpid();
/* Remove an IO source's fd from the epoll set, if it is registered.
 * Idempotent: a no-op when io.registered is false. */
448 static int source_io_unregister(sd_event_source *s) {
452 assert(s->type == SOURCE_IO);
454 if (!s->io.registered)
457 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
461 s->io.registered = false;
/* Register (or re-register) an IO source's fd with epoll for the given
 * event mask. SD_EVENT_ONESHOT maps to EPOLLONESHOT so the kernel disarms
 * the fd after one wakeup. */
465 static int source_io_register(
470 struct epoll_event ev = {};
474 assert(s->type == SOURCE_IO);
475 assert(enabled != SD_EVENT_OFF);
480 if (enabled == SD_EVENT_ONESHOT)
481 ev.events |= EPOLLONESHOT;
/* MOD when already in the epoll set, ADD on first registration. */
483 if (s->io.registered)
484 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
486 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
491 s->io.registered = true;
/* Map a timer source type to its POSIX clock id; (clockid_t) -1 for
 * non-timer types. */
496 static clockid_t event_source_type_to_clock(EventSourceType t) {
500 case SOURCE_TIME_REALTIME:
501 return CLOCK_REALTIME;
503 case SOURCE_TIME_MONOTONIC:
504 return CLOCK_MONOTONIC;
506 case SOURCE_TIME_REALTIME_ALARM:
507 return CLOCK_REALTIME_ALARM;
509 case SOURCE_TIME_BOOTTIME_ALARM:
510 return CLOCK_BOOTTIME_ALARM;
513 return (clockid_t) -1;
/* Inverse of the above: POSIX clock id -> source type, or
 * _SOURCE_EVENT_SOURCE_TYPE_INVALID for unsupported clocks. */
517 static EventSourceType clock_to_event_source_type(clockid_t clock) {
522 return SOURCE_TIME_REALTIME;
524 case CLOCK_MONOTONIC:
525 return SOURCE_TIME_MONOTONIC;
527 case CLOCK_REALTIME_ALARM:
528 return SOURCE_TIME_REALTIME_ALARM;
530 case CLOCK_BOOTTIME_ALARM:
531 return SOURCE_TIME_BOOTTIME_ALARM;
534 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
/* Return the per-clock bookkeeping (fd, next deadline, two prioqs) inside
 * the loop object for a given timer source type. */
538 static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
543 case SOURCE_TIME_REALTIME:
546 case SOURCE_TIME_MONOTONIC:
547 return &e->monotonic;
549 case SOURCE_TIME_REALTIME_ALARM:
550 return &e->realtime_alarm;
552 case SOURCE_TIME_BOOTTIME_ALARM:
553 return &e->boottime_alarm;
/* Detach a source from every loop-side data structure it may live in
 * (epoll set, prioqs, signal mask, child map, post set) and drop its loop
 * reference. Which branches run depends on s->type. */
560 static void source_free(sd_event_source *s) {
564 assert(s->event->n_sources > 0);
570 source_io_unregister(s);
574 case SOURCE_TIME_REALTIME:
575 case SOURCE_TIME_MONOTONIC:
576 case SOURCE_TIME_REALTIME_ALARM:
577 case SOURCE_TIME_BOOTTIME_ALARM: {
578 struct clock_data *d;
580 d = event_get_clock_data(s->event, s->type);
583 prioq_remove(d->earliest, s, &s->time.earliest_index);
584 prioq_remove(d->latest, s, &s->time.latest_index);
/* Signal sources: only drop the signal from the mask if no child-watch
 * still needs SIGCHLD. */
589 if (s->signal.sig > 0) {
590 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0)
591 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
593 if (s->event->signal_sources)
594 s->event->signal_sources[s->signal.sig] = NULL;
/* Child sources: maintain the enabled-count and release SIGCHLD from the
 * mask unless an explicit SIGCHLD source still exists. */
600 if (s->child.pid > 0) {
601 if (s->enabled != SD_EVENT_OFF) {
602 assert(s->event->n_enabled_child_sources > 0);
603 s->event->n_enabled_child_sources--;
606 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD])
607 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
609 hashmap_remove(s->event->child_sources, INT_TO_PTR(s->child.pid));
619 set_remove(s->event->post_sources, s);
623 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
627 assert_not_reached("Wut? I shouldn't exist.");
631 prioq_remove(s->event->pending, s, &s->pending_index);
634 prioq_remove(s->event->prepare, s, &s->prepare_index);
636 s->event->n_sources--;
637 sd_event_unref(s->event);
/* Flip a source's pending state: enqueue into / remove from the pending
 * prioq, and reshuffle the clock prioqs for timer sources (their
 * comparators order on the pending flag). SOURCE_EXIT never goes pending. */
643 static int source_set_pending(sd_event_source *s, bool b) {
647 assert(s->type != SOURCE_EXIT);
655 s->pending_iteration = s->event->iteration;
657 r = prioq_put(s->event->pending, s, &s->pending_index);
663 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
665 if (EVENT_SOURCE_IS_TIME(s->type)) {
666 struct clock_data *d;
668 d = event_get_clock_data(s->event, s->type);
671 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
672 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Allocate a zeroed source of the given type, take a loop reference, and
 * mark the prioq indexes as "not enqueued". */
678 static sd_event_source *source_new(sd_event *e, EventSourceType type) {
683 s = new0(sd_event_source, 1);
688 s->event = sd_event_ref(e);
690 s->pending_index = s->prepare_index = PRIOQ_IDX_NULL;
/* Public API: register an fd-based source. Validates the epoll event mask,
 * creates the source enabled (SD_EVENT_ON) and registers it with epoll
 * immediately. */
697 _public_ int sd_event_add_io(
699 sd_event_source **ret,
702 sd_event_io_handler_t callback,
708 assert_return(e, -EINVAL);
709 assert_return(fd >= 0, -EINVAL);
710 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
711 assert_return(callback, -EINVAL);
712 assert_return(ret, -EINVAL);
713 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
714 assert_return(!event_pid_changed(e), -ECHILD);
716 s = source_new(e, SOURCE_IO);
721 s->io.events = events;
722 s->io.callback = callback;
723 s->userdata = userdata;
724 s->enabled = SD_EVENT_ON;
726 r = source_io_register(s, s->enabled, events);
/* Lazily create the timerfd for one clock and add it to the epoll set; a
 * no-op if d->fd is already open. The epoll data ptr encodes the source
 * TYPE (not a source pointer) so the dispatcher can tell timer fds apart. */
736 static int event_setup_timer_fd(
738 struct clock_data *d,
741 sd_id128_t bootid = {};
742 struct epoll_event ev = {};
748 if (_likely_(d->fd >= 0))
751 fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
756 ev.data.ptr = INT_TO_PTR(clock_to_event_source_type(clock));
758 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
766 /* When we sleep for longer, we try to realign the wakeup to
767 the same time within each minute/second/250ms, so that
768 events all across the system can be coalesced into a single
769 CPU wakeup. However, let's take some system-specific
770 randomness for this value, so that in a network of systems
771 with synced clocks timer events are distributed a
772 bit. Here, we calculate a perturbation usec offset from the
775 if (e->perturb == (usec_t) -1)
776 if (sd_id128_get_boot(&bootid) >= 0)
777 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
/* Public API: register a one-shot timer on the given clock. Lazily
 * allocates the clock's earliest/latest prioqs and its timerfd, then
 * enqueues the new source into both queues. accuracy == 0 selects the
 * default slack (DEFAULT_ACCURACY_USEC). */
782 _public_ int sd_event_add_time(
784 sd_event_source **ret,
788 sd_event_time_handler_t callback,
791 EventSourceType type;
793 struct clock_data *d;
796 assert_return(e, -EINVAL);
797 assert_return(ret, -EINVAL);
798 assert_return(usec != (uint64_t) -1, -EINVAL);
799 assert_return(accuracy != (uint64_t) -1, -EINVAL);
800 assert_return(callback, -EINVAL);
801 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
802 assert_return(!event_pid_changed(e), -ECHILD);
/* Unsupported clocks map to a negative type and are rejected. */
804 type = clock_to_event_source_type(clock);
805 assert_return(type >= 0, -ENOTSUP);
807 d = event_get_clock_data(e, type);
811 d->earliest = prioq_new(earliest_time_prioq_compare);
817 d->latest = prioq_new(latest_time_prioq_compare);
823 r = event_setup_timer_fd(e, d, clock);
828 s = source_new(e, type);
833 s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
834 s->time.callback = callback;
835 s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
836 s->userdata = userdata;
837 s->enabled = SD_EVENT_ONESHOT;
839 r = prioq_put(d->earliest, s, &s->time.earliest_index);
843 r = prioq_put(d->latest, s, &s->time.latest_index);
/* (Re)create the loop's signalfd from the current sigset; adds it to the
 * epoll set on first creation, closing the fd again if that fails. */
855 static int event_update_signal_fd(sd_event *e) {
856 struct epoll_event ev = {};
862 add_to_epoll = e->signal_fd < 0;
/* signalfd() with an existing fd updates its mask in place. */
864 r = signalfd(e->signal_fd, &e->sigset, SFD_NONBLOCK|SFD_CLOEXEC);
874 ev.data.ptr = INT_TO_PTR(SOURCE_SIGNAL);
876 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->signal_fd, &ev);
878 e->signal_fd = safe_close(e->signal_fd);
/* Public API: register a handler for one signal. The signal must already
 * be blocked in the calling thread (checked via pthread_sigmask), since
 * signalfd only sees blocked signals. Only one source per signal number. */
885 _public_ int sd_event_add_signal(
887 sd_event_source **ret,
889 sd_event_signal_handler_t callback,
896 assert_return(e, -EINVAL);
897 assert_return(sig > 0, -EINVAL);
898 assert_return(sig < _NSIG, -EINVAL);
899 assert_return(callback, -EINVAL);
900 assert_return(ret, -EINVAL);
901 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
902 assert_return(!event_pid_changed(e), -ECHILD);
904 r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
908 if (!sigismember(&ss, sig))
911 if (!e->signal_sources) {
912 e->signal_sources = new0(sd_event_source*, _NSIG);
913 if (!e->signal_sources)
915 } else if (e->signal_sources[sig])
918 s = source_new(e, SOURCE_SIGNAL);
923 s->signal.callback = callback;
924 s->userdata = userdata;
925 s->enabled = SD_EVENT_ON;
927 e->signal_sources[sig] = s;
928 assert_se(sigaddset(&e->sigset, sig) == 0);
/* If SIGCHLD is already watched via child sources the signalfd mask is
 * up to date; otherwise refresh it. */
930 if (sig != SIGCHLD || e->n_enabled_child_sources == 0) {
931 r = event_update_signal_fd(e);
/* Public API: watch a child process via SIGCHLD + waitid(). One source per
 * pid; options are the waitid() WEXITED/WSTOPPED/WCONTINUED flags. Sets
 * need_process_child so an already-dead child is noticed on the next loop
 * iteration. */
942 _public_ int sd_event_add_child(
944 sd_event_source **ret,
947 sd_event_child_handler_t callback,
953 assert_return(e, -EINVAL);
954 assert_return(pid > 1, -EINVAL);
955 assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
956 assert_return(options != 0, -EINVAL);
957 assert_return(callback, -EINVAL);
958 assert_return(ret, -EINVAL);
959 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
960 assert_return(!event_pid_changed(e), -ECHILD);
962 r = hashmap_ensure_allocated(&e->child_sources, trivial_hash_func, trivial_compare_func);
966 if (hashmap_contains(e->child_sources, INT_TO_PTR(pid)))
969 s = source_new(e, SOURCE_CHILD);
974 s->child.options = options;
975 s->child.callback = callback;
976 s->userdata = userdata;
977 s->enabled = SD_EVENT_ONESHOT;
979 r = hashmap_put(e->child_sources, INT_TO_PTR(pid), s);
985 e->n_enabled_child_sources ++;
987 assert_se(sigaddset(&e->sigset, SIGCHLD) == 0);
/* Only refresh the signalfd if SIGCHLD was not already in the mask via an
 * explicit signal source. */
989 if (!e->signal_sources || !e->signal_sources[SIGCHLD]) {
990 r = event_update_signal_fd(e);
997 e->need_process_child = true;
/* Public API: a defer source runs once on the next iteration — created
 * one-shot and immediately marked pending. */
1003 _public_ int sd_event_add_defer(
1005 sd_event_source **ret,
1006 sd_event_handler_t callback,
1012 assert_return(e, -EINVAL);
1013 assert_return(callback, -EINVAL);
1014 assert_return(ret, -EINVAL);
1015 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1016 assert_return(!event_pid_changed(e), -ECHILD);
1018 s = source_new(e, SOURCE_DEFER);
1022 s->defer.callback = callback;
1023 s->userdata = userdata;
1024 s->enabled = SD_EVENT_ONESHOT;
1026 r = source_set_pending(s, true);
/* Public API: a post source runs after any non-post source is dispatched
 * (see source_dispatch, which marks all post sources pending). */
1036 _public_ int sd_event_add_post(
1038 sd_event_source **ret,
1039 sd_event_handler_t callback,
1045 assert_return(e, -EINVAL);
1046 assert_return(callback, -EINVAL);
1047 assert_return(ret, -EINVAL);
1048 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1049 assert_return(!event_pid_changed(e), -ECHILD);
1051 r = set_ensure_allocated(&e->post_sources, trivial_hash_func, trivial_compare_func);
1055 s = source_new(e, SOURCE_POST);
1059 s->post.callback = callback;
1060 s->userdata = userdata;
1061 s->enabled = SD_EVENT_ON;
1063 r = set_put(e->post_sources, s);
/* Public API: an exit source runs during loop shutdown, ordered by
 * priority via the dedicated exit prioq (allocated lazily here). */
1073 _public_ int sd_event_add_exit(
1075 sd_event_source **ret,
1076 sd_event_handler_t callback,
1082 assert_return(e, -EINVAL);
1083 assert_return(callback, -EINVAL);
1084 assert_return(ret, -EINVAL);
1085 assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
1086 assert_return(!event_pid_changed(e), -ECHILD);
1089 e->exit = prioq_new(exit_prioq_compare);
1094 s = source_new(e, SOURCE_EXIT);
1098 s->exit.callback = callback;
1099 s->userdata = userdata;
1100 s->exit.prioq_index = PRIOQ_IDX_NULL;
1101 s->enabled = SD_EVENT_ONESHOT;
1103 r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
/* Increment a source's reference count; returns s unchanged. */
1113 _public_ sd_event_source* sd_event_source_ref(sd_event_source *s) {
1114 assert_return(s, NULL);
1116 assert(s->n_ref >= 1);
/* Drop a reference. NULL-safe. When the count hits zero the source is
 * freed — except while its own handler is running (see below). */
1122 _public_ sd_event_source* sd_event_source_unref(sd_event_source *s) {
1127 assert(s->n_ref >= 1);
1130 if (s->n_ref <= 0) {
1131 /* Here's a special hack: when we are called from a
1132 * dispatch handler we won't free the event source
1133 * immediately, but we will detach the fd from the
1134 * epoll. This way it is safe for the caller to unref
1135 * the event source and immediately close the fd, but
1136 * we still retain a valid event source object after
1139 if (s->dispatching) {
1140 if (s->type == SOURCE_IO)
1141 source_io_unregister(s);
/* Accessor: the loop this source belongs to. */
1149 _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
1150 assert_return(s, NULL);
/* Accessor: whether the source is currently queued for dispatch. Exit
 * sources have no pending state (-EDOM). */
1155 _public_ int sd_event_source_get_pending(sd_event_source *s) {
1156 assert_return(s, -EINVAL);
1157 assert_return(s->type != SOURCE_EXIT, -EDOM);
1158 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1159 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Accessor: the fd of an IO source. */
1164 _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
1165 assert_return(s, -EINVAL);
1166 assert_return(s->type == SOURCE_IO, -EDOM);
1167 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Swap the fd an IO source watches. If the new registration fails the old
 * fd is restored; on success the old fd is removed from the epoll set. */
1172 _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
1175 assert_return(s, -EINVAL);
1176 assert_return(fd >= 0, -EINVAL);
1177 assert_return(s->type == SOURCE_IO, -EDOM);
1178 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Disabled sources are not in the epoll set: just record the fd. */
1183 if (s->enabled == SD_EVENT_OFF) {
1185 s->io.registered = false;
1189 saved_fd = s->io.fd;
1190 assert(s->io.registered);
1193 s->io.registered = false;
1195 r = source_io_register(s, s->enabled, s->io.events);
/* Roll back to the previous fd on registration failure. */
1197 s->io.fd = saved_fd;
1198 s->io.registered = true;
1202 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
/* Accessor: the epoll event mask the source is armed with. */
1208 _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
1209 assert_return(s, -EINVAL);
1210 assert_return(events, -EINVAL);
1211 assert_return(s->type == SOURCE_IO, -EDOM);
1212 assert_return(!event_pid_changed(s->event), -ECHILD);
1214 *events = s->io.events;
/* Change the watched event mask, re-registering with epoll if the source
 * is enabled, and clearing any stale pending state. */
1218 _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
1221 assert_return(s, -EINVAL);
1222 assert_return(s->type == SOURCE_IO, -EDOM);
1223 assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
1224 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1225 assert_return(!event_pid_changed(s->event), -ECHILD);
1227 if (s->io.events == events)
1230 if (s->enabled != SD_EVENT_OFF) {
1231 r = source_io_register(s, s->enabled, events);
1236 s->io.events = events;
1237 source_set_pending(s, false);
/* Accessor: the triggered events — only valid while the source is pending
 * (-ENODATA otherwise). */
1242 _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
1243 assert_return(s, -EINVAL);
1244 assert_return(revents, -EINVAL);
1245 assert_return(s->type == SOURCE_IO, -EDOM);
1246 assert_return(s->pending, -ENODATA);
1247 assert_return(!event_pid_changed(s->event), -ECHILD);
1249 *revents = s->io.revents;
/* Accessor: the signal number of a signal source. */
1253 _public_ int sd_event_source_get_signal(sd_event_source *s) {
1254 assert_return(s, -EINVAL);
1255 assert_return(s->type == SOURCE_SIGNAL, -EDOM);
1256 assert_return(!event_pid_changed(s->event), -ECHILD);
1258 return s->signal.sig;
/* Accessor: the source's dispatch priority (lower values dispatch first). */
1261 _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
1262 assert_return(s, -EINVAL);
1263 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Change the dispatch priority and reshuffle every prioq whose ordering
 * depends on it (pending, prepare, and — for exit sources — exit). */
1268 _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
1269 assert_return(s, -EINVAL);
1270 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1271 assert_return(!event_pid_changed(s->event), -ECHILD);
1273 if (s->priority == priority)
1276 s->priority = priority;
1279 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1282 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
1284 if (s->type == SOURCE_EXIT)
1285 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
/* Accessor: the enable state (SD_EVENT_ON/OFF/ONESHOT). */
1290 _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
1291 assert_return(s, -EINVAL);
1292 assert_return(m, -EINVAL);
1293 assert_return(!event_pid_changed(s->event), -ECHILD);
/* Change a source's enable state. Per type this means: (un)registering an
 * IO fd with epoll, reshuffling a timer's clock prioqs, adding/removing a
 * signal from the signalfd mask, or adjusting the enabled-child count and
 * SIGCHLD mask. Finally the pending/prepare prioqs are reshuffled since
 * their comparators order on the enabled flag. */
1299 _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
1302 assert_return(s, -EINVAL);
1303 assert_return(m == SD_EVENT_OFF || m == SD_EVENT_ON || m == SD_EVENT_ONESHOT, -EINVAL);
1304 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1305 assert_return(!event_pid_changed(s->event), -ECHILD);
1307 if (s->enabled == m)
/* --- Disabling path --- */
1310 if (m == SD_EVENT_OFF) {
1315 r = source_io_unregister(s);
1322 case SOURCE_TIME_REALTIME:
1323 case SOURCE_TIME_MONOTONIC:
1324 case SOURCE_TIME_REALTIME_ALARM:
1325 case SOURCE_TIME_BOOTTIME_ALARM: {
1326 struct clock_data *d;
1329 d = event_get_clock_data(s->event, s->type);
1332 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1333 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Keep SIGCHLD in the mask while enabled child sources still need it. */
1339 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1340 assert_se(sigdelset(&s->event->sigset, s->signal.sig) == 0);
1341 event_update_signal_fd(s->event);
1349 assert(s->event->n_enabled_child_sources > 0);
1350 s->event->n_enabled_child_sources--;
1352 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1353 assert_se(sigdelset(&s->event->sigset, SIGCHLD) == 0);
1354 event_update_signal_fd(s->event);
1361 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1370 assert_not_reached("Wut? I shouldn't exist.");
/* --- Enabling path (ON or ONESHOT) --- */
1377 r = source_io_register(s, m, s->io.events);
1384 case SOURCE_TIME_REALTIME:
1385 case SOURCE_TIME_MONOTONIC:
1386 case SOURCE_TIME_REALTIME_ALARM:
1387 case SOURCE_TIME_BOOTTIME_ALARM: {
1388 struct clock_data *d;
1391 d = event_get_clock_data(s->event, s->type);
1394 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1395 prioq_reshuffle(d->latest, s, &s->time.latest_index);
1402 if (s->signal.sig != SIGCHLD || s->event->n_enabled_child_sources == 0) {
1403 assert_se(sigaddset(&s->event->sigset, s->signal.sig) == 0);
1404 event_update_signal_fd(s->event);
1409 if (s->enabled == SD_EVENT_OFF) {
1410 s->event->n_enabled_child_sources++;
1412 if (!s->event->signal_sources || !s->event->signal_sources[SIGCHLD]) {
1413 assert_se(sigaddset(&s->event->sigset, SIGCHLD) == 0);
1414 event_update_signal_fd(s->event);
1423 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
1432 assert_not_reached("Wut? I shouldn't exist.");
1437 prioq_reshuffle(s->event->pending, s, &s->pending_index);
1440 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
/* Accessor: a timer source's absolute trigger time. */
1445 _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
1446 assert_return(s, -EINVAL);
1447 assert_return(usec, -EINVAL);
1448 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1449 assert_return(!event_pid_changed(s->event), -ECHILD);
1451 *usec = s->time.next;
/* Re-arm a timer source for a new absolute time: clear pending state and
 * reshuffle both clock prioqs so the loop picks up the new deadline. */
1455 _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
1456 struct clock_data *d;
1458 assert_return(s, -EINVAL);
1459 assert_return(usec != (uint64_t) -1, -EINVAL);
1460 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1461 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1462 assert_return(!event_pid_changed(s->event), -ECHILD);
1464 s->time.next = usec;
1466 source_set_pending(s, false);
1468 d = event_get_clock_data(s->event, s->type);
1471 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1472 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Accessor: a timer source's dispatch slack. */
1477 _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
1478 assert_return(s, -EINVAL);
1479 assert_return(usec, -EINVAL);
1480 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1481 assert_return(!event_pid_changed(s->event), -ECHILD);
1483 *usec = s->time.accuracy;
/* Change the slack (0 selects the default); only the latest prioq depends
 * on accuracy, so only it needs reshuffling. */
1487 _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
1488 struct clock_data *d;
1490 assert_return(s, -EINVAL);
1491 assert_return(usec != (uint64_t) -1, -EINVAL);
1492 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1493 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1494 assert_return(!event_pid_changed(s->event), -ECHILD);
1497 usec = DEFAULT_ACCURACY_USEC;
1499 s->time.accuracy = usec;
1501 source_set_pending(s, false);
1503 d = event_get_clock_data(s->event, s->type);
1506 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Accessor: the POSIX clock a timer source runs on. */
1511 _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
1512 assert_return(s, -EINVAL);
1513 assert_return(clock, -EINVAL);
1514 assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
1515 assert_return(!event_pid_changed(s->event), -ECHILD);
1517 *clock = event_source_type_to_clock(s->type);
/* Accessor: the watched pid of a child source. */
1521 _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
1522 assert_return(s, -EINVAL);
1523 assert_return(pid, -EINVAL);
1524 assert_return(s->type == SOURCE_CHILD, -EDOM);
1525 assert_return(!event_pid_changed(s->event), -ECHILD);
1527 *pid = s->child.pid;
/* Install, replace, or (with NULL) remove a prepare callback. Sources with
 * a prepare callback live in the prepare prioq; the prioq membership is
 * adjusted to match. */
1531 _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
1534 assert_return(s, -EINVAL);
1535 assert_return(s->type != SOURCE_EXIT, -EDOM);
1536 assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
1537 assert_return(!event_pid_changed(s->event), -ECHILD);
1539 if (s->prepare == callback)
/* Replacing one callback with another: already in the prioq, just swap. */
1542 if (callback && s->prepare) {
1543 s->prepare = callback;
1547 r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
1551 s->prepare = callback;
1554 r = prioq_put(s->event->prepare, s, &s->prepare_index);
1558 prioq_remove(s->event->prepare, s, &s->prepare_index);
/* Accessor: the opaque userdata pointer passed to callbacks. */
1563 _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
1564 assert_return(s, NULL);
/* Replace the userdata pointer; returns the previous value. */
1569 _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
1572 assert_return(s, NULL);
1575 s->userdata = userdata;
/* Pick a wakeup time in [a, b], preferring system-wide coalescing points
 * (per-minute, 10s, 1s, 250ms boundaries shifted by the boot-id-derived
 * perturbation) and falling back to the latest allowed time. */
1580 static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
1592 Find a good time to wake up again between times a and b. We
1593 have two goals here:
1595 a) We want to wake up as seldom as possible, hence prefer
1596 later times over earlier times.
1598 b) But if we have to wake up, then let's make sure to
1599 dispatch as much as possible on the entire system.
1601 We implement this by waking up everywhere at the same time
1602 within any given minute if we can, synchronised via the
1603 perturbation value determined from the boot ID. If we can't,
1604 then we try to find the same spot in every 10s, then 1s and
1605 then 250ms step. Otherwise, we pick the last possible time
1609 c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
/* Guard against underflow before stepping back one interval. */
1611 if (_unlikely_(c < USEC_PER_MINUTE))
1614 c -= USEC_PER_MINUTE;
1620 c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
1622 if (_unlikely_(c < USEC_PER_SEC*10))
1625 c -= USEC_PER_SEC*10;
1631 c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
1633 if (_unlikely_(c < USEC_PER_SEC))
1642 c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
1644 if (_unlikely_(c < USEC_PER_MSEC*250))
1647 c -= USEC_PER_MSEC*250;
/* Program one clock's timerfd from its prioqs: disarm (set to "unset")
 * when no enabled timer exists, otherwise arm for a coalesced wakeup time
 * between the earliest allowed and latest required dispatch times. */
1656 static int event_arm_timer(
1658 struct clock_data *d) {
1660 struct itimerspec its = {};
1661 sd_event_source *a, *b;
1668 a = prioq_peek(d->earliest);
1669 if (!a || a->enabled == SD_EVENT_OFF) {
/* Already disarmed — nothing to do. */
1674 if (d->next == (usec_t) -1)
1678 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
1682 d->next = (usec_t) -1;
1686 b = prioq_peek(d->latest);
1687 assert_se(b && b->enabled != SD_EVENT_OFF);
1689 t = sleep_between(e, a->time.next, b->time.next + b->time.accuracy);
1693 assert_se(d->fd >= 0);
1696 /* We don't want to disarm here, just mean some time looooong ago. */
1697 its.it_value.tv_sec = 0;
1698 its.it_value.tv_nsec = 1;
1700 timespec_store(&its.it_value, t);
1702 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
/* Record epoll results for an IO source and mark it pending. */
1710 static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
1713 assert(s->type == SOURCE_IO);
1715 /* If the event source was already pending, we just OR in the
1716 * new revents, otherwise we reset the value. The ORing is
1717 * necessary to handle EPOLLONESHOT events properly where
1718 * readability might happen independently of writability, and
1719 * we need to keep track of both */
1722 s->io.revents |= revents;
1724 s->io.revents = revents;
1726 return source_set_pending(s, true);
/* Drain a timerfd's expiry counter and reset the cached next-elapse time
 * so the timer gets re-armed. EAGAIN/EINTR on the read are tolerated. */
1729 static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
1736 assert_return(events == EPOLLIN, -EIO);
1738 ss = read(fd, &x, sizeof(x));
1740 if (errno == EAGAIN || errno == EINTR)
1746 if (_unlikely_(ss != sizeof(x)))
1750 *next = (usec_t) -1;
/* Mark every elapsed timer source of one clock pending, reshuffling the
 * prioqs after each so prioq_peek() advances (loop body truncated in this
 * excerpt). */
1755 static int process_timer(
1758 struct clock_data *d) {
1767 s = prioq_peek(d->earliest);
1770 s->enabled == SD_EVENT_OFF ||
1774 r = source_set_pending(s, true);
1778 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
1779 prioq_reshuffle(d->latest, s, &s->time.latest_index);
/* Poll every watched child with waitid(WNOHANG) and mark sources whose
 * child changed state as pending. Children are deliberately NOT reaped
 * here (WNOWAIT) so the later callback still sees the zombie. */
1785 static int process_child(sd_event *e) {
1792 e->need_process_child = false;
1795 So, this is ugly. We iteratively invoke waitid() with P_PID
1796 + WNOHANG for each PID we wait for, instead of using
1797 P_ALL. This is because we only want to get child
1798 information of very specific child processes, and not all
1799 of them. We might not have processed the SIGCHLD even of a
1800 previous invocation and we don't want to maintain a
1801 unbounded *per-child* event queue, hence we really don't
1802 want anything flushed out of the kernel's queue that we
1803 don't care about. Since this is O(n) this means that if you
1804 have a lot of processes you probably want to handle SIGCHLD
1807 We do not reap the children here (by using WNOWAIT), this
1808 is only done after the event source is dispatched so that
1809 the callback still sees the process as a zombie.
1812 HASHMAP_FOREACH(s, e->child_sources, i) {
1813 assert(s->type == SOURCE_CHILD);
1818 if (s->enabled == SD_EVENT_OFF)
1821 zero(s->child.siginfo);
1822 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
1823 WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
/* si_pid != 0 means a state change was reported. */
1827 if (s->child.siginfo.si_pid != 0) {
1829 s->child.siginfo.si_code == CLD_EXITED ||
1830 s->child.siginfo.si_code == CLD_KILLED ||
1831 s->child.siginfo.si_code == CLD_DUMPED;
1833 if (!zombie && (s->child.options & WEXITED)) {
1834 /* If the child isn't dead then let's
1835 * immediately remove the state change
1836 * from the queue, since there's no
1837 * benefit in leaving it queued */
1839 assert(s->child.options & (WSTOPPED|WCONTINUED));
1840 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
1843 r = source_set_pending(s, true);
/* Drain the event loop's signalfd and mark the matching signal event
 * sources pending. SIGCHLD additionally triggers child-source
 * processing. NOTE(review): the excerpt elides the surrounding read
 * loop and several error paths. */
1852 static int process_signal(sd_event *e, uint32_t events) {
1853         bool read_one = false;
1857         assert(e->signal_sources);
/* A signalfd only ever becomes readable. */
1859         assert_return(events == EPOLLIN, -EIO);
1862                 struct signalfd_siginfo si;
1866                 ss = read(e->signal_fd, &si, sizeof(si));
/* Non-fatal: no more queued signals, or interrupted read. */
1868                         if (errno == EAGAIN || errno == EINTR)
/* signalfd reads are all-or-nothing; a short read is a hard error. */
1874                 if (_unlikely_(ss != sizeof(si)))
/* Per-signal lookup table; index is the signal number. */
1879                 s = e->signal_sources[si.ssi_signo];
1880                 if (si.ssi_signo == SIGCHLD) {
1881                         r = process_child(e);
/* Stash the siginfo on the source so the callback can inspect it. */
1890                 s->signal.siginfo = si;
1891                 r = source_set_pending(s, true);
/* Invoke the user callback of a pending event source, with per-type
 * argument marshalling. Handles ONESHOT auto-disabling, marks all
 * SOURCE_POST sources pending when a non-post source runs, and reaps
 * child processes after their callback has seen the zombie. A callback
 * returning an error gets the source disabled. NOTE(review): the
 * excerpt elides the switch statement header and some error checks. */
1897 static int source_dispatch(sd_event_source *s) {
/* DEFER/EXIT sources are dispatched from their own queues and may be
 * non-pending; everything else must be pending here. */
1901         assert(s->pending || s->type == SOURCE_EXIT);
1903         if (s->type != SOURCE_DEFER && s->type != SOURCE_EXIT) {
/* Clear pending before the callback so the callback may re-arm it. */
1904                 r = source_set_pending(s, false);
1909         if (s->type != SOURCE_POST) {
1913                 /* If we execute a non-post source, let's mark all
1914                  * post sources as pending */
1916                 SET_FOREACH(z, s->event->post_sources, i) {
1917                         if (z->enabled == SD_EVENT_OFF)
1920                         r = source_set_pending(z, true);
/* ONESHOT: disable before dispatch so the callback can re-enable. */
1926         if (s->enabled == SD_EVENT_ONESHOT) {
1927                 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Guard flag: lets re-entrant API calls detect "inside callback". */
1932         s->dispatching = true;
1937                 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
1940         case SOURCE_TIME_REALTIME:
1941         case SOURCE_TIME_MONOTONIC:
1942         case SOURCE_TIME_REALTIME_ALARM:
1943         case SOURCE_TIME_BOOTTIME_ALARM:
1944                 r = s->time.callback(s, s->time.next, s->userdata);
1948                 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
1951         case SOURCE_CHILD: {
/* Remember whether the child actually terminated (vs. stop/continue)
 * BEFORE the callback runs — the callback may clobber siginfo. */
1954                 zombie = s->child.siginfo.si_code == CLD_EXITED ||
1955                          s->child.siginfo.si_code == CLD_KILLED ||
1956                          s->child.siginfo.si_code == CLD_DUMPED;
1958                 r = s->child.callback(s, &s->child.siginfo, s->userdata);
1960                 /* Now, reap the PID for good. */
/* process_child() used WNOWAIT, so the zombie is still queued; this
 * waitid() without WNOWAIT finally releases it. */
1962                         waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
1968                 r = s->defer.callback(s, s->userdata);
1972                 r = s->post.callback(s, s->userdata);
1976                 r = s->exit.callback(s, s->userdata);
/* Watchdog is an internal pseudo-source; it must never reach dispatch. */
1979         case SOURCE_WATCHDOG:
1980                 assert_not_reached("Wut? I shouldn't exist.");
1983         s->dispatching = false;
/* A failing callback disables (not destroys) the source — best effort,
 * the sd_event_source_set_enabled() result is intentionally ignored. */
1986                 log_debug("Event source %p returned error, disabling: %s", s, strerror(-r));
1991                 sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Run the prepare() callbacks of all sources that have one, at most
 * once per event-loop iteration each (tracked via prepare_iteration).
 * A failing prepare callback disables its source. NOTE(review): the
 * excerpt elides the enclosing loop and declarations. */
1996 static int event_prepare(sd_event *e) {
/* Queue is ordered so un-prepared sources come first; stop as soon as
 * the head was already prepared this iteration or is disabled. */
2004                 s = prioq_peek(e->prepare);
2005                 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
/* Stamp and reshuffle BEFORE calling out, so the callback may safely
 * manipulate the source without corrupting the queue. */
2008                 s->prepare_iteration = e->iteration;
2009                 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
2015                 s->dispatching = true;
2016                 r = s->prepare(s, s->userdata);
2017                 s->dispatching = false;
/* Same error policy as dispatch: disable the offender, keep looping. */
2020                         log_debug("Prepare callback of event source %p returned error, disabling: %s", s, strerror(-r));
2025                         sd_event_source_set_enabled(s, SD_EVENT_OFF);
/* Dispatch the highest-priority enabled SOURCE_EXIT source, or finish
 * the loop if none remains. Called instead of normal processing once
 * exit has been requested. */
2031 static int dispatch_exit(sd_event *e) {
2037         p = prioq_peek(e->exit);
/* No (enabled) exit sources left: the loop is done. */
2038         if (!p || p->enabled == SD_EVENT_OFF) {
2039                 e->state = SD_EVENT_FINISHED;
2045         e->state = SD_EVENT_EXITING;
2047         r = source_dispatch(p);
/* Back to PASSIVE so sd_event_run() may be called again. */
2049         e->state = SD_EVENT_PASSIVE;
/* Return the highest-priority pending event source that is enabled,
 * or NULL if there is none (a disabled head counts as none, since the
 * pending queue sorts disabled sources last — TODO confirm ordering). */
2055 static sd_event_source* event_next_pending(sd_event *e) {
2060         p = prioq_peek(e->pending);
2064         if (p->enabled == SD_EVENT_OFF)
/* (Re-)arm the internal watchdog timerfd to fire somewhere between 1/2
 * and 3/4 of the watchdog period after the last ping — the slack lets
 * the wakeup coalesce with other timers. */
2070 static int arm_watchdog(sd_event *e) {
2071         struct itimerspec its = {};
2076         assert(e->watchdog_fd >= 0);
2078         t = sleep_between(e,
2079                           e->watchdog_last + (e->watchdog_period / 2),
2080                           e->watchdog_last + (e->watchdog_period * 3 / 4));
2082         timespec_store(&its.it_value, t);
/* ABSTIME: 't' is an absolute CLOCK_MONOTONIC deadline, not relative. */
2084         r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
/* Send a keep-alive ping to the service manager (sd_notify WATCHDOG=1)
 * if enough of the period has elapsed, then re-arm the watchdog timer. */
2091 static int process_watchdog(sd_event *e) {
2097         /* Don't notify watchdog too often */
/* Rate limit: at most one ping per quarter period. */
2098         if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
2101         sd_notify(false, "WATCHDOG=1");
2102         e->watchdog_last = e->timestamp.monotonic;
2104         return arm_watchdog(e);
/* Run a single iteration of the event loop: run prepare callbacks, arm
 * the per-clock timerfds, epoll_wait (skipped-to-zero-wait if something
 * is already pending — TODO confirm, the branch body is elided here),
 * flush the fds that fired, process timers/children/watchdog, and
 * dispatch exactly one pending source. 'timeout' is in microseconds,
 * (uint64_t) -1 means "wait forever". NOTE(review): this excerpt
 * elides many lines (declarations, gotos, error checks). */
2107 _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
2108         struct epoll_event *ev_queue;
2109         unsigned ev_queue_max;
2113         assert_return(e, -EINVAL);
/* The loop object is not usable across fork(). */
2114         assert_return(!event_pid_changed(e), -ECHILD);
2115         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
/* Re-entrant calls (from within a callback) are refused. */
2116         assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
/* Once exit was requested, only exit sources run. */
2118         if (e->exit_requested)
2119                 return dispatch_exit(e);
2123         e->state = SD_EVENT_RUNNING;
2125         r = event_prepare(e);
/* Arm one timerfd per clock from its priority queues. */
2129         r = event_arm_timer(e, &e->realtime);
2133         r = event_arm_timer(e, &e->monotonic);
2137         r = event_arm_timer(e, &e->realtime_alarm);
2141         r = event_arm_timer(e, &e->boottime_alarm);
/* Work already queued: presumably this forces a zero timeout below so
 * epoll_wait() doesn't block — confirm against the elided lines. */
2145         if (event_next_pending(e) || e->need_process_child)
/* Stack-allocate the epoll output buffer, capped at EPOLL_QUEUE_MAX. */
2148         ev_queue_max = CLAMP(e->n_sources, 1U, EPOLL_QUEUE_MAX);
2149         ev_queue = newa(struct epoll_event, ev_queue_max);
/* Round the usec timeout UP to whole milliseconds for epoll_wait(). */
2151         m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
2152                        timeout == (uint64_t) -1 ? -1 : (int) ((timeout + USEC_PER_MSEC - 1) / USEC_PER_MSEC));
/* EAGAIN/EINTR count as a successful (if empty) iteration. */
2154                 r = errno == EAGAIN || errno == EINTR ? 1 : -errno;
/* Take one coherent set of timestamps for this whole iteration. */
2158         dual_timestamp_get(&e->timestamp);
2159         e->timestamp_boottime = now(CLOCK_BOOTTIME);
2161         for (i = 0; i < m; i++) {
/* Internal pseudo-sources are keyed by their enum value stuffed into
 * epoll data.ptr; everything else is a real sd_event_source pointer. */
2163                 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME))
2164                         r = flush_timer(e, e->realtime.fd, ev_queue[i].events, &e->realtime.next);
2165                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_MONOTONIC))
2166                         r = flush_timer(e, e->monotonic.fd, ev_queue[i].events, &e->monotonic.next);
2167                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_REALTIME_ALARM))
2168                         r = flush_timer(e, e->realtime_alarm.fd, ev_queue[i].events, &e->realtime_alarm.next);
2169                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_TIME_BOOTTIME_ALARM))
2170                         r = flush_timer(e, e->boottime_alarm.fd, ev_queue[i].events, &e->boottime_alarm.next);
2171                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_SIGNAL))
2172                         r = process_signal(e, ev_queue[i].events);
2173                 else if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
2174                         r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
2176                         r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
2182         r = process_watchdog(e);
/* Note: the REALTIME_ALARM clock reuses the realtime timestamp; only
 * BOOTTIME gets its separately sampled timestamp. */
2186         r = process_timer(e, e->timestamp.realtime, &e->realtime);
2190         r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
2194         r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
2198         r = process_timer(e, e->timestamp_boottime, &e->boottime_alarm);
2202         if (e->need_process_child) {
2203                 r = process_child(e);
/* Dispatch exactly ONE pending source per iteration — fairness is
 * provided by the caller looping. */
2208         p = event_next_pending(e);
2214         r = source_dispatch(p);
2217         e->state = SD_EVENT_PASSIVE;
/* Run the event loop until it reaches SD_EVENT_FINISHED (i.e. until
 * sd_event_exit() has been called and all exit sources have run),
 * blocking indefinitely in each iteration. NOTE(review): the return
 * value and error handling of sd_event_run() are elided here. */
2223 _public_ int sd_event_loop(sd_event *e) {
2226         assert_return(e, -EINVAL);
2227         assert_return(!event_pid_changed(e), -ECHILD);
2228         assert_return(e->state == SD_EVENT_PASSIVE, -EBUSY);
2232         while (e->state != SD_EVENT_FINISHED) {
/* (uint64_t) -1 == infinite timeout per iteration. */
2233                 r = sd_event_run(e, (uint64_t) -1);
/* Return the loop's current state (SD_EVENT_PASSIVE/RUNNING/EXITING/
 * FINISHED...); the actual return statement is elided in this excerpt. */
2245 _public_ int sd_event_get_state(sd_event *e) {
2246         assert_return(e, -EINVAL);
2247         assert_return(!event_pid_changed(e), -ECHILD);
/* Retrieve the exit code previously set via sd_event_exit(). Fails
 * (presumably with a negative error — the branch body is elided) when
 * no exit has been requested yet. */
2252 _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
2253         assert_return(e, -EINVAL);
2254         assert_return(code, -EINVAL);
2255         assert_return(!event_pid_changed(e), -ECHILD);
2257         if (!e->exit_requested)
2260         *code = e->exit_code;
/* Request termination of the event loop with the given exit code.
 * Only sets flags; the actual unwinding (running exit sources, then
 * finishing) happens in subsequent sd_event_run() iterations. */
2264 _public_ int sd_event_exit(sd_event *e, int code) {
2265         assert_return(e, -EINVAL);
2266         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
2267         assert_return(!event_pid_changed(e), -ECHILD);
2269         e->exit_requested = true;
2270         e->exit_code = code;
/* Return the timestamp of the current event-loop iteration for the
 * given clock, so all callbacks in one iteration see a coherent time.
 * Falls back to the actual current time before the first iteration.
 * NOTE(review): the switch header, break statements and the
 * CLOCK_BOOTTIME case label are elided from this excerpt. */
2275 _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
2276         assert_return(e, -EINVAL);
2277         assert_return(usec, -EINVAL);
2278         assert_return(!event_pid_changed(e), -ECHILD);
2280         /* If we haven't run yet, just get the actual time */
2281         if (!dual_timestamp_is_set(&e->timestamp))
/* REALTIME and REALTIME_ALARM share the realtime timestamp. */
2286         case CLOCK_REALTIME:
2287         case CLOCK_REALTIME_ALARM:
2288                 *usec = e->timestamp.realtime;
2291         case CLOCK_MONOTONIC:
2292                 *usec = e->timestamp.monotonic;
/* Boottime is sampled separately (dual_timestamp has no boottime). */
2295         case CLOCK_BOOTTIME_ALARM:
2296                 *usec = e->timestamp_boottime;
/* Return (a new reference to) the calling thread's default event loop,
 * lazily creating it on first use. With ret == NULL it only reports
 * whether a default loop already exists. NOTE(review): the ret==NULL
 * check and the error/cleanup paths are elided from this excerpt. */
2303 _public_ int sd_event_default(sd_event **ret) {
/* One default loop per thread, not per process. */
2305         static thread_local sd_event *default_event = NULL;
2310                 return !!default_event;
2312         if (default_event) {
2313                 *ret = sd_event_ref(default_event);
2317         r = sd_event_new(&e);
/* Back-pointer so the loop can clear the thread-local on destruction. */
2321         e->default_event_ptr = &default_event;
/* Report the thread ID the loop is bound to; the actual lookup and
 * the "no tid recorded" error path are elided from this excerpt. */
2329 _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
2330         assert_return(e, -EINVAL);
2331         assert_return(tid, -EINVAL);
2332         assert_return(!event_pid_changed(e), -ECHILD);
/* Enable or disable integration with the service manager's watchdog
 * (WATCHDOG_USEC): when enabled and the manager expects pings, create
 * a CLOCK_MONOTONIC timerfd, hook it into the epoll loop and ping
 * immediately; when disabled, tear the timerfd down. NOTE(review):
 * several error-handling lines and the enable/disable branch structure
 * are elided from this excerpt. */
2342 _public_ int sd_event_set_watchdog(sd_event *e, int b) {
2345         assert_return(e, -EINVAL);
2346         assert_return(!event_pid_changed(e), -ECHILD);
/* No-op if the requested state already holds. */
2348         if (e->watchdog == !!b)
2352                 struct epoll_event ev = {};
/* Ask the manager for the watchdog period; 'false' keeps the
 * WATCHDOG_USEC environment variable for later re-reads. */
2354                 r = sd_watchdog_enabled(false, &e->watchdog_period);
2358                 /* Issue first ping immediately */
2359                 sd_notify(false, "WATCHDOG=1");
2360                 e->watchdog_last = now(CLOCK_MONOTONIC);
2362                 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
2363                 if (e->watchdog_fd < 0)
2366                 r = arm_watchdog(e);
2370                 ev.events = EPOLLIN;
/* Pseudo-source marker, matched in sd_event_run()'s dispatch chain. */
2371                 ev.data.ptr = INT_TO_PTR(SOURCE_WATCHDOG);
2373                 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
/* Disable path: unhook from epoll and close the timerfd. */
2380                 if (e->watchdog_fd >= 0) {
2381                         epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
2382                         e->watchdog_fd = safe_close(e->watchdog_fd);
/* Error-cleanup tail (presumably a 'fail:' label — confirm): close fd. */
2390         e->watchdog_fd = safe_close(e->watchdog_fd);
2394 _public_ int sd_event_get_watchdog(sd_event *e) {
2395 assert_return(e, -EINVAL);
2396 assert_return(!event_pid_changed(e), -ECHILD);