1 /*-*- Mode: C; c-basic-offset: 8 -*-*/
7 #include <sys/timerfd.h>
16 #include "load-fragment.h"
17 #include "load-dropin.h"
/* Per-type dispatch table: maps each UnitType enum value to the
 * vtable implementing that unit type's behavior. Indexed directly
 * by UnitType. (Closing brace of the initializer is elided in this
 * view of the file.) */
20 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
21 [UNIT_SERVICE] = &service_vtable,
22 [UNIT_TIMER] = &timer_vtable,
23 [UNIT_SOCKET] = &socket_vtable,
24 [UNIT_TARGET] = &target_vtable,
25 [UNIT_DEVICE] = &device_vtable,
26 [UNIT_MOUNT] = &mount_vtable,
27 [UNIT_AUTOMOUNT] = &automount_vtable,
28 [UNIT_SNAPSHOT] = &snapshot_vtable
/* Determine a unit's type from its name by matching the name's
 * suffix against each registered vtable's suffix. Returns
 * _UNIT_TYPE_INVALID if no suffix matches. */
31 UnitType unit_name_to_type(const char *n) {
36 for (t = 0; t < _UNIT_TYPE_MAX; t++)
37 if (endswith(n, unit_vtable[t]->suffix))
/* No registered suffix matched the name */
40 return _UNIT_TYPE_INVALID;
45 "abcdefghijklmnopqrstuvwxyz" \
46 "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
/* Validate a unit name: it must be shorter than UNIT_NAME_MAX,
 * carry a suffix that maps to a known unit type, contain a '.'
 * separator, and use only characters from VALID_CHARS before the
 * last '.'. */
49 bool unit_name_is_valid(const char *n) {
55 if (strlen(n) >= UNIT_NAME_MAX)
/* The suffix must correspond to a known unit type */
58 t = unit_name_to_type(n);
59 if (t < 0 || t >= _UNIT_TYPE_MAX)
62 if (!(e = strrchr(n, '.')))
/* Every character before the last '.' must be in the whitelist */
68 for (i = n; i < e; i++)
69 if (!strchr(VALID_CHARS, *i))
/* Return a newly allocated copy of unit name 'n' with its suffix
 * (everything from the last '.') replaced by 'suffix'. Caller owns
 * and must free the result; returns NULL on OOM (allocation check
 * below). */
75 char *unit_name_change_suffix(const char *n, const char *suffix) {
80 assert(unit_name_is_valid(n));
/* A valid unit name always contains a '.' */
83 assert_se(e = strrchr(n, '.'));
87 if (!(r = new(char, a + b + 1)))
/* Copy new suffix including its terminating NUL */
91 memcpy(r+a, suffix, b+1);
/* Allocate and zero-initialize a new Unit attached to manager 'm'.
 * Creates the unit's name set; the type starts out invalid until a
 * name is added. Returns NULL on OOM. */
96 Unit *unit_new(Manager *m) {
101 if (!(u = new0(Unit, 1)))
/* Name set uses string hashing; failure path frees the unit */
104 if (!(u->meta.names = set_new(string_hash_func, string_compare_func))) {
110 u->meta.type = _UNIT_TYPE_INVALID;
/* Register an additional name for unit 'u'. The name must be valid,
 * must map to a known unit type, and must agree with the unit's
 * already-established type (if any). The duplicated string is stored
 * in the unit's name set and registered in the manager's units
 * hashmap; on hashmap failure the name set insertion is rolled back. */
115 int unit_add_name(Unit *u, const char *text) {
123 if (!unit_name_is_valid(text))
126 if ((t = unit_name_to_type(text)) == _UNIT_TYPE_INVALID)
/* A unit may not carry names of two different types */
129 if (u->meta.type != _UNIT_TYPE_INVALID && t != u->meta.type)
132 if (!(s = strdup(text)))
135 if ((r = set_put(u->meta.names, s)) < 0) {
144 if ((r = hashmap_put(u->meta.manager->units, s, u)) < 0) {
/* Roll back the set insertion so state stays consistent */
145 set_remove(u->meta.names, s);
/* Queue a stub unit for loading. No-op if the unit is already loaded
 * (not UNIT_STUB) or already sitting in the load queue; the
 * in_load_queue flag prevents double insertion. */
158 void unit_add_to_load_queue(Unit *u) {
161 if (u->meta.load_state != UNIT_STUB || u->meta.in_load_queue)
164 LIST_PREPEND(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
165 u->meta.in_load_queue = true;
/* Free a bidirectional dependency set: before the set itself is
 * released, remove 'u' from every dependency set of each member so
 * no dangling back-pointers to 'u' remain. */
168 static void bidi_set_free(Unit *u, Set *s) {
174 /* Frees the set and makes sure we are dropped from the
175 * inverse pointers */
177 SET_FOREACH(other, s, i) {
/* Scrub u from every dependency category of the peer */
180 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
181 set_remove(other->meta.dependencies[d], u);
/* Destroy a unit: detach it from the manager's registries and load
 * queue, give the type-specific implementation a chance to clean up,
 * then release dependencies, owned strings and the name set. */
187 void unit_free(Unit *u) {
194 /* Detach from next 'bigger' objects */
/* Drop every (name -> unit) registration from the manager */
196 SET_FOREACH(t, u->meta.names, i)
197 hashmap_remove_value(u->meta.manager->units, t, u);
199 if (u->meta.in_load_queue)
200 LIST_REMOVE(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
/* Type-specific teardown only applies to fully loaded units */
202 if (u->meta.load_state == UNIT_LOADED)
203 if (UNIT_VTABLE(u)->done)
204 UNIT_VTABLE(u)->done(u);
205 
206 /* Free data and next 'smaller' objects */
208 job_free(u->meta.job);
/* Dependency sets are bidirectional; scrub back-pointers too */
210 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
211 bidi_set_free(u, u->meta.dependencies[d]);
213 free(u->meta.description);
214 free(u->meta.load_path);
/* The set owns the strdup'ed names; steal and free each one */
216 while ((t = set_steal_first(u->meta.names)))
218 set_free(u->meta.names);
/* Return the unit's active state. Units that are not fully loaded
 * are reported as inactive; otherwise delegate to the type-specific
 * implementation. */
223 UnitActiveState unit_active_state(Unit *u) {
226 if (u->meta.load_state != UNIT_LOADED)
227 return UNIT_INACTIVE;
229 return UNIT_VTABLE(u)->active_state(u);
/* Merge set 'other' into '*s'. If '*s' already exists, merge in
 * place; otherwise take a copy of 'other' as the new '*s'. */
232 static int ensure_merge(Set **s, Set *other) {
238 return set_merge(*s, other);
/* No destination set yet: copy 'other' wholesale */
240 if (!(*s = set_copy(other)))
246 /* FIXME: Does not rollback on failure! Needs to fix special unit
247 * pointers. Needs to merge names and dependencies properly.*/
248 int unit_merge(Unit *u, Unit *other) {
254 assert(u->meta.manager == other->meta.manager);
256 /* This merges 'other' into 'unit'. FIXME: This does not
257 * rollback on failure. */
259 if (u->meta.type != u->meta.type)
262 if (u->meta.load_state != UNIT_STUB)
266 if ((r = ensure_merge(&u->meta.names, other->meta.names)) < 0)
269 /* Merge dependencies */
270 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
271 /* fixme, the inverse mapping is missing */
272 if ((r = ensure_merge(&u->meta.dependencies[d], other->meta.dependencies[d])) < 0)
/* Return the unit's primary identifier: the first entry of its name
 * set. The returned string is owned by the unit; do not free. */
278 const char* unit_id(Unit *u) {
284 return set_first(u->meta.names);
/* Return a human-readable description for the unit: the explicitly
 * configured description if set (fallback path is elided in this
 * view). Returned string is owned by the unit. */
287 const char *unit_description(Unit *u) {
290 if (u->meta.description)
291 return u->meta.description;
/* Dump a human-readable description of unit 'u' to stream 'f', each
 * line prefixed with 'prefix'. Prints description, load/active
 * state, load path, all names and all dependencies, then delegates
 * to the type-specific dump and the job dump. */
296 void unit_dump(Unit *u, FILE *f, const char *prefix) {
297 
/* Enum-to-string tables for pretty-printing states */
298 static const char* const load_state_table[_UNIT_LOAD_STATE_MAX] = {
299 [UNIT_STUB] = "stub",
300 [UNIT_LOADED] = "loaded",
301 [UNIT_FAILED] = "failed"
304 static const char* const active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
305 [UNIT_ACTIVE] = "active",
306 [UNIT_INACTIVE] = "inactive",
307 [UNIT_ACTIVATING] = "activating",
308 [UNIT_DEACTIVATING] = "deactivating"
311 static const char* const dependency_table[_UNIT_DEPENDENCY_MAX] = {
312 [UNIT_REQUIRES] = "Requires",
313 [UNIT_SOFT_REQUIRES] = "SoftRequires",
314 [UNIT_WANTS] = "Wants",
315 [UNIT_REQUISITE] = "Requisite",
316 [UNIT_SOFT_REQUISITE] = "SoftRequisite",
317 [UNIT_REQUIRED_BY] = "RequiredBy",
318 [UNIT_SOFT_REQUIRED_BY] = "SoftRequiredBy",
319 [UNIT_WANTED_BY] = "WantedBy",
320 [UNIT_CONFLICTS] = "Conflicts",
321 [UNIT_BEFORE] = "Before",
322 [UNIT_AFTER] = "After",
/* Deeper prefix for nested (type-specific and job) output */
334 prefix2 = strappend(prefix, "\t");
340 "%s\tDescription: %s\n"
341 "%s\tUnit Load State: %s\n"
342 "%s\tUnit Active State: %s\n",
344 prefix, unit_description(u),
345 prefix, load_state_table[u->meta.load_state],
346 prefix, active_state_table[unit_active_state(u)]);
348 if (u->meta.load_path)
349 fprintf(f, "%s\tLoad Path: %s\n", prefix, u->meta.load_path);
/* One line per registered name */
351 SET_FOREACH(t, u->meta.names, i)
352 fprintf(f, "%s\tName: %s\n", prefix, t);
/* One line per dependency, grouped by category */
354 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
357 if (set_isempty(u->meta.dependencies[d]))
360 SET_FOREACH(other, u->meta.dependencies[d], i)
361 fprintf(f, "%s\t%s: %s\n", prefix, dependency_table[d], unit_id(other));
/* Let the unit type append its own details */
364 if (UNIT_VTABLE(u)->dump)
365 UNIT_VTABLE(u)->dump(u, f, prefix2);
368 job_dump(u->meta.job, f, prefix2);
373 /* Common implementation for multiple backends */

/* Load a unit's configuration: first the main fragment file, then
 * any drop-in directory snippets. Returns the first error
 * encountered. */
374 int unit_load_fragment_and_dropin(Unit *u) {
379 /* Load the unit's main configuration fragment */
380 if ((r = unit_load_fragment(u)) < 0)
383 /* Load drop-in directory data */
384 if ((r = unit_load_dropin(u)) < 0)
390 int unit_load(Unit *u) {
395 if (u->meta.in_load_queue) {
396 LIST_REMOVE(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
397 u->meta.in_load_queue = false;
400 if (u->meta.load_state != UNIT_STUB)
403 if (UNIT_VTABLE(u)->init)
404 if ((r = UNIT_VTABLE(u)->init(u)) < 0)
407 u->meta.load_state = UNIT_LOADED;
411 u->meta.load_state = UNIT_FAILED;
416 * -EBADR: This unit type does not support starting.
417 * -EALREADY: Unit is already started.
418 * -EAGAIN: An operation is already in progress. Retry later.
419 
/* Start the unit via its type-specific start() hook. See the error
 * conventions documented above. */
420 int unit_start(Unit *u) {
421 UnitActiveState state;
/* Unit types without a start() hook cannot be started */
425 if (!UNIT_VTABLE(u)->start)
428 state = unit_active_state(u);
429 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
432 /* We don't suppress calls to ->start() here when we are
433 * already starting, to allow this request to be used as a
434 * "hurry up" call, for example when the unit is in some "auto
435 * restart" state where it waits for a holdoff timer to elapse
436 * before it will start again. */
438 return UNIT_VTABLE(u)->start(u);
/* A unit can be started iff its type provides a start() hook. */
441 bool unit_can_start(Unit *u) {
444 return !!UNIT_VTABLE(u)->start;
448 * -EBADR: This unit type does not support stopping.
449 * -EALREADY: Unit is already stopped.
450 * -EAGAIN: An operation is already in progress. Retry later.
451 
/* Stop the unit via its type-specific stop() hook. See the error
 * conventions documented above. */
452 int unit_stop(Unit *u) {
453 UnitActiveState state;
/* Unit types without a stop() hook cannot be stopped */
457 if (!UNIT_VTABLE(u)->stop)
460 state = unit_active_state(u);
461 if (state == UNIT_INACTIVE)
/* A stop is already underway */
464 if (state == UNIT_DEACTIVATING)
467 return UNIT_VTABLE(u)->stop(u);
471 * -EBADR: This unit type does not support reloading.
472 * -ENOEXEC: Unit is not started.
473 * -EAGAIN: An operation is already in progress. Retry later.
475 int unit_reload(Unit *u) {
476 UnitActiveState state;
480 if (!unit_can_reload(u))
483 state = unit_active_state(u);
484 if (unit_active_state(u) == UNIT_ACTIVE_RELOADING)
487 if (unit_active_state(u) != UNIT_ACTIVE)
490 return UNIT_VTABLE(u)->reload(u);
/* A unit can be reloaded iff its type provides a reload() hook, and,
 * when a can_reload() predicate exists, that predicate agrees. */
493 bool unit_can_reload(Unit *u) {
496 if (!UNIT_VTABLE(u)->reload)
/* No predicate: having reload() is sufficient */
499 if (!UNIT_VTABLE(u)->can_reload)
502 return UNIT_VTABLE(u)->can_reload(u);
/* After a unit became active outside of any job, bring its
 * dependencies in line: queue start jobs for required/wanted units
 * that are not already active and stop jobs for conflicting ones.
 * Hard dependencies use JOB_REPLACE+force; soft ones use
 * JOB_FAIL without force so they never override existing jobs. */
505 static void retroactively_start_dependencies(Unit *u) {
510 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
512 SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRES], i)
513 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
514 manager_add_job(u->meta.manager, JOB_START, other, JOB_REPLACE, true, NULL);
516 SET_FOREACH(other, u->meta.dependencies[UNIT_SOFT_REQUIRES], i)
517 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
518 manager_add_job(u->meta.manager, JOB_START, other, JOB_FAIL, false, NULL);
520 SET_FOREACH(other, u->meta.dependencies[UNIT_REQUISITE], i)
521 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
522 manager_add_job(u->meta.manager, JOB_START, other, JOB_REPLACE, true, NULL);
524 SET_FOREACH(other, u->meta.dependencies[UNIT_WANTS], i)
525 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
526 manager_add_job(u->meta.manager, JOB_START, other, JOB_FAIL, false, NULL);
/* Conflicting units get stopped, not started */
528 SET_FOREACH(other, u->meta.dependencies[UNIT_CONFLICTS], i)
529 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
530 manager_add_job(u->meta.manager, JOB_STOP, other, JOB_REPLACE, true, NULL);
/* After a unit became inactive outside of any job, queue stop jobs
 * for every unit that required it (UNIT_REQUIRED_BY) and is still
 * running. */
533 static void retroactively_stop_dependencies(Unit *u) {
538 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
540 SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRED_BY], i)
541 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
542 manager_add_job(u->meta.manager, JOB_STOP, other, JOB_REPLACE, true, NULL);
/* Called by unit type implementations whenever a unit's active state
 * changes from 'os' to 'ns'. Records enter/exit timestamps, drives
 * any pending/running job on the unit to completion or failure, and,
 * when no job requested the change, retroactively starts or stops
 * the unit's dependencies. */
545 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns) {
547 assert(os < _UNIT_ACTIVE_STATE_MAX);
548 assert(ns < _UNIT_ACTIVE_STATE_MAX);
/* These direct transitions are not expected from any unit type */
549 assert(!(os == UNIT_ACTIVE && ns == UNIT_ACTIVATING));
550 assert(!(os == UNIT_INACTIVE && ns == UNIT_DEACTIVATING));
/* Track when the unit last entered/left the active state */
555 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
556 u->meta.active_enter_timestamp = now(CLOCK_REALTIME);
557 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
558 u->meta.active_exit_timestamp = now(CLOCK_REALTIME);
562 if (u->meta.job->state == JOB_WAITING)
564 /* So we reached a different state for this
565 * job. Let's see if we can run it now if it
566 * failed previously due to EAGAIN. */
567 job_schedule_run(u->meta.job);
570 assert(u->meta.job->state == JOB_RUNNING);
572 /* Let's check if this state change
573 * constitutes a finished job, or maybe
574 * contradicts a running job and hence needs to
575 * invalidate jobs. */
577 switch (u->meta.job->type) {
580 case JOB_VERIFY_ACTIVE:
/* Active (or reloading) means success; deactivation means failure */
582 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
583 job_finish_and_invalidate(u->meta.job, true);
585 } else if (ns == UNIT_ACTIVATING)
588 job_finish_and_invalidate(u->meta.job, false);
593 case JOB_RELOAD_OR_START:
595 if (ns == UNIT_ACTIVE) {
596 job_finish_and_invalidate(u->meta.job, true);
/* Still in progress: keep waiting */
598 } else if (ns == UNIT_ACTIVATING || ns == UNIT_ACTIVE_RELOADING)
601 job_finish_and_invalidate(u->meta.job, false);
607 case JOB_TRY_RESTART:
609 if (ns == UNIT_INACTIVE) {
610 job_finish_and_invalidate(u->meta.job, true);
612 } else if (ns == UNIT_DEACTIVATING)
615 job_finish_and_invalidate(u->meta.job, false);
620 assert_not_reached("Job type unknown");
625 /* If this state change happened without being requested by a
626 * job, then let's retroactively start or stop dependencies */
628 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
629 retroactively_start_dependencies(u);
630 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
631 retroactively_stop_dependencies(u);
/* Register (or update) an epoll watch on 'fd' for this unit. A fresh
 * Watch (WATCH_INVALID) is added with EPOLL_CTL_ADD; an existing
 * WATCH_FD watch on the same fd/unit is modified with EPOLL_CTL_MOD. */
634 int unit_watch_fd(Unit *u, int fd, uint32_t events, Watch *w) {
635 struct epoll_event ev;
/* A reused watch must already describe exactly this fd and unit */
640 assert(w->type == WATCH_INVALID || (w->type == WATCH_FD && w->fd == fd && w->unit == u));
646 if (epoll_ctl(u->meta.manager->epoll_fd,
647 w->type == WATCH_INVALID ? EPOLL_CTL_ADD : EPOLL_CTL_MOD,
/* Remove an fd watch previously set up with unit_watch_fd(). No-op
 * if the watch was never armed; afterwards the watch is marked
 * invalid so it can be reused. */
659 void unit_unwatch_fd(Unit *u, Watch *w) {
663 if (w->type == WATCH_INVALID)
666 assert(w->type == WATCH_FD && w->unit == u);
/* Deregistration from epoll is not expected to fail here */
667 assert_se(epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_DEL, w->fd, NULL) >= 0);
670 w->type = WATCH_INVALID;
/* Associate 'pid' with this unit in the manager's watch_pids map so
 * events for that PID can be routed back to the unit.
 * NOTE(review): pid_t is squeezed through UINT32_TO_PTR — assumes
 * PIDs fit in 32 bits; verify on targets with wider pid_t. */
674 int unit_watch_pid(Unit *u, pid_t pid) {
678 return hashmap_put(u->meta.manager->watch_pids, UINT32_TO_PTR(pid), u);
/* Drop the pid -> unit association created by unit_watch_pid(). */
681 void unit_unwatch_pid(Unit *u, pid_t pid) {
685 hashmap_remove(u->meta.manager->watch_pids, UINT32_TO_PTR(pid));
/* Arm a timerfd-based timeout of 'delay' usec for this unit,
 * reusing the watch's existing timer fd when possible. A brand-new
 * fd is created non-blocking/close-on-exec and registered with the
 * manager's epoll instance; the error path closes it again. */
688 int unit_watch_timer(Unit *u, usec_t delay, Watch *w) {
689 struct itimerspec its;
/* A reused watch must already be a timer watch for this unit */
695 assert(w->type == WATCH_INVALID || (w->type == WATCH_TIMER && w->unit == u));
697 /* This will try to reuse the old timer if there is one */
699 if (w->type == WATCH_TIMER) {
704 if ((fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC)) < 0)
711 /* Set absolute time in the past, but not 0, since we
712 * don't want to disarm the timer */
713 its.it_value.tv_sec = 0;
714 its.it_value.tv_nsec = 1;
716 flags = TFD_TIMER_ABSTIME;
/* Relative timeout: convert usec delay into a timespec */
718 timespec_store(&its.it_value, delay);
722 /* This will also flush the elapse counter */
723 if (timerfd_settime(fd, flags, &its, NULL) < 0)
/* First use of this watch: register the fd with epoll */
726 if (w->type == WATCH_INVALID) {
727 struct epoll_event ev;
733 if (epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
738 w->type = WATCH_TIMER;
/* Error path: don't leak the freshly created timer fd */
745 assert_se(close_nointr(fd) == 0);
/* Tear down a timer watch: deregister from epoll, close the timer
 * fd, and mark the watch invalid for reuse. No-op if never armed. */
750 void unit_unwatch_timer(Unit *u, Watch *w) {
754 if (w->type == WATCH_INVALID)
757 assert(w->type == WATCH_TIMER && w->unit == u);
759 assert_se(epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_DEL, w->fd, NULL) >= 0);
760 assert_se(close_nointr(w->fd) == 0);
763 w->type = WATCH_INVALID;
/* Decide whether job type 'j' makes sense for unit 'u', based on
 * which capabilities (start/reload) the unit's type provides. */
767 bool unit_job_is_applicable(Unit *u, JobType j) {
769 assert(j >= 0 && j < _JOB_TYPE_MAX);
773 case JOB_VERIFY_ACTIVE:
/* Start-ish jobs need a startable unit */
779 case JOB_TRY_RESTART:
780 return unit_can_start(u);
783 return unit_can_reload(u);
/* Combined job needs both capabilities */
785 case JOB_RELOAD_OR_START:
786 return unit_can_reload(u) && unit_can_start(u);
789 assert_not_reached("Invalid job type");
/* Record a dependency of type 'd' from 'u' on 'other', plus the
 * matching inverse dependency on the other unit (looked up in
 * inverse_table). On failure of the inverse insertion the forward
 * insertion is rolled back. */
793 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other) {
/* For each dependency kind, the kind stored on the target unit.
 * The *_BY kinds are never passed in directly, hence INVALID. */
795 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
796 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
797 [UNIT_SOFT_REQUIRES] = UNIT_SOFT_REQUIRED_BY,
798 [UNIT_WANTS] = UNIT_WANTED_BY,
799 [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
800 [UNIT_SOFT_REQUISITE] = UNIT_SOFT_REQUIRED_BY,
801 [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
802 [UNIT_SOFT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
803 [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
/* Conflicts and ordering are symmetric / mirrored */
804 [UNIT_CONFLICTS] = UNIT_CONFLICTS,
805 [UNIT_BEFORE] = UNIT_AFTER,
806 [UNIT_AFTER] = UNIT_BEFORE
811 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
812 assert(inverse_table[d] != _UNIT_DEPENDENCY_INVALID);
815 /* We won't allow dependencies on ourselves. We will not
816 * consider them an error however. */
/* Make sure both direction sets exist before inserting */
820 if ((r = set_ensure_allocated(&u->meta.dependencies[d], trivial_hash_func, trivial_compare_func)) < 0)
823 if ((r = set_ensure_allocated(&other->meta.dependencies[inverse_table[d]], trivial_hash_func, trivial_compare_func)) < 0)
826 if ((r = set_put(u->meta.dependencies[d], other)) < 0)
829 if ((r = set_put(other->meta.dependencies[inverse_table[d]], u)) < 0) {
/* Roll back the forward edge so both sides stay consistent */
830 set_remove(u->meta.dependencies[d], other);
/* Return the unit search path: the UNIT_PATH environment variable
 * when it is set to an absolute path (default fallback is elided in
 * this view). */
837 const char *unit_path(void) {
840 if ((e = getenv("UNIT_PATH")))
841 if (path_is_absolute(e))
847 int set_unit_path(const char *p) {
851 /* This is mostly for debug purposes */
853 if (path_is_absolute(p)) {
854 if (!(c = strdup(p)))
857 if (!(cwd = get_current_dir_name()))
860 r = asprintf(&c, "%s/%s", cwd, p);
867 if (setenv("UNIT_PATH", c, 0) < 0) {