1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
26 #include <sys/timerfd.h>
33 #include "sd-messages.h"
38 #include "path-util.h"
39 #include "load-fragment.h"
40 #include "load-dropin.h"
42 #include "unit-name.h"
43 #include "dbus-unit.h"
45 #include "cgroup-util.h"
49 #include "fileio-label.h"
50 #include "bus-common-errors.h"
/* Dispatch table: maps each concrete unit type (service, socket, mount, ...)
 * to its vtable of type-specific operations. Indexed by UnitType.
 * NOTE(review): this chunk is an elided paste (embedded line numbers, lines
 * missing — e.g. the closing "};" of this array is not visible); code is
 * preserved byte-for-byte below. */
56 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
57 [UNIT_SERVICE] = &service_vtable,
58 [UNIT_SOCKET] = &socket_vtable,
59 [UNIT_BUSNAME] = &busname_vtable,
60 [UNIT_TARGET] = &target_vtable,
61 [UNIT_SNAPSHOT] = &snapshot_vtable,
62 [UNIT_DEVICE] = &device_vtable,
63 [UNIT_MOUNT] = &mount_vtable,
64 [UNIT_AUTOMOUNT] = &automount_vtable,
65 [UNIT_SWAP] = &swap_vtable,
66 [UNIT_TIMER] = &timer_vtable,
67 [UNIT_PATH] = &path_vtable,
68 [UNIT_SLICE] = &slice_vtable,
69 [UNIT_SCOPE] = &scope_vtable
/* Forward declaration; used by merge_dependencies() below. */
72 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency);
/* Allocates and default-initializes a Unit of at least sizeof(Unit) bytes
 * (subtypes pass their larger struct size). NOTE(review): allocation,
 * error handling and the return statement are elided in this paste. */
74 Unit *unit_new(Manager *m, size_t size) {
78 assert(size >= sizeof(Unit));
84 u->names = set_new(&string_hash_ops);
/* Sentinel/default field values before the unit is loaded. */
91 u->type = _UNIT_TYPE_INVALID;
92 u->deserialized_job = _JOB_TYPE_INVALID;
93 u->default_dependencies = true;
94 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
95 u->unit_file_preset = -1;
96 u->on_failure_job_mode = JOB_REPLACE;
/* Returns true if 'name' is one of the unit's registered names. */
101 bool unit_has_name(Unit *u, const char *name) {
105 return !!set_get(u->names, (char*) name);
/* Type-specific initialization once the unit's type is known: seeds the
 * cgroup/exec/kill contexts (if the type has them) and calls the vtable
 * init hook. */
108 static void unit_init(Unit *u) {
115 assert(u->type >= 0);
117 cc = unit_get_cgroup_context(u);
119 cgroup_context_init(cc);
121 /* Copy in the manager defaults into the cgroup
122 * context, _before_ the rest of the settings have
123 * been initialized */
125 cc->cpu_accounting = u->manager->default_cpu_accounting;
126 cc->blockio_accounting = u->manager->default_blockio_accounting;
127 cc->memory_accounting = u->manager->default_memory_accounting;
130 ec = unit_get_exec_context(u);
132 exec_context_init(ec);
134 kc = unit_get_kill_context(u);
136 kill_context_init(kc);
138 if (UNIT_VTABLE(u)->init)
139 UNIT_VTABLE(u)->init(u);
/* Registers an additional name for the unit: validates it (template
 * instantiation, type match, instance vs. non-instance, alias and
 * per-manager name-count limits), then records it in u->names and the
 * manager's units hashmap. NOTE(review): several error-return lines are
 * elided in this paste. */
142 int unit_add_name(Unit *u, const char *text) {
143 _cleanup_free_ char *s = NULL, *i = NULL;
/* Template names ("foo@.service") are instantiated with u->instance. */
150 if (unit_name_is_template(text)) {
155 s = unit_name_replace_instance(text, u->instance);
161 if (!unit_name_is_valid(s, TEMPLATE_INVALID))
164 assert_se((t = unit_name_to_type(s)) >= 0);
/* A unit may only carry names of a single type. */
166 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
169 r = unit_name_to_instance(s, &i);
173 if (i && unit_vtable[t]->no_instances)
176 /* Ensure that this unit is either instanced or not instanced,
178 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
181 if (unit_vtable[t]->no_alias &&
182 !set_isempty(u->names) &&
183 !set_get(u->names, s))
186 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
189 r = set_put(u->names, s);
197 r = hashmap_put(u->manager->units, s, u);
/* Roll back the set insertion if the hashmap insertion failed. */
199 set_remove(u->names, s);
/* First name added: fixes the unit's type and links it into the
 * per-type list. */
203 if (u->type == _UNIT_TYPE_INVALID) {
208 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
217 unit_add_to_dbus_queue(u);
/* Selects one of the unit's existing names as its primary id.
 * NOTE(review): assignment of u->id/u->instance is elided here. */
221 int unit_choose_id(Unit *u, const char *name) {
222 _cleanup_free_ char *t = NULL;
229 if (unit_name_is_template(name)) {
234 t = unit_name_replace_instance(name, u->instance);
241 /* Selects one of the names of this unit as the id */
242 s = set_get(u->names, (char*) name);
246 r = unit_name_to_instance(s, &i);
255 unit_add_to_dbus_queue(u);
/* Replaces the unit's human-readable description (empty clears it). */
260 int unit_set_description(Unit *u, const char *description) {
265 if (isempty(description))
268 s = strdup(description);
/* Free the old description before installing the new copy. */
273 free(u->description);
276 unit_add_to_dbus_queue(u);
/* Returns true if the unit must be kept around (is referenced, active,
 * or its type vetoes GC); false means it may be garbage collected.
 * NOTE(review): most of the keep-alive checks are elided in this paste. */
280 bool unit_check_gc(Unit *u) {
283 if (UNIT_VTABLE(u)->no_gc)
295 if (unit_active_state(u) != UNIT_INACTIVE)
/* Give the unit type a chance to veto collection. */
301 if (UNIT_VTABLE(u)->check_gc)
302 if (UNIT_VTABLE(u)->check_gc(u))
/* Queues a stub unit for loading; idempotent. */
308 void unit_add_to_load_queue(Unit *u) {
310 assert(u->type != _UNIT_TYPE_INVALID);
312 if (u->load_state != UNIT_STUB || u->in_load_queue)
315 LIST_PREPEND(load_queue, u->manager->load_queue, u);
316 u->in_load_queue = true;
/* Queues the unit for destruction by the manager; idempotent. */
319 void unit_add_to_cleanup_queue(Unit *u) {
322 if (u->in_cleanup_queue)
325 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
326 u->in_cleanup_queue = true;
/* Queues the unit for a GC check unless it is already queued for GC or
 * cleanup, or unit_check_gc() says it must be kept. */
329 void unit_add_to_gc_queue(Unit *u) {
332 if (u->in_gc_queue || u->in_cleanup_queue)
335 if (unit_check_gc(u))
338 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
339 u->in_gc_queue = true;
341 u->manager->n_in_gc_queue ++;
/* Queues the unit for a D-Bus PropertiesChanged/New signal, skipping the
 * work entirely when no bus client is subscribed. */
344 void unit_add_to_dbus_queue(Unit *u) {
346 assert(u->type != _UNIT_TYPE_INVALID);
348 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
351 /* Shortcut things if nobody cares */
352 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
353 set_isempty(u->manager->private_buses)) {
354 u->sent_dbus_new_signal = true;
358 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
359 u->in_dbus_queue = true;
/* Frees a dependency set of u, first removing u from every peer's
 * dependency sets so no dangling back-pointers remain, and queueing each
 * peer for GC (it may have lost its last reference). */
362 static void bidi_set_free(Unit *u, Set *s) {
368 /* Frees the set and makes sure we are dropped from the
369 * inverse pointers */
371 SET_FOREACH(other, s, i) {
374 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
375 set_remove(other->dependencies[d], u);
377 unit_add_to_gc_queue(other);
/* Deletes the on-disk fragment and drop-in files of a transient unit.
 * NOTE(review): the guards and the drop-in directory removal logic are
 * partially elided in this paste. */
383 static void unit_remove_transient(Unit *u) {
391 if (u->fragment_path)
392 unlink(u->fragment_path);
394 STRV_FOREACH(i, u->dropin_paths) {
395 _cleanup_free_ char *p = NULL;
400 r = path_get_parent(*i, &p);
/* Unregisters the unit from the manager's units_requiring_mounts_for
 * index for every path prefix of every RequiresMountsFor= path, then
 * frees the path list. */
406 static void unit_free_requires_mounts_for(Unit *u) {
409 STRV_FOREACH(j, u->requires_mounts_for) {
/* VLA buffer sized for the longest prefix of *j. */
410 char s[strlen(*j) + 1];
412 PATH_FOREACH_PREFIX_MORE(s, *j) {
416 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
/* Drop the hashmap entry once its unit set is empty. */
422 if (set_isempty(x)) {
423 hashmap_remove(u->manager->units_requiring_mounts_for, y);
430 strv_free(u->requires_mounts_for);
431 u->requires_mounts_for = NULL;
/* Tears down the type-specific state: vtable done hook plus the exec and
 * cgroup contexts, if the type has them. */
434 static void unit_done(Unit *u) {
443 if (UNIT_VTABLE(u)->done)
444 UNIT_VTABLE(u)->done(u);
446 ec = unit_get_exec_context(u);
448 exec_context_done(ec);
450 cc = unit_get_cgroup_context(u);
452 cgroup_context_done(cc);
/* Destroys a unit: removes transient files (unless a reload is in
 * progress), emits the D-Bus Removed signal, unlinks the unit from every
 * manager index, queue and dependency set, then frees all owned memory.
 * NOTE(review): the final free(u) and several intermediate lines are
 * elided in this paste. */
455 void unit_free(Unit *u) {
/* Don't delete transient files while deserializing across a reload. */
462 if (u->manager->n_reloading <= 0)
463 unit_remove_transient(u);
465 bus_unit_send_removed_signal(u);
469 unit_free_requires_mounts_for(u);
/* Drop every name of this unit from the manager's units hashmap. */
471 SET_FOREACH(t, u->names, i)
472 hashmap_remove_value(u->manager->units, t, u);
/* Remove back-pointers from all dependency peers. */
486 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
487 bidi_set_free(u, u->dependencies[d]);
489 if (u->type != _UNIT_TYPE_INVALID)
490 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
/* Unlink from each manager work queue the unit is currently on. */
492 if (u->in_load_queue)
493 LIST_REMOVE(load_queue, u->manager->load_queue, u);
495 if (u->in_dbus_queue)
496 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
498 if (u->in_cleanup_queue)
499 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
501 if (u->in_gc_queue) {
502 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
503 u->manager->n_in_gc_queue--;
506 if (u->in_cgroup_queue)
507 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
509 if (u->cgroup_path) {
510 hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
511 free(u->cgroup_path);
514 set_remove(u->manager->failed_units, u);
515 set_remove(u->manager->startup_units, u);
/* Free all heap-owned members. */
517 free(u->description);
518 strv_free(u->documentation);
519 free(u->fragment_path);
520 free(u->source_path);
521 strv_free(u->dropin_paths);
524 free(u->job_timeout_reboot_arg);
526 set_free_free(u->names);
528 unit_unwatch_all_pids(u);
530 condition_free_list(u->conditions);
531 condition_free_list(u->asserts);
533 unit_ref_unset(&u->slice);
/* Detach all remaining UnitRef references pointing at us.
 * NOTE(review): the enclosing loop over u->refs is elided here. */
536 unit_ref_unset(u->refs);
/* Returns the unit's active state, following merge redirects to the
 * surviving unit. */
541 UnitActiveState unit_active_state(Unit *u) {
544 if (u->load_state == UNIT_MERGED)
545 return unit_active_state(unit_follow_merge(u));
547 /* After a reload it might happen that a unit is not correctly
548 * loaded but still has a process around. That's why we won't
549 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
551 return UNIT_VTABLE(u)->active_state(u);
/* Delegates the sub-state string to the type-specific implementation. */
554 const char* unit_sub_state_to_string(Unit *u) {
557 return UNIT_VTABLE(u)->sub_state_to_string(u);
/* Moves all elements from *other into *s.
 * NOTE(review): the branch handling a NULL *s (stealing *other wholesale)
 * is elided in this paste. */
560 static int complete_move(Set **s, Set **other) {
570 r = set_move(*s, *other);
/* Transfers all of other's names to u and repoints the manager's units
 * hashmap entries at u. */
581 static int merge_names(Unit *u, Unit *other) {
589 r = complete_move(&u->names, &other->names);
593 set_free_free(other->names);
/* Every name must now resolve to u; replace (not insert) is expected. */
597 SET_FOREACH(t, u->names, i)
598 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
/* Pre-reserves room in u's dependency set 'd' for everything that will be
 * moved over from 'other', so that the later merge cannot fail on OOM. */
603 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
608 assert(d < _UNIT_DEPENDENCY_MAX);
611 * If u does not have this dependency set allocated, there is no need
612 * to reserve anything. In that case other's set will be transfered
613 * as a whole to u by complete_move().
615 if (!u->dependencies[d])
618 /* merge_dependencies() will skip a u-on-u dependency */
619 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
621 return set_reserve(u->dependencies[d], n_reserve);
/* Moves other's dependencies of kind 'd' onto u: fixes each peer's
 * back-pointers from 'other' to 'u' (dropping would-be self-dependencies
 * with a warning), then bulk-moves the set. Must not fail — the caller
 * reserved space via reserve_dependencies(). */
624 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
631 assert(d < _UNIT_DEPENDENCY_MAX);
633 /* Fix backwards pointers */
634 SET_FOREACH(back, other->dependencies[d], i) {
637 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
638 /* Do not add dependencies between u and itself */
/* NOTE(review): the "back == u" guard around this branch is elided
 * in this paste. */
640 if (set_remove(back->dependencies[k], other))
641 maybe_warn_about_dependency(u->id, other_id, k);
643 r = set_remove_and_put(back->dependencies[k], other, u);
645 set_remove(back->dependencies[k], other);
/* -ENOENT means 'other' was not in that set; anything else is a bug. */
647 assert(r >= 0 || r == -ENOENT);
652 /* Also do not move dependencies on u to itself */
653 back = set_remove(other->dependencies[d], u);
655 maybe_warn_about_dependency(u->id, other_id, d);
657 /* The move cannot fail. The caller must have performed a reservation. */
658 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
660 set_free(other->dependencies[d]);
661 other->dependencies[d] = NULL;
/* Merges 'other' into 'u': both must be of the same type, equally
 * instanced, and 'other' must be an inactive stub/not-found unit. Names,
 * references and dependencies are transferred; 'other' becomes a
 * UNIT_MERGED redirect and is queued for cleanup. */
664 int unit_merge(Unit *u, Unit *other) {
666 const char *other_id = NULL;
671 assert(u->manager == other->manager);
672 assert(u->type != _UNIT_TYPE_INVALID);
674 other = unit_follow_merge(other);
/* Preconditions; NOTE(review): the error returns are elided here. */
679 if (u->type != other->type)
682 if (!u->instance != !other->instance)
685 if (other->load_state != UNIT_STUB &&
686 other->load_state != UNIT_NOT_FOUND)
695 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
/* Stack copy: other->id is freed as part of the merge below. */
699 other_id = strdupa(other->id);
701 /* Make reservations to ensure merge_dependencies() won't fail */
702 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
703 r = reserve_dependencies(u, other, d);
705 * We don't rollback reservations if we fail. We don't have
706 * a way to undo reservations. A reservation is not a leak.
713 r = merge_names(u, other);
717 /* Redirect all references */
719 unit_ref_set(other->refs, u);
721 /* Merge dependencies */
722 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
723 merge_dependencies(u, other, other_id, d);
725 other->load_state = UNIT_MERGED;
726 other->merged_into = u;
728 /* If there is still some data attached to the other node, we
729 * don't need it anymore, and can free it. */
730 if (other->load_state != UNIT_STUB)
731 if (UNIT_VTABLE(other)->done)
732 UNIT_VTABLE(other)->done(other);
734 unit_add_to_dbus_queue(u);
735 unit_add_to_cleanup_queue(other);
/* Adds 'name' to u, merging with an existing unit of that name if the
 * manager already knows one. */
740 int unit_merge_by_name(Unit *u, const char *name) {
743 _cleanup_free_ char *s = NULL;
748 if (unit_name_is_template(name)) {
752 s = unit_name_replace_instance(name, u->instance);
759 other = manager_get_unit(u->manager, name);
/* NOTE(review): the branch structure (no existing unit -> add name;
 * otherwise merge) is partially elided in this paste. */
761 r = unit_add_name(u, name);
763 r = unit_merge(u, other);
/* Follows the merged_into chain to the surviving unit. */
768 Unit* unit_follow_merge(Unit *u) {
771 while (u->load_state == UNIT_MERGED)
772 assert_se(u = u->merged_into);
/* Adds implicit dependencies derived from an ExecContext: mount deps for
 * the working/root directories, /tmp + /var/tmp for PrivateTmp= (system
 * instance only), and an After= on journald's socket when stdout/stderr
 * go to the journal/syslog/kmsg. */
777 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
783 if (c->working_directory) {
784 r = unit_require_mounts_for(u, c->working_directory);
789 if (c->root_directory) {
790 r = unit_require_mounts_for(u, c->root_directory);
/* The remaining implicit deps only make sense for the system manager. */
795 if (u->manager->running_as != SYSTEMD_SYSTEM)
798 if (c->private_tmp) {
799 r = unit_require_mounts_for(u, "/tmp");
803 r = unit_require_mounts_for(u, "/var/tmp");
/* If neither stdout nor stderr targets a logging daemon, skip the
 * journald dependency. */
808 if (c->std_output != EXEC_OUTPUT_KMSG &&
809 c->std_output != EXEC_OUTPUT_SYSLOG &&
810 c->std_output != EXEC_OUTPUT_JOURNAL &&
811 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
812 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
813 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
814 c->std_error != EXEC_OUTPUT_KMSG &&
815 c->std_error != EXEC_OUTPUT_SYSLOG &&
816 c->std_error != EXEC_OUTPUT_JOURNAL &&
817 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
818 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
819 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
822 /* If syslog or kernel logging is requested, make sure our own
823 * logging daemon is run first. */
825 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
/* Returns the configured description. NOTE(review): the fallback to the
 * unit id when no description is set is elided in this paste. */
832 const char *unit_description(Unit *u) {
836 return u->description;
/* Dumps a human-readable description of the unit's full state to 'f',
 * each line prefixed with 'prefix' (used by systemd-analyze dump /
 * manager debugging). Delegates type-specific state to the vtable's dump
 * hook. NOTE(review): several fprintf(f, ...) call headers are elided in
 * this paste. */
841 void unit_dump(Unit *u, FILE *f, const char *prefix) {
/* Scratch buffers for formatted timestamps and timespans. */
847 timestamp1[FORMAT_TIMESTAMP_MAX],
848 timestamp2[FORMAT_TIMESTAMP_MAX],
849 timestamp3[FORMAT_TIMESTAMP_MAX],
850 timestamp4[FORMAT_TIMESTAMP_MAX],
851 timespan[FORMAT_TIMESPAN_MAX];
853 _cleanup_set_free_ Set *following_set = NULL;
857 assert(u->type >= 0);
859 prefix = strempty(prefix);
/* prefix2 = prefix + one extra tab, for nested type-specific output. */
860 prefix2 = strappenda(prefix, "\t");
864 "%s\tDescription: %s\n"
866 "%s\tUnit Load State: %s\n"
867 "%s\tUnit Active State: %s\n"
868 "%s\tInactive Exit Timestamp: %s\n"
869 "%s\tActive Enter Timestamp: %s\n"
870 "%s\tActive Exit Timestamp: %s\n"
871 "%s\tInactive Enter Timestamp: %s\n"
872 "%s\tGC Check Good: %s\n"
873 "%s\tNeed Daemon Reload: %s\n"
874 "%s\tTransient: %s\n"
877 "%s\tCGroup realized: %s\n"
878 "%s\tCGroup mask: 0x%x\n"
879 "%s\tCGroup members mask: 0x%x\n",
881 prefix, unit_description(u),
882 prefix, strna(u->instance),
883 prefix, unit_load_state_to_string(u->load_state),
884 prefix, unit_active_state_to_string(unit_active_state(u)),
885 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
886 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
887 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
888 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
889 prefix, yes_no(unit_check_gc(u)),
890 prefix, yes_no(unit_need_daemon_reload(u)),
891 prefix, yes_no(u->transient),
892 prefix, strna(unit_slice_name(u)),
893 prefix, strna(u->cgroup_path),
894 prefix, yes_no(u->cgroup_realized),
895 prefix, u->cgroup_realized_mask,
896 prefix, u->cgroup_members_mask);
/* All registered names and documentation URIs. */
898 SET_FOREACH(t, u->names, i)
899 fprintf(f, "%s\tName: %s\n", prefix, t);
901 STRV_FOREACH(j, u->documentation)
902 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
904 following = unit_following(u);
906 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
908 r = unit_following_set(u, &following_set);
912 SET_FOREACH(other, following_set, i)
913 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
/* Configuration file paths backing this unit. */
916 if (u->fragment_path)
917 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
920 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
922 STRV_FOREACH(j, u->dropin_paths)
923 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
925 if (u->job_timeout > 0)
926 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
928 if (u->job_timeout_action != FAILURE_ACTION_NONE)
929 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
931 if (u->job_timeout_reboot_arg)
932 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
934 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
935 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
937 if (dual_timestamp_is_set(&u->condition_timestamp))
939 "%s\tCondition Timestamp: %s\n"
940 "%s\tCondition Result: %s\n",
941 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
942 prefix, yes_no(u->condition_result));
944 if (dual_timestamp_is_set(&u->assert_timestamp))
946 "%s\tAssert Timestamp: %s\n"
947 "%s\tAssert Result: %s\n",
948 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
949 prefix, yes_no(u->assert_result));
/* One line per dependency edge, grouped by dependency kind. */
951 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
954 SET_FOREACH(other, u->dependencies[d], i)
955 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
958 if (!strv_isempty(u->requires_mounts_for)) {
960 "%s\tRequiresMountsFor:", prefix);
962 STRV_FOREACH(j, u->requires_mounts_for)
963 fprintf(f, " %s", *j);
/* Load-state-specific details. */
968 if (u->load_state == UNIT_LOADED) {
971 "%s\tStopWhenUnneeded: %s\n"
972 "%s\tRefuseManualStart: %s\n"
973 "%s\tRefuseManualStop: %s\n"
974 "%s\tDefaultDependencies: %s\n"
975 "%s\tOnFailureJobMode: %s\n"
976 "%s\tIgnoreOnIsolate: %s\n"
977 "%s\tIgnoreOnSnapshot: %s\n",
978 prefix, yes_no(u->stop_when_unneeded),
979 prefix, yes_no(u->refuse_manual_start),
980 prefix, yes_no(u->refuse_manual_stop),
981 prefix, yes_no(u->default_dependencies),
982 prefix, job_mode_to_string(u->on_failure_job_mode),
983 prefix, yes_no(u->ignore_on_isolate),
984 prefix, yes_no(u->ignore_on_snapshot));
986 if (UNIT_VTABLE(u)->dump)
987 UNIT_VTABLE(u)->dump(u, f, prefix2);
989 } else if (u->load_state == UNIT_MERGED)
991 "%s\tMerged into: %s\n",
992 prefix, u->merged_into->id);
993 else if (u->load_state == UNIT_ERROR)
994 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
/* Pending jobs, if any. */
998 job_dump(u->job, f, prefix2);
1001 job_dump(u->nop_job, f, prefix2);
1005 /* Common implementation for multiple backends */
/* Loads the unit's fragment file and its drop-in directories; the
 * fragment is mandatory here — a unit still in UNIT_STUB afterwards is
 * an error. NOTE(review): the error return for the stub case is elided
 * in this paste. */
1006 int unit_load_fragment_and_dropin(Unit *u) {
1011 /* Load a .{service,socket,...} file */
1012 r = unit_load_fragment(u);
1016 if (u->load_state == UNIT_STUB)
1019 /* Load drop-in directory data */
1020 r = unit_load_dropin(unit_follow_merge(u));
1027 /* Common implementation for multiple backends */
1028 int unit_load_fragment_and_dropin_optional(Unit *u) {
1033 /* Same as unit_load_fragment_and_dropin(), but whether
1034 * something can be loaded or not doesn't matter. */
1036 /* Load a .service file */
1037 r = unit_load_fragment(u);
/* Missing fragment is fine: promote stub to loaded anyway. */
1041 if (u->load_state == UNIT_STUB)
1042 u->load_state = UNIT_LOADED;
1044 /* Load drop-in directory data */
1045 r = unit_load_dropin(unit_follow_merge(u));
/* Adds the implicit After= from a target onto this unit (so targets
 * order after their dependencies), unless either side disabled default
 * dependencies or the reverse ordering already exists (loop avoidance). */
1052 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1056 if (target->type != UNIT_TARGET)
1059 /* Only add the dependency if both units are loaded, so that
1060 * that loop check below is reliable */
1061 if (u->load_state != UNIT_LOADED ||
1062 target->load_state != UNIT_LOADED)
1065 /* If either side wants no automatic dependencies, then let's
1067 if (!u->default_dependencies ||
1068 !target->default_dependencies)
1071 /* Don't create loops */
1072 if (set_get(target->dependencies[UNIT_BEFORE], u))
1075 return unit_add_dependency(target, UNIT_AFTER, u, true);
/* Applies unit_add_default_target_dependency() to every target this unit
 * is pulled in by (RequiredBy=, WantedBy=, ...). */
1078 static int unit_add_target_dependencies(Unit *u) {
1080 static const UnitDependency deps[] = {
1082 UNIT_REQUIRED_BY_OVERRIDABLE,
1094 for (k = 0; k < ELEMENTSOF(deps); k++)
1095 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1096 r = unit_add_default_target_dependency(u, target);
/* For units with a cgroup context: orders/wants the configured slice,
 * defaulting to the root slice (which itself gets none). */
1104 static int unit_add_slice_dependencies(Unit *u) {
1107 if (!unit_get_cgroup_context(u))
1110 if (UNIT_ISSET(u->slice))
1111 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_WANTS, UNIT_DEREF(u->slice), true);
1113 if (streq(u->id, SPECIAL_ROOT_SLICE))
1116 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, SPECIAL_ROOT_SLICE, NULL, true);
/* For every RequiresMountsFor= path prefix, adds After= (and Requires=
 * when the mount has a fragment file) on the corresponding .mount unit. */
1119 static int unit_add_mount_dependencies(Unit *u) {
1125 STRV_FOREACH(i, u->requires_mounts_for) {
1126 char prefix[strlen(*i) + 1];
1128 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1131 r = manager_get_unit_by_path(u->manager, prefix, ".mount", &m);
1139 if (m->load_state != UNIT_LOADED)
1142 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1146 if (m->fragment_path) {
1147 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
/* Registers the unit in manager->startup_units when it configures
 * startup-specific CPU shares or block-IO weight. */
1157 static int unit_add_startup_units(Unit *u) {
1161 c = unit_get_cgroup_context(u);
/* (unsigned long) -1 is the "unset" sentinel for both settings. */
1165 if (c->startup_cpu_shares == (unsigned long) -1 &&
1166 c->startup_blockio_weight == (unsigned long) -1)
1169 r = set_put(u->manager->startup_units, u);
/* Loads a stub unit via its type's load hook, then wires up the implicit
 * dependencies (targets, slice, mounts, startup set) and sanity-checks
 * the result. On failure, marks the unit NOT_FOUND or ERROR and logs.
 * NOTE(review): the success return and the 'fail:' label are elided in
 * this paste. */
1176 int unit_load(Unit *u) {
1181 if (u->in_load_queue) {
1182 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1183 u->in_load_queue = false;
1186 if (u->type == _UNIT_TYPE_INVALID)
1189 if (u->load_state != UNIT_STUB)
1192 if (UNIT_VTABLE(u)->load) {
1193 r = UNIT_VTABLE(u)->load(u);
1198 if (u->load_state == UNIT_STUB) {
1203 if (u->load_state == UNIT_LOADED) {
1205 r = unit_add_target_dependencies(u);
1209 r = unit_add_slice_dependencies(u);
1213 r = unit_add_mount_dependencies(u);
1217 r = unit_add_startup_units(u);
/* JOB_ISOLATE can only sensibly target a single OnFailure= unit. */
1221 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1222 log_unit_error(u->id, "More than one OnFailure= dependencies specified for %s but OnFailureJobMode=isolate set. Refusing.", u->id);
1227 unit_update_cgroup_members_masks(u);
/* merged_into must be set exactly when the state is UNIT_MERGED. */
1230 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1232 unit_add_to_dbus_queue(unit_follow_merge(u));
1233 unit_add_to_gc_queue(u);
/* Failure path: distinguish "file not found" from a hard load error. */
1238 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1240 unit_add_to_dbus_queue(u);
1241 unit_add_to_gc_queue(u);
1243 log_unit_debug(u->id, "Failed to load configuration for %s: %s",
1244 u->id, strerror(-r));
/* Evaluates a Condition (or Assert) list: all non-trigger conditions
 * must hold, and at least one trigger ("|") condition, if any exist.
 * An empty list counts as true; a condition that cannot be evaluated is
 * logged and treated as failed. */
1249 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1256 /* If the condition list is empty, then it is true */
1260 /* Otherwise, if all of the non-trigger conditions apply and
1261 * if any of the trigger conditions apply (unless there are
1262 * none) we return true */
1263 LIST_FOREACH(conditions, c, first) {
1266 r = condition_test(c);
1268 log_unit_warning(u->id,
1269 "Couldn't determine result for %s=%s%s%s for %s, assuming failed: %s",
1271 c->trigger ? "|" : "",
1272 c->negate ? "!" : "",
1277 log_unit_debug(u->id,
1278 "%s=%s%s%s %s for %s.",
1280 c->trigger ? "|" : "",
1281 c->negate ? "!" : "",
1283 condition_result_to_string(c->result),
/* Any failing non-trigger condition fails the whole list.
 * NOTE(review): the early "return false" under this check is elided. */
1286 if (!c->trigger && r <= 0)
1289 if (c->trigger && triggered <= 0)
/* triggered stays negative when no trigger conditions exist at all. */
1293 return triggered != 0;
/* Evaluates and caches the unit's Condition*= result with a timestamp. */
1296 static bool unit_condition_test(Unit *u) {
1299 dual_timestamp_get(&u->condition_timestamp);
1300 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1302 return u->condition_result;
/* Evaluates and caches the unit's Assert*= result with a timestamp. */
1305 static bool unit_assert_test(Unit *u) {
1308 dual_timestamp_get(&u->assert_timestamp);
1309 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1311 return u->assert_result;
/* Returns the type-specific "Starting %s..."/"Stopping %s..." format for
 * job type t, or (per the elided paths) nothing for other job types. */
1314 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1315 const UnitStatusMessageFormats *format_table;
1319 assert(t < _JOB_TYPE_MAX);
1321 if (t != JOB_START && t != JOB_STOP)
1324 format_table = &UNIT_VTABLE(u)->status_message_formats;
1328 return format_table->starting_stopping[t == JOB_STOP];
/* As above, but falls back to generic strings so a message always
 * exists for start/stop/reload. */
1331 _pure_ static const char *unit_get_status_message_format_try_harder(Unit *u, JobType t) {
1336 assert(t < _JOB_TYPE_MAX);
1338 format = unit_get_status_message_format(u, t);
1342 /* Return generic strings */
1344 return "Starting %s.";
1345 else if (t == JOB_STOP)
1346 return "Stopping %s.";
1347 else if (t == JOB_RELOAD)
1348 return "Reloading %s.";
/* Prints a console status message for a start/stop job, but only for
 * unit types that define a format (no try-harder fallback here). */
1353 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1358 /* We only print status messages for selected units on
1359 * selected operations. */
1361 format = unit_get_status_message_format(u, t);
/* format is a unit-type-provided (non-literal) printf string. */
1365 DISABLE_WARNING_FORMAT_NONLITERAL;
1366 unit_status_printf(u, "", format);
/* Logs a structured journal entry (with message ID) when a unit starts,
 * stops or reloads — skipped when logging goes to the console to avoid
 * duplicating the status line above. */
1370 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1377 if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
1380 if (log_on_console())
1383 /* We log status messages for all units and all operations. */
1385 format = unit_get_status_message_format_try_harder(u, t);
1389 DISABLE_WARNING_FORMAT_NONLITERAL;
1390 snprintf(buf, sizeof(buf), format, unit_description(u));
/* Pick the catalog message ID matching the job type. */
1394 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1395 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1396 SD_MESSAGE_UNIT_RELOADING;
1398 log_unit_struct(u->id,
1400 LOG_MESSAGE_ID(mid),
1401 LOG_MESSAGE("%s", buf),
/* Errors (documented for callers):
1406 * -EBADR: This unit type does not support starting.
1407 * -EALREADY: Unit is already started.
1408 * -EAGAIN: An operation is already in progress. Retry later.
1409 * -ECANCELED: Too many requests for now.
1410 * -EPROTO: Assert failed
 */
/* Starts the unit: checks conditions/asserts, redirects to the unit it
 * follows, emits status messages, then dispatches to the type's start
 * hook. NOTE(review): several return statements are elided in this
 * paste. */
1412 int unit_start(Unit *u) {
1413 UnitActiveState state;
1418 if (u->load_state != UNIT_LOADED)
1421 /* If this is already started, then this will succeed. Note
1422 * that this will even succeed if this unit is not startable
1423 * by the user. This is relied on to detect when we need to
1424 * wait for units and when waiting is finished. */
1425 state = unit_active_state(u);
1426 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1429 /* If the conditions failed, don't do anything at all. If we
1430 * already are activating this call might still be useful to
1431 * speed up activation in case there is some hold-off time,
1432 * but we don't want to recheck the condition in that case. */
1433 if (state != UNIT_ACTIVATING &&
1434 !unit_condition_test(u)) {
1435 log_unit_debug(u->id, "Starting of %s requested but condition failed. Not starting unit.", u->id);
1439 /* If the asserts failed, fail the entire job */
1440 if (state != UNIT_ACTIVATING &&
1441 !unit_assert_test(u)) {
1442 log_unit_debug(u->id, "Starting of %s requested but asserts failed.", u->id);
1446 /* Forward to the main object, if we aren't it. */
1447 following = unit_following(u);
1449 log_unit_debug(u->id, "Redirecting start request from %s to %s.", u->id, following->id);
1450 return unit_start(following);
1453 unit_status_log_starting_stopping_reloading(u, JOB_START);
1454 unit_status_print_starting_stopping(u, JOB_START);
1456 /* If it is stopped, but we cannot start it, then fail */
1457 if (!UNIT_VTABLE(u)->start)
1460 /* We don't suppress calls to ->start() here when we are
1461 * already starting, to allow this request to be used as a
1462 * "hurry up" call, for example when the unit is in some "auto
1463 * restart" state where it waits for a holdoff timer to elapse
1464 * before it will start again. */
1466 unit_add_to_dbus_queue(u);
1468 return UNIT_VTABLE(u)->start(u);
/* True if the unit's type implements a start operation. */
1471 bool unit_can_start(Unit *u) {
1474 return !!UNIT_VTABLE(u)->start;
/* True if the unit may be used as an isolation target (startable and,
 * per the elided condition, permitted by its type). */
1477 bool unit_can_isolate(Unit *u) {
1480 return unit_can_start(u) &&
/* Errors (documented for callers):
1485 * -EBADR: This unit type does not support stopping.
1486 * -EALREADY: Unit is already stopped.
1487 * -EAGAIN: An operation is already in progress. Retry later.
 */
/* Stops the unit, redirecting to the followed unit and dispatching to
 * the type's stop hook. NOTE(review): return statements are elided. */
1489 int unit_stop(Unit *u) {
1490 UnitActiveState state;
1495 state = unit_active_state(u);
1496 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1499 if ((following = unit_following(u))) {
1500 log_unit_debug(u->id, "Redirecting stop request from %s to %s.",
1501 u->id, following->id);
1502 return unit_stop(following);
1505 unit_status_log_starting_stopping_reloading(u, JOB_STOP);
1506 unit_status_print_starting_stopping(u, JOB_STOP);
1508 if (!UNIT_VTABLE(u)->stop)
1511 unit_add_to_dbus_queue(u);
1513 return UNIT_VTABLE(u)->stop(u);
/* Errors (documented for callers):
1517 * -EBADR: This unit type does not support reloading.
1518 * -ENOEXEC: Unit is not started.
1519 * -EAGAIN: An operation is already in progress. Retry later.
 */
/* Reloads the unit; only valid while it is active. */
1521 int unit_reload(Unit *u) {
1522 UnitActiveState state;
1527 if (u->load_state != UNIT_LOADED)
1530 if (!unit_can_reload(u))
1533 state = unit_active_state(u);
/* A reload already in progress counts as success (-EALREADY path,
 * elided here). */
1534 if (state == UNIT_RELOADING)
1537 if (state != UNIT_ACTIVE) {
1538 log_unit_warning(u->id, "Unit %s cannot be reloaded because it is inactive.",
1543 following = unit_following(u);
1545 log_unit_debug(u->id, "Redirecting reload request from %s to %s.",
1546 u->id, following->id);
1547 return unit_reload(following);
1550 unit_status_log_starting_stopping_reloading(u, JOB_RELOAD);
1552 unit_add_to_dbus_queue(u);
1553 return UNIT_VTABLE(u)->reload(u);
/* A type can reload if it has a reload hook and either no can_reload
 * hook or one that agrees. */
1556 bool unit_can_reload(Unit *u) {
1559 if (!UNIT_VTABLE(u)->reload)
1562 if (!UNIT_VTABLE(u)->can_reload)
1565 return UNIT_VTABLE(u)->can_reload(u);
/* StopWhenUnneeded= handling: if the unit is active but no active or
 * pending unit requires/wants/binds to it anymore, enqueue a stop job. */
1568 static void unit_check_unneeded(Unit *u) {
1574 /* If this service shall be shut down when unneeded then do
1577 if (!u->stop_when_unneeded)
1580 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
/* Any live reverse dependency keeps us around; the early returns
 * under these checks are elided in this paste. */
1583 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY], i)
1584 if (unit_active_or_pending(other))
1587 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
1588 if (unit_active_or_pending(other))
1591 SET_FOREACH(other, u->dependencies[UNIT_WANTED_BY], i)
1592 if (unit_active_or_pending(other))
1595 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1596 if (unit_active_or_pending(other))
1599 log_unit_info(u->id, "Unit %s is not needed anymore. Stopping.", u->id);
1601 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1602 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* BindsTo= handling: if any unit we are bound to went inactive/failed,
 * enqueue a stop job for ourselves. */
1605 static void unit_check_binds_to(Unit *u) {
1615 if (unit_active_state(u) != UNIT_ACTIVE)
1618 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1622 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1631 log_unit_info(u->id, "Unit %s is bound to inactive service. Stopping, too.", u->id);
1633 /* A unit we need to run is gone. Sniff. Let's stop this. */
1634 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
1637 static void retroactively_start_dependencies(Unit *u) {
1642 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1644 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1645 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1646 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1647 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1649 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1650 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1651 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1652 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1654 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1655 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1656 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1657 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1659 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1660 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1661 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1662 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1664 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1665 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1666 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1668 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1669 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1670 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* After this unit stopped, pull down every unit that is BoundBy= us and still running
 * (propagation is recursive via the jobs this enqueues). Excerpted listing — gaps present. */
1673 static void retroactively_stop_dependencies(Unit *u) {
1678 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1680 /* Pull down units which are bound to us recursively if enabled */
1681 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1682 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1683 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* When this unit goes down, re-evaluate "still needed?" for everything we depended on
 * (Requires=, RequiresOverridable=, Wants=, Requisite=, RequisiteOverridable=, BindsTo=)
 * that is still running, via unit_check_unneeded(). Excerpted listing — gaps present. */
1686 static void check_unneeded_dependencies(Unit *u) {
1691 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1693 /* Garbage collect services that might not be needed anymore, if enabled */
1694 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1695 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1696 unit_check_unneeded(other);
1697 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1698 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1699 unit_check_unneeded(other);
1700 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1701 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1702 unit_check_unneeded(other);
1703 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1704 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1705 unit_check_unneeded(other);
1706 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1707 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1708 unit_check_unneeded(other);
1709 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1710 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1711 unit_check_unneeded(other);
/* Enqueue a start job (using u->on_failure_job_mode) for every OnFailure= dependency;
 * no-op when the OnFailure= set is empty. Enqueue errors are logged, not propagated.
 * Excerpted listing — gaps present. */
1714 void unit_start_on_failure(Unit *u) {
1720 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1723 log_unit_info(u->id, "Triggering OnFailure= dependencies of %s.", u->id);
1725 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1728 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, true, NULL, NULL);
1730 log_unit_error_errno(u->id, r, "Failed to enqueue OnFailure= job: %m");
/* Invoke the per-type trigger_notify() callback on every unit that is TriggeredBy= us,
 * so triggering units (e.g. via timers/paths/sockets elsewhere in the tree) observe our
 * state change. Excerpted listing — gaps present. */
1734 void unit_trigger_notify(Unit *u) {
1740 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1741 if (UNIT_VTABLE(other)->trigger_notify)
1742 UNIT_VTABLE(other)->trigger_notify(other, u);
/* Central state-change hook, called by unit type implementations on every low-level state
 * transition (os = old state, ns = new state; ns == os is legitimate, e.g. remounts).
 * Responsibilities visible in this (gappy) excerpt:
 *   - record inactive/active enter/exit timestamps (skipped while deserializing/reloading);
 *   - maintain manager->failed_units set and tear down the cgroup on inactive/failed;
 *   - release the console bookkeeping when a console-touching unit goes inactive;
 *   - complete or invalidate the unit's pending job depending on ns and job type;
 *   - retroactively start/stop dependencies, run unneeded-unit GC, fire OnFailure=;
 *   - emit audit/plymouth notifications for service start/stop;
 *   - recheck journal, notify triggered-by units, and queue dbus/GC processing.
 * NOTE(review): many lines are elided in this listing; do not rely on exact control flow here. */
1745 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1750 assert(os < _UNIT_ACTIVE_STATE_MAX);
1751 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1753 /* Note that this is called for all low-level state changes,
1754 * even if they might map to the same high-level
1755 * UnitActiveState! That means that ns == os is an expected
1756 * behavior here. For example: if a mount point is remounted
1757 * this function will be called too! */
1761 /* Update timestamps for state changes */
1762 if (m->n_reloading <= 0) {
1765 dual_timestamp_get(&ts);
1767 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1768 u->inactive_exit_timestamp = ts;
1769 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1770 u->inactive_enter_timestamp = ts;
1772 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1773 u->active_enter_timestamp = ts;
1774 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1775 u->active_exit_timestamp = ts;
1778 /* Keep track of failed units */
1779 if (ns == UNIT_FAILED)
1780 set_put(u->manager->failed_units, u);
1782 set_remove(u->manager->failed_units, u);
1784 /* Make sure the cgroup is always removed when we become inactive */
1785 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1786 unit_destroy_cgroup_if_empty(u);
1788 /* Note that this doesn't apply to RemainAfterExit services exiting
1789 * successfully, since there's no change of state in that case. Which is
1790 * why it is handled in service_set_state() */
1791 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1794 ec = unit_get_exec_context(u);
1795 if (ec && exec_context_may_touch_console(ec)) {
1796 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1799 if (m->n_on_console == 0)
1800 /* unset no_console_output flag, since the console is free */
1801 m->no_console_output = false;
1810 if (u->job->state == JOB_WAITING)
1812 /* So we reached a different state for this
1813 * job. Let's see if we can run it now if it
1814 * failed previously due to EAGAIN. */
1815 job_add_to_run_queue(u->job);
1817 /* Let's check whether this state change constitutes a
1818 * finished job, or maybe contradicts a running job and
1819 * hence needs to invalidate jobs. */
1821 switch (u->job->type) {
1824 case JOB_VERIFY_ACTIVE:
1826 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1827 job_finish_and_invalidate(u->job, JOB_DONE, true);
1828 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1831 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1832 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1838 case JOB_RELOAD_OR_START:
1840 if (u->job->state == JOB_RUNNING) {
1841 if (ns == UNIT_ACTIVE)
1842 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1843 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1846 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1847 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1855 case JOB_TRY_RESTART:
1857 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1858 job_finish_and_invalidate(u->job, JOB_DONE, true);
1859 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1861 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1867 assert_not_reached("Job type unknown");
1873 if (m->n_reloading <= 0) {
1875 /* If this state change happened without being
1876 * requested by a job, then let's retroactively start
1877 * or stop dependencies. We skip that step when
1878 * deserializing, since we don't want to create any
1879 * additional jobs just because something is already
1883 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1884 retroactively_start_dependencies(u);
1885 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1886 retroactively_stop_dependencies(u);
1889 /* stop unneeded units regardless if going down was expected or not */
1890 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1891 check_unneeded_dependencies(u);
1893 if (ns != os && ns == UNIT_FAILED) {
1894 log_unit_notice(u->id, "Unit %s entered failed state.", u->id);
1895 unit_start_on_failure(u);
1899 /* Some names are special */
1900 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1902 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1903 /* The bus might have just become available,
1904 * hence try to connect to it, if we aren't
1908 if (u->type == UNIT_SERVICE &&
1909 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1910 m->n_reloading <= 0) {
1911 /* Write audit record if we have just finished starting up */
1912 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1916 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1917 manager_send_unit_plymouth(m, u);
1921 /* We don't care about D-Bus here, since we'll get an
1922 * asynchronous notification for it anyway. */
1924 if (u->type == UNIT_SERVICE &&
1925 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1926 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1927 m->n_reloading <= 0) {
1929 /* Hmm, if there was no start record written
1930 * write it now, so that we always have a nice
1933 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1935 if (ns == UNIT_INACTIVE)
1936 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1938 /* Write audit record if we have just finished shutting down */
1939 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
1941 u->in_audit = false;
1945 manager_recheck_journal(m);
1946 unit_trigger_notify(u);
1948 if (u->manager->n_reloading <= 0) {
1949 /* Maybe we finished startup and are now ready for
1950 * being stopped because unneeded? */
1951 unit_check_unneeded(u);
1953 /* Maybe we finished startup, but something we needed
1954 * has vanished? Let's die then. (This happens when
1955 * something BindsTo= to a Type=oneshot unit, as these
1956 * units go directly from starting to inactive,
1957 * without ever entering started.) */
1958 unit_check_binds_to(u);
1961 unit_add_to_dbus_queue(u);
1962 unit_add_to_gc_queue(u);
/* Register a PID with this unit: record it in u->pids and in the manager's watch_pids1
 * map (watch_pids2 serves as overflow when another unit already watches the same PID —
 * at most two watchers per PID). PIDs are stored as pointers via LONG_TO_PTR.
 * Excerpted listing — error-handling lines are elided. */
1965 int unit_watch_pid(Unit *u, pid_t pid) {
1971 /* Watch a specific PID. We only support one or two units
1972 * watching each PID for now, not more. */
1974 r = set_ensure_allocated(&u->pids, NULL);
1978 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
1982 r = hashmap_put(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
1984 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
1988 r = hashmap_put(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
1991 q = set_put(u->pids, LONG_TO_PTR(pid));
/* Forget a watched PID: drop this unit's entry from both manager watch maps
 * (only entries mapping to u are removed) and from u->pids. */
1998 void unit_unwatch_pid(Unit *u, pid_t pid) {
2002 hashmap_remove_value(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
2003 hashmap_remove_value(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2004 set_remove(u->pids, LONG_TO_PTR(pid));
/* Unwatch every PID this unit tracks by repeatedly removing the set's first element
 * (unit_unwatch_pid also erases it from u->pids, so the loop terminates). */
2007 void unit_unwatch_all_pids(Unit *u) {
2010 while (!set_isempty(u->pids))
2011 unit_unwatch_pid(u, PTR_TO_LONG(set_first(u->pids)));
/* Recursively add every PID found under cgroup `path` to this unit's watch set:
 * enumerate processes in the cgroup, then recurse into each subgroup. The first error
 * is remembered in `ret` while enumeration continues best-effort.
 * Excerpted listing — several error-path lines are elided. */
2017 static int unit_watch_pids_in_path(Unit *u, const char *path) {
2018 _cleanup_closedir_ DIR *d = NULL;
2019 _cleanup_fclose_ FILE *f = NULL;
2025 /* Adds all PIDs from a specific cgroup path to the set of PIDs we watch. */
2027 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2031 while ((r = cg_read_pid(f, &pid)) > 0) {
2032 r = unit_watch_pid(u, pid);
2033 if (r < 0 && ret >= 0)
2036 if (r < 0 && ret >= 0)
2039 } else if (ret >= 0)
2042 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2046 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2047 _cleanup_free_ char *p = NULL;
2049 p = strjoin(path, "/", fn, NULL);
2055 r = unit_watch_pids_in_path(u, p);
2056 if (r < 0 && ret >= 0)
2059 if (r < 0 && ret >= 0)
2062 } else if (ret >= 0)
/* Watch every PID in this unit's own cgroup; no-op (early return, elided here)
 * when the unit has no cgroup path. */
2068 int unit_watch_all_pids(Unit *u) {
2071 /* Adds all PIDs from our cgroup to the set of PIDs we watch */
2073 if (!u->cgroup_path)
2076 return unit_watch_pids_in_path(u, u->cgroup_path);
/* Drop watched PIDs whose processes have already been waited for, keeping except1 and
 * except2 (e.g. a unit's main/control PIDs) regardless. NOTE(review): removal while
 * iterating relies on SET_FOREACH tolerating it — confirm against set implementation. */
2079 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2085 /* Cleans dead PIDs from our list */
2087 SET_FOREACH(e, u->pids, i) {
2088 pid_t pid = PTR_TO_LONG(e);
2090 if (pid == except1 || pid == except2)
2093 if (!pid_is_unwaited(pid))
2094 unit_unwatch_pid(u, pid);
/* Decide whether job type `j` makes sense for this unit: start-ish jobs require
 * unit_can_start(), reload requires unit_can_reload(), reload-or-start requires both.
 * Excerpted listing — additional case labels/returns are elided. */
2098 bool unit_job_is_applicable(Unit *u, JobType j) {
2100 assert(j >= 0 && j < _JOB_TYPE_MAX);
2104 case JOB_VERIFY_ACTIVE:
2111 case JOB_TRY_RESTART:
2112 return unit_can_start(u);
2115 return unit_can_reload(u);
2117 case JOB_RELOAD_OR_START:
2118 return unit_can_reload(u) && unit_can_start(u);
2121 assert_not_reached("Invalid job type");
/* Log a warning when a dependency had to be dropped (typically because a unit would
 * depend on itself after a merge); the message differs depending on whether `id` and
 * `other` are the same name. Only the dependency types listed in the later case labels
 * warn; the earlier ones fall through silently (elided lines). Excerpted listing. */
2125 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency) {
2128 switch (dependency) {
2130 case UNIT_REQUIRES_OVERRIDABLE:
2132 case UNIT_REQUISITE:
2133 case UNIT_REQUISITE_OVERRIDABLE:
2136 case UNIT_REQUIRED_BY:
2137 case UNIT_REQUIRED_BY_OVERRIDABLE:
2138 case UNIT_WANTED_BY:
2140 case UNIT_CONSISTS_OF:
2141 case UNIT_REFERENCES:
2142 case UNIT_REFERENCED_BY:
2143 case UNIT_PROPAGATES_RELOAD_TO:
2144 case UNIT_RELOAD_PROPAGATED_FROM:
2145 case UNIT_JOINS_NAMESPACE_OF:
2148 case UNIT_CONFLICTS:
2149 case UNIT_CONFLICTED_BY:
2152 case UNIT_ON_FAILURE:
2154 case UNIT_TRIGGERED_BY:
2155 if (streq_ptr(id, other))
2156 log_unit_warning(id, "Dependency %s=%s dropped from unit %s",
2157 unit_dependency_to_string(dependency), id, other);
2159 log_unit_warning(id, "Dependency %s=%s dropped from unit %s merged into %s",
2160 unit_dependency_to_string(dependency), id,
2164 case _UNIT_DEPENDENCY_MAX:
2165 case _UNIT_DEPENDENCY_INVALID:
2169 assert_not_reached("Invalid dependency type");
/* Add dependency `d` from u to other, plus the inverse edge (per inverse_table) on
 * `other` and, when add_reference is set, REFERENCES/REFERENCED_BY bookkeeping edges.
 * Both units are first resolved through unit_follow_merge(); a self-dependency is
 * tolerated (warned about via maybe_warn_about_dependency, elided early-return).
 * The trailing set_remove() lines are the rollback path undoing partial insertions
 * on failure. Excerpted listing — error checks and labels between the visible lines
 * are elided; do not rely on exact control flow here. */
2172 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2174 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2175 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2176 [UNIT_REQUIRES_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2177 [UNIT_WANTS] = UNIT_WANTED_BY,
2178 [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
2179 [UNIT_REQUISITE_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2180 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2181 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2182 [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
2183 [UNIT_REQUIRED_BY_OVERRIDABLE] = _UNIT_DEPENDENCY_INVALID,
2184 [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
2185 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2186 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2187 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2188 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2189 [UNIT_BEFORE] = UNIT_AFTER,
2190 [UNIT_AFTER] = UNIT_BEFORE,
2191 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2192 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2193 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2194 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2195 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2196 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2197 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2198 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2200 int r, q = 0, v = 0, w = 0;
2201 Unit *orig_u = u, *orig_other = other;
2204 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2207 u = unit_follow_merge(u);
2208 other = unit_follow_merge(other);
2210 /* We won't allow dependencies on ourselves. We will not
2211 * consider them an error however. */
2213 maybe_warn_about_dependency(orig_u->id, orig_other->id, d);
2217 r = set_ensure_allocated(&u->dependencies[d], NULL);
2221 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2222 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2227 if (add_reference) {
2228 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2232 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2237 q = set_put(u->dependencies[d], other);
2241 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2242 v = set_put(other->dependencies[inverse_table[d]], u);
2249 if (add_reference) {
2250 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2256 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2261 unit_add_to_dbus_queue(u);
2266 set_remove(u->dependencies[d], other);
2269 set_remove(other->dependencies[inverse_table[d]], u);
2272 set_remove(u->dependencies[UNIT_REFERENCES], other);
/* Convenience wrapper: add dependencies `d` and `e` from u to other in sequence
 * (error handling between the two calls is elided in this excerpt). */
2277 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2282 r = unit_add_dependency(u, d, other, add_reference);
2286 r = unit_add_dependency(u, e, other, add_reference);
/* Resolve a possibly-templated unit name: derive the name from `path` when absent,
 * return it unchanged when it is not a template, otherwise instantiate it with this
 * unit's instance string (or, lacking one, this unit's prefix). The allocated result
 * is stored in *p for the caller to free. Excerpted listing — gaps present. */
2293 static const char *resolve_template(Unit *u, const char *name, const char*path, char **p) {
2297 assert(name || path);
2301 name = basename(path);
2303 if (!unit_name_is_template(name)) {
2309 s = unit_name_replace_instance(name, u->instance);
2311 _cleanup_free_ char *i = NULL;
2313 i = unit_name_to_prefix(u->id);
2317 s = unit_name_replace_instance(name, i);
/* Add dependency `d` from u to the unit identified by name/path: resolve templates,
 * load (or find) the target via manager_load_unit(), then delegate to unit_add_dependency(). */
2327 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2330 _cleanup_free_ char *s = NULL;
2333 assert(name || path);
2335 name = resolve_template(u, name, path, &s);
2339 r = manager_load_unit(u->manager, name, path, NULL, &other);
2343 return unit_add_dependency(u, d, other, add_reference);
/* Like unit_add_dependency_by_name() but adds two dependency types `d` and `e`
 * toward the loaded target unit. */
2346 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2347 _cleanup_free_ char *s = NULL;
2352 assert(name || path);
2354 name = resolve_template(u, name, path, &s);
2358 r = manager_load_unit(u->manager, name, path, NULL, &other);
2362 return unit_add_two_dependencies(u, d, e, other, add_reference);
/* Inverse direction of unit_add_dependency_by_name(): the loaded unit gets
 * dependency `d` pointing at u (note the swapped arguments in the final call). */
2365 int unit_add_dependency_by_name_inverse(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2368 _cleanup_free_ char *s = NULL;
2371 assert(name || path);
2373 name = resolve_template(u, name, path, &s);
2377 r = manager_load_unit(u->manager, name, path, NULL, &other);
2381 return unit_add_dependency(other, d, u, add_reference);
/* Inverse-direction variant adding both `d` and `e` from the loaded unit toward u. */
2384 int unit_add_two_dependencies_by_name_inverse(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2387 _cleanup_free_ char *s = NULL;
2390 assert(name || path);
2392 name = resolve_template(u, name, path, &s);
2396 r = manager_load_unit(u->manager, name, path, NULL, &other);
2400 r = unit_add_two_dependencies(other, d, e, u, add_reference);
/* Debug helper: export SYSTEMD_UNIT_PATH (setenv with overwrite=0, i.e. existing
 * value wins). Return statements are elided in this excerpt. */
2407 int set_unit_path(const char *p) {
2408 /* This is mostly for debug purposes */
2409 if (setenv("SYSTEMD_UNIT_PATH", p, 0) < 0)
/* Return the newly-allocated D-Bus object path for this unit, derived from its id
 * (NULL-id guard elided in this excerpt). Caller frees. */
2415 char *unit_dbus_path(Unit *u) {
2421 return unit_dbus_path_from_name(u->id);
/* Compute this unit's default cgroup path: the manager's cgroup root for -.slice,
 * otherwise root + (escaped slice path, unless the slice is the root slice) + escaped
 * unit id. Returns a newly-allocated string; caller frees. Excerpt — gaps present. */
2424 char *unit_default_cgroup_path(Unit *u) {
2425 _cleanup_free_ char *escaped = NULL, *slice = NULL;
2430 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2431 return strdup(u->manager->cgroup_root);
2433 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
2434 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
2439 escaped = cg_escape(u->id);
2444 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
2446 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
/* Pick and attach a default slice for this unit if none is set yet: instantiated
 * units get a per-template slice ("system-<prefix>.slice" for the system manager,
 * "<prefix>.slice" otherwise, with "-" escaped as \x2d since it is a slice path
 * separator); other units fall back to system.slice or -.slice depending on
 * running_as. The chosen slice unit is loaded and stored via unit_ref_set().
 * Excerpted listing — gaps present. */
2449 int unit_add_default_slice(Unit *u, CGroupContext *c) {
2450 _cleanup_free_ char *b = NULL;
2451 const char *slice_name;
2458 if (UNIT_ISSET(u->slice))
2462 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2464 /* Implicitly place all instantiated units in their
2465 * own per-template slice */
2467 prefix = unit_name_to_prefix(u->id);
2471 /* The prefix is already escaped, but it might include
2472 * "-" which has a special meaning for slice units,
2473 * hence escape it here extra. */
2474 escaped = strreplace(prefix, "-", "\\x2d");
2478 if (u->manager->running_as == SYSTEMD_SYSTEM)
2479 b = strjoin("system-", escaped, ".slice", NULL);
2481 b = strappend(escaped, ".slice");
2488 u->manager->running_as == SYSTEMD_SYSTEM
2489 ? SPECIAL_SYSTEM_SLICE
2490 : SPECIAL_ROOT_SLICE;
2492 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2496 unit_ref_set(&u->slice, slice);
/* Return the id of the unit's slice, or (elided branch) NULL when no slice is set. */
2500 const char *unit_slice_name(Unit *u) {
2503 if (!UNIT_ISSET(u->slice))
2506 return UNIT_DEREF(u->slice)->id;
/* Load the sibling unit with the same name but suffix `type` (e.g. foo.service ->
 * foo.socket) and return it in *_found; asserts it is distinct from u itself. */
2509 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2510 _cleanup_free_ char *t = NULL;
2517 t = unit_name_change_suffix(u->id, type);
2521 assert(!unit_has_name(u, t));
2523 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2524 assert(r < 0 || *_found != u);
/* Register this unit as the (single) watcher of a bus name in the manager's
 * watch_bus hashmap; hashmap_put's result conveys duplicate/OOM errors. */
2528 int unit_watch_bus_name(Unit *u, const char *name) {
2532 /* Watch a specific name on the bus. We only support one unit
2533 * watching each name for now. */
2535 return hashmap_put(u->manager->watch_bus, name, u);
/* Remove this unit's bus-name watch (only if the map entry actually points at u). */
2538 void unit_unwatch_bus_name(Unit *u, const char *name) {
2542 hashmap_remove_value(u->manager->watch_bus, name, u);
/* A unit type supports serialization iff its vtable provides both serialize and
 * deserialize_item callbacks. */
2545 bool unit_can_serialize(Unit *u) {
2548 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
/* Serialize this unit's state to `f` (FDs into `fds`) for daemon re-exec/reload:
 * type-specific state via the vtable, exec runtime, the six dual timestamps,
 * condition/assert results, the transient flag, the cgroup path, and — when
 * serialize_jobs is set — the active job and nop job. Excerpt — gaps present. */
2551 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2558 if (unit_can_serialize(u)) {
2561 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2565 rt = unit_get_exec_runtime(u);
2567 r = exec_runtime_serialize(rt, u, f, fds);
2573 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2574 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2575 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2576 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2577 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2578 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2580 if (dual_timestamp_is_set(&u->condition_timestamp))
2581 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2583 if (dual_timestamp_is_set(&u->assert_timestamp))
2584 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2586 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2589 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2591 if (serialize_jobs) {
2593 fprintf(f, "job\n");
2594 job_serialize(u->job, f, fds);
2598 fprintf(f, "job\n");
2599 job_serialize(u->nop_job, f, fds);
/* printf-style variant of unit_serialize_item(): writes key plus a formatted value
 * to `f` (key/separator output elided in this excerpt). */
2608 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2619 va_start(ap, format);
2620 vfprintf(f, format, ap);
/* Write one "key=value" serialization line to `f` (NULL guards elided in this excerpt). */
2626 void unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2632 fprintf(f, "%s=%s\n", key, value);
/* Inverse of unit_serialize(): read "key=value" lines from `f` and restore unit state.
 * Handles: "job" markers (new-style embedded job records installed into the manager's
 * job table, or an old-style bare job type kept in u->deserialized_job), the six
 * timestamps, condition/assert results, the transient flag, the cgroup path (replacing
 * any previous cgroup_unit mapping), and finally per-type items via the vtable's
 * deserialize_item plus exec-runtime items. Unparsable values are logged at debug
 * level and skipped. NOTE(review): heavily elided excerpt — error paths, the EOF/read
 * handling and several cleanup branches are not visible; treat control flow as
 * approximate. */
2635 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2636 ExecRuntime **rt = NULL;
2644 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2646 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2649 char line[LINE_MAX], *l, *v;
2652 if (!fgets(line, sizeof(line), f)) {
2665 k = strcspn(l, "=");
2673 if (streq(l, "job")) {
2675 /* new-style serialized job */
2676 Job *j = job_new_raw(u);
2680 r = job_deserialize(j, f, fds);
2686 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2692 r = job_install_deserialized(j);
2694 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2699 if (j->state == JOB_RUNNING)
2700 u->manager->n_running_jobs++;
2703 JobType type = job_type_from_string(v);
2705 log_debug("Failed to parse job type value %s", v);
2707 u->deserialized_job = type;
2710 } else if (streq(l, "inactive-exit-timestamp")) {
2711 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2713 } else if (streq(l, "active-enter-timestamp")) {
2714 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2716 } else if (streq(l, "active-exit-timestamp")) {
2717 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2719 } else if (streq(l, "inactive-enter-timestamp")) {
2720 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2722 } else if (streq(l, "condition-timestamp")) {
2723 dual_timestamp_deserialize(v, &u->condition_timestamp);
2725 } else if (streq(l, "assert-timestamp")) {
2726 dual_timestamp_deserialize(v, &u->assert_timestamp);
2728 } else if (streq(l, "condition-result")) {
2731 b = parse_boolean(v);
2733 log_debug("Failed to parse condition result value %s", v);
2735 u->condition_result = b;
2739 } else if (streq(l, "assert-result")) {
2742 b = parse_boolean(v);
2744 log_debug("Failed to parse assert result value %s", v);
2746 u->assert_result = b;
2750 } else if (streq(l, "transient")) {
2753 b = parse_boolean(v);
2755 log_debug("Failed to parse transient bool %s", v);
2760 } else if (streq(l, "cgroup")) {
2767 if (u->cgroup_path) {
2770 p = hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2771 log_info("Removing cgroup_path %s from hashmap (%p)",
2773 free(u->cgroup_path);
2777 assert(hashmap_put(u->manager->cgroup_unit, s, u) == 1);
2782 if (unit_can_serialize(u)) {
2784 r = exec_runtime_deserialize_item(rt, u, l, v, fds);
2791 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
/* Link this unit to the device node it is based on: translate `what` (must be a
 * device path) into a .device unit name, load it, add After=+BindsTo= on u toward
 * the device, and — when `wants` is set (check elided) — a Wants= from the device
 * back to u. Excerpted listing — gaps present. */
2798 int unit_add_node_link(Unit *u, const char *what, bool wants) {
2800 _cleanup_free_ char *e = NULL;
2808 /* Adds in links to the device node that this unit is based on */
2810 if (!is_device_path(what))
2813 e = unit_name_from_path(what, ".device");
2817 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2822 r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_BINDS_TO, device, true);
2827 r = unit_add_dependency(device, UNIT_WANTS, u, false);
/* Re-attach runtime state after deserialization: run the type-specific coldplug
 * callback, then either coldplug the already-installed job or re-enqueue the
 * old-style deserialized job type (ignoring requirement deps), clearing it afterwards. */
2835 int unit_coldplug(Unit *u) {
2840 if (UNIT_VTABLE(u)->coldplug)
2841 if ((r = UNIT_VTABLE(u)->coldplug(u)) < 0)
2845 r = job_coldplug(u->job);
2848 } else if (u->deserialized_job >= 0) {
2850 r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL);
2854 u->deserialized_job = _JOB_TYPE_INVALID;
/* Print a status line for this unit via the manager, substituting the unit's
 * description into the (non-literal, hence the warning suppression) format string. */
2860 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
2861 DISABLE_WARNING_FORMAT_NONLITERAL;
2862 manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
2863 status, unit_status_msg_format, unit_description(u));
/* Heuristically decide whether this unit's on-disk configuration changed since load:
 * compare stored mtimes of the fragment and source files, then compare the current
 * set of drop-in paths (count + overlap) and their mtimes against what was loaded.
 * NOTE(review): elided excerpt — several return statements and the stat declarations
 * are not visible; the visible dropin check compares with '>' (newer than recorded),
 * unlike the '!=' used for fragment/source. Confirm against full source. */
2867 bool unit_need_daemon_reload(Unit *u) {
2868 _cleanup_strv_free_ char **t = NULL;
2871 unsigned loaded_cnt, current_cnt;
2875 if (u->fragment_path) {
2877 if (stat(u->fragment_path, &st) < 0)
2878 /* What, cannot access this anymore? */
2881 if (u->fragment_mtime > 0 &&
2882 timespec_load(&st.st_mtim) != u->fragment_mtime)
2886 if (u->source_path) {
2888 if (stat(u->source_path, &st) < 0)
2891 if (u->source_mtime > 0 &&
2892 timespec_load(&st.st_mtim) != u->source_mtime)
2896 t = unit_find_dropin_paths(u);
2897 loaded_cnt = strv_length(t);
2898 current_cnt = strv_length(u->dropin_paths);
2900 if (loaded_cnt == current_cnt) {
2901 if (loaded_cnt == 0)
2904 if (strv_overlap(u->dropin_paths, t)) {
2905 STRV_FOREACH(path, u->dropin_paths) {
2907 if (stat(*path, &st) < 0)
2910 if (u->dropin_mtime > 0 &&
2911 timespec_load(&st.st_mtim) > u->dropin_mtime)
/* Clear the unit's failed state via the type-specific reset_failed callback, if any. */
2922 void unit_reset_failed(Unit *u) {
2925 if (UNIT_VTABLE(u)->reset_failed)
2926 UNIT_VTABLE(u)->reset_failed(u);
/* Return the unit whose state this unit follows (per the type's following callback),
 * or (elided) NULL when the type defines none. */
2929 Unit *unit_following(Unit *u) {
2932 if (UNIT_VTABLE(u)->following)
2933 return UNIT_VTABLE(u)->following(u);
/* True iff a stop job is queued for this unit. Deliberately job-only: safe to call
 * from within state-change handlers where the unit's own state is not yet updated. */
2938 bool unit_stop_pending(Unit *u) {
2941 /* This call does check the current state of the unit. It's
2942 * hence useful to be called from state change calls of the
2943 * unit itself, where the state isn't updated yet. This is
2944 * different from unit_inactive_or_pending() which checks both
2945 * the current state and for a queued job. */
2947 return u->job && u->job->type == JOB_STOP;
/* True iff the unit is inactive/deactivating or has a stop job queued
 * (return-true/false lines elided in this excerpt). */
2950 bool unit_inactive_or_pending(Unit *u) {
2953 /* Returns true if the unit is inactive or going down */
2955 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
2958 if (unit_stop_pending(u))
/* True iff the unit is active/activating or has a start-ish job
 * (START, RELOAD_OR_START, RESTART) queued. Excerpt — gaps present. */
2964 bool unit_active_or_pending(Unit *u) {
2967 /* Returns true if the unit is active or going up */
2969 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
2973 (u->job->type == JOB_START ||
2974 u->job->type == JOB_RELOAD_OR_START ||
2975 u->job->type == JOB_RESTART))
/* Send signal `signo` to the unit's processes selected by `w` via the type-specific
 * kill callback; unsupported types return an error (elided branch). */
2981 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
2983 assert(w >= 0 && w < _KILL_WHO_MAX);
2985 assert(signo < _NSIG);
2987 if (!UNIT_VTABLE(u)->kill)
2990 return UNIT_VTABLE(u)->kill(u, w, signo, error);
/* Build a Set containing main_pid and control_pid (when > 0), used to exclude those
 * PIDs from cgroup-wide kills. Returns NULL on allocation failure (elided paths). */
2993 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
2997 pid_set = set_new(NULL);
3001 /* Exclude the main/control pids from being killed via the cgroup */
3003 r = set_put(pid_set, LONG_TO_PTR(main_pid));
3008 if (control_pid > 0) {
3009 r = set_put(pid_set, LONG_TO_PTR(control_pid));
/* Shared kill implementation used by unit types: validate that the requested target
 * (main/control process) exists — returning descriptive bus errors otherwise — then
 * signal control and/or main PIDs, and for KILL_ALL additionally signal the whole
 * cgroup recursively, excluding main/control via unit_pid_set(). EAGAIN/ESRCH/ENOENT
 * from the cgroup sweep are tolerated. Excerpted listing — gaps present. */
3021 int unit_kill_common(
3027 sd_bus_error *error) {
3031 if (who == KILL_MAIN && main_pid <= 0) {
3033 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3035 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3038 if (who == KILL_CONTROL && control_pid <= 0) {
3039 if (control_pid < 0)
3040 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3042 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3045 if (who == KILL_CONTROL || who == KILL_ALL)
3046 if (control_pid > 0)
3047 if (kill(control_pid, signo) < 0)
3050 if (who == KILL_MAIN || who == KILL_ALL)
3052 if (kill(main_pid, signo) < 0)
3055 if (who == KILL_ALL && u->cgroup_path) {
3056 _cleanup_set_free_ Set *pid_set = NULL;
3059 /* Exclude the main/control pids from being killed via the cgroup */
3060 pid_set = unit_pid_set(main_pid, control_pid);
3064 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, true, false, pid_set);
3065 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
/* Collect the set of units following this one via the type's following_set callback;
 * types without one fall through (elided default return). */
3072 int unit_following_set(Unit *u, Set **s) {
3076 if (UNIT_VTABLE(u)->following_set)
3077 return UNIT_VTABLE(u)->following_set(u, s);
/* Lazily query and cache the unit-file state (enabled/disabled/...) of this unit's
 * fragment, scoped to system or user depending on running_as. */
3083 UnitFileState unit_get_unit_file_state(Unit *u) {
3086 if (u->unit_file_state < 0 && u->fragment_path)
3087 u->unit_file_state = unit_file_get_state(
3088 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3089 NULL, basename(u->fragment_path));
3091 return u->unit_file_state;
/* Lazily query and cache the preset verdict for this unit's fragment, mirroring
 * unit_get_unit_file_state() (negative cache value means "not yet queried"). */
3094 int unit_get_unit_file_preset(Unit *u) {
3097 if (u->unit_file_preset < 0 && u->fragment_path)
3098 u->unit_file_preset = unit_file_query_preset(
3099 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3100 NULL, basename(u->fragment_path));
3102 return u->unit_file_preset;
/* Point `ref` at unit `u`: drop any previous target, then link the ref into the
 * target unit's refs list so the unit knows it is pinned. */
3105 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3110 unit_ref_unset(ref);
3113 LIST_PREPEND(refs, u->refs, ref);
/* Detach `ref` from its target unit's refs list (no-op guard for unset refs elided). */
3117 void unit_ref_unset(UnitRef *ref) {
3123 LIST_REMOVE(refs, ref->unit->refs, ref);
/* After a unit's settings are fully loaded, fold manager defaults into its exec and
 * cgroup contexts: copy manager rlimits not set per-unit, default the working
 * directory to $HOME for user managers, force NoNewPrivileges for user-manager units
 * using seccomp/address-family filtering, drop CAP_MKNOD when PrivateDevices= is on,
 * and tighten the cgroup device policy from "auto" to "closed" in that case.
 * Excerpted listing — gaps present. */
3127 int unit_patch_contexts(Unit *u) {
3135 /* Patch in the manager defaults into the exec and cgroup
3136 * contexts, _after_ the rest of the settings have been
3139 ec = unit_get_exec_context(u);
3141 /* This only copies in the ones that need memory */
3142 for (i = 0; i < _RLIMIT_MAX; i++)
3143 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3144 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3149 if (u->manager->running_as == SYSTEMD_USER &&
3150 !ec->working_directory) {
3152 r = get_home_dir(&ec->working_directory);
3157 if (u->manager->running_as == SYSTEMD_USER &&
3158 (ec->syscall_whitelist ||
3159 !set_isempty(ec->syscall_filter) ||
3160 !set_isempty(ec->syscall_archs) ||
3161 ec->address_families_whitelist ||
3162 !set_isempty(ec->address_families)))
3163 ec->no_new_privileges = true;
3165 if (ec->private_devices)
3166 ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3169 cc = unit_get_cgroup_context(u);
3173 ec->private_devices &&
3174 cc->device_policy == CGROUP_AUTO)
3175 cc->device_policy = CGROUP_CLOSED;
/* Return the type-embedded ExecContext via the vtable's byte offset into the unit
 * struct (zero-offset "no context" guard elided in this excerpt). */
3181 ExecContext *unit_get_exec_context(Unit *u) {
3188 offset = UNIT_VTABLE(u)->exec_context_offset;
3192 return (ExecContext*) ((uint8_t*) u + offset);
/* Return the type-embedded KillContext via the vtable's byte offset (guard elided). */
3195 KillContext *unit_get_kill_context(Unit *u) {
3202 offset = UNIT_VTABLE(u)->kill_context_offset;
3206 return (KillContext*) ((uint8_t*) u + offset);
/* Return the type-embedded CGroupContext via the vtable's byte offset (guard elided). */
3209 CGroupContext *unit_get_cgroup_context(Unit *u) {
3215 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3219 return (CGroupContext*) ((uint8_t*) u + offset);
/* Return the type-embedded ExecRuntime pointer (note: the field itself is a pointer,
 * hence the extra dereference compared to the context getters; guard elided). */
3222 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3228 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3232 return *(ExecRuntime**) ((uint8_t*) u + offset);
/* Choose the directory for drop-in snippets: user managers use the config home
 * (persistent) or runtime dir; the system manager uses /etc/systemd/system
 * (persistent) or /run/systemd/system. Transient units never go to the persistent
 * location. *dir is newly allocated. Excerpted listing — gaps present. */
3235 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3236 if (u->manager->running_as == SYSTEMD_USER) {
3239 if (mode == UNIT_PERSISTENT && !transient)
3240 r = user_config_home(dir);
3242 r = user_runtime_dir(dir);
3249 if (mode == UNIT_PERSISTENT && !transient)
3250 *dir = strdup("/etc/systemd/system");
3252 *dir = strdup("/run/systemd/system");
/* Compute the paths (file *p, dir *q) of the "50-<name>" drop-in for this unit in
 * the directory selected by unit_drop_in_dir(). */
3259 static int unit_drop_in_file(Unit *u,
3260 UnitSetPropertiesMode mode, const char *name, char **p, char **q) {
3261 _cleanup_free_ char *dir = NULL;
3266 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3270 return drop_in_file(dir, u->id, 50, name, p, q);
/* Write `data` as a "50-<name>" drop-in for this unit (persistent or runtime modes
 * only), record the new path in u->dropin_paths (kept sorted and deduplicated), and
 * stamp dropin_mtime so unit_need_daemon_reload() treats the config as current. */
3273 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3275 _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
3280 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3283 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3287 r = write_drop_in(dir, u->id, 50, name, data);
3291 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3295 r = strv_extend(&u->dropin_paths, q);
3299 strv_sort(u->dropin_paths);
3300 strv_uniq(u->dropin_paths);
3302 u->dropin_mtime = now(CLOCK_REALTIME);
/* printf-style convenience wrapper around unit_write_drop_in(): formats the drop-in
 * contents with vasprintf, then delegates. */
3307 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3308 _cleanup_free_ char *p = NULL;
3316 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3319 va_start(ap, format);
3320 r = vasprintf(&p, format, ap);
3326 return unit_write_drop_in(u, mode, name, p);
/* Like unit_write_drop_in() but wraps `data` in the unit type's private section
 * header ("[Service]" etc.); types without a private section bail out (elided). */
3329 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3330 _cleanup_free_ char *ndata = NULL;
3336 if (!UNIT_VTABLE(u)->private_section)
3339 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3342 ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3346 return unit_write_drop_in(u, mode, name, ndata);
/* printf-style convenience wrapper around unit_write_drop_in_private(). */
3349 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3350 _cleanup_free_ char *p = NULL;
3358 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3361 va_start(ap, format);
3362 r = vasprintf(&p, format, ap);
3368 return unit_write_drop_in_private(u, mode, name, p);
/* Remove a previously written drop-in snippet.  Resolves the snippet
 * path via unit_drop_in_file() and unlinks it; a missing file
 * (ENOENT) is not considered an error.  The unlink() call itself is
 * elided in this view — only its errno handling is visible. */
3371 int unit_remove_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name) {
3372 _cleanup_free_ char *p = NULL, *q = NULL;
3377 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3380 r = unit_drop_in_file(u, mode, name, &p, &q);
3385 r = errno == ENOENT ? 0 : -errno;
/* Convert this unit into a transient one: reset its load state to
 * STUB, mark it transient, discard any existing fragment path and
 * point it at a stub fragment in the runtime directory instead
 * (per-user runtime dir for user instances, /run/systemd/system for
 * the system instance).  Finally write a placeholder fragment file so
 * the unit is considered "loaded from disk".  Error checks between
 * the visible lines are elided in this view. */
3393 int unit_make_transient(Unit *u) {
3398 u->load_state = UNIT_STUB;
3400 u->transient = true;
/* Drop any fragment inherited from a previous (non-transient) load. */
3402 free(u->fragment_path);
3403 u->fragment_path = NULL;
3405 if (u->manager->running_as == SYSTEMD_USER) {
3406 _cleanup_free_ char *c = NULL;
3408 r = user_runtime_dir(&c);
3414 u->fragment_path = strjoin(c, "/", u->id, NULL);
3415 if (!u->fragment_path)
3420 u->fragment_path = strappend("/run/systemd/system/", u->id);
3421 if (!u->fragment_path)
/* Best effort: ignore failure here, the atomic write below will
 * report a meaningful error if the directory is truly missing. */
3424 mkdir_p("/run/systemd/system", 0755);
3427 return write_string_file_atomic_label(u->fragment_path, "# Transient stub");
/* Kill the processes belonging to this unit according to its
 * KillContext 'c' and the requested KillOperation 'k':
 *
 *  - KILL_NONE: nothing is killed at all.
 *  - main_pid / control_pid (if > 0) are signalled individually with
 *    kill_and_sigcont() (the signal plus SIGCONT so stopped processes
 *    can receive it).
 *  - In cgroup/mixed modes the whole cgroup is swept recursively,
 *    excluding the main/control PIDs which were already signalled.
 *  - If SendSIGHUP= is set and this is not the final SIGKILL pass, a
 *    SIGHUP is sent in addition.
 *
 * Returns a boolean-as-int: whether the caller should wait for the
 * signalled processes to exit (true only when we signalled a process
 * we own; cgroup kills deliberately do not set it, see FIXME below).
 * NOTE(review): many lines (asserts, switch arms for other
 * KillOperations, error checks, closing braces) are elided in this
 * view. */
3430 int unit_kill_context(
3436 bool main_pid_alien) {
3438 int sig, wait_for_exit = false, r;
3443 if (c->kill_mode == KILL_NONE)
3453 case KILL_TERMINATE:
3454 sig = c->kill_signal;
3457 assert_not_reached("KillOperation unknown");
3461 r = kill_and_sigcont(main_pid, sig);
/* ESRCH just means the process is already gone — not worth a log. */
3463 if (r < 0 && r != -ESRCH) {
3464 _cleanup_free_ char *comm = NULL;
3465 get_process_comm(main_pid, &comm);
3467 log_unit_warning_errno(u->id, r, "Failed to kill main process " PID_FMT " (%s): %m", main_pid, strna(comm));
/* Only wait for the main process if it is actually our child;
 * an alien (reattached) PID will not deliver us a SIGCHLD. */
3469 if (!main_pid_alien)
3470 wait_for_exit = true;
3472 if (c->send_sighup && k != KILL_KILL)
3473 kill(main_pid, SIGHUP);
3477 if (control_pid > 0) {
3478 r = kill_and_sigcont(control_pid, sig);
3480 if (r < 0 && r != -ESRCH) {
3481 _cleanup_free_ char *comm = NULL;
3482 get_process_comm(control_pid, &comm);
3484 log_unit_warning_errno(u->id, r, "Failed to kill control process " PID_FMT " (%s): %m", control_pid, strna(comm));
3486 wait_for_exit = true;
3488 if (c->send_sighup && k != KILL_KILL)
3489 kill(control_pid, SIGHUP);
/* Sweep the whole cgroup: always in control-group mode, and in mixed
 * mode only on the final SIGKILL pass. */
3493 if ((c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL)) && u->cgroup_path) {
3494 _cleanup_set_free_ Set *pid_set = NULL;
3496 /* Exclude the main/control pids from being killed via the cgroup */
3497 pid_set = unit_pid_set(main_pid, control_pid);
3501 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, true, false, pid_set);
3503 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3504 log_unit_warning_errno(u->id, r, "Failed to kill control group: %m");
3507 /* FIXME: For now, we will not wait for the
3508 * cgroup members to die, simply because
3509 * cgroup notification is unreliable. It
3510 * doesn't work at all in containers, and
3511 * outside of containers it can be confused
3512 * easily by leaving directories in the
3515 /* wait_for_exit = true; */
3517 if (c->send_sighup && k != KILL_KILL) {
/* Fresh pid_set — presumably the earlier one was consumed/updated
 * above in elided lines; verify against full source. */
3520 pid_set = unit_pid_set(main_pid, control_pid);
3524 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3529 return wait_for_exit;
/* Register 'path' (and implicitly all of its prefixes) as a path this
 * unit requires to be mounted.  Errors for non-absolute or unsafe
 * paths and the early-return for already-registered paths are partly
 * elided in this view. */
3532 int unit_require_mounts_for(Unit *u, const char *path) {
/* VLA sized to hold any prefix of 'path'. */
3533 char prefix[strlen(path) + 1], *p;
3539 /* Registers a unit for requiring a certain path and all its
3540 * prefixes. We keep a simple array of these paths in the
3541 * unit, since its usually short. However, we build a prefix
3542 * table for all possible prefixes so that new appearing mount
3543 * units can easily determine which units to make themselves a
3546 if (!path_is_absolute(path))
/* 'p' is presumably a strdup() of 'path' made in elided lines;
 * it is normalized here and ownership passed to the strv below. */
3553 path_kill_slashes(p);
3555 if (!path_is_safe(p)) {
3560 if (strv_contains(u->requires_mounts_for, p)) {
3565 r = strv_consume(&u->requires_mounts_for, p);
/* Index this unit under every prefix of the path in the manager-wide
 * reverse map, so newly appearing mount units can find dependents. */
3569 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3572 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
/* Lazily allocate the manager-wide map on first use. */
3576 if (!u->manager->units_requiring_mounts_for) {
3577 u->manager->units_requiring_mounts_for = hashmap_new(&string_hash_ops);
3578 if (!u->manager->units_requiring_mounts_for)
3592 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
/* Ensure the unit has an ExecRuntime object: first try to share the
 * runtime of any unit we join namespaces with (JoinsNamespaceOf=),
 * taking a reference; otherwise create a fresh one.  The ExecRuntime
 * pointer lives at a vtable-declared byte offset inside the
 * type-specific Unit structure. */
3608 int unit_setup_exec_runtime(Unit *u) {
3614 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3617 /* Check if there already is an ExecRuntime for this unit? */
3618 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3622 /* Try to get it from somebody else */
3623 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3625 *rt = unit_get_exec_runtime(other);
3627 exec_runtime_ref(*rt);
/* Nobody to share with: allocate our own runtime. */
3632 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
/* String names for UnitActiveState values, as exposed on D-Bus and in
 * "systemctl" output; DEFINE_STRING_TABLE_LOOKUP generates the
 * to-string/from-string converter pair from this table. */
3635 static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
3636 [UNIT_ACTIVE] = "active",
3637 [UNIT_RELOADING] = "reloading",
3638 [UNIT_INACTIVE] = "inactive",
3639 [UNIT_FAILED] = "failed",
3640 [UNIT_ACTIVATING] = "activating",
3641 [UNIT_DEACTIVATING] = "deactivating"
3644 DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState);