1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
26 #include <sys/timerfd.h>
33 #include "sd-messages.h"
38 #include "path-util.h"
39 #include "load-fragment.h"
40 #include "load-dropin.h"
42 #include "unit-name.h"
43 #include "dbus-unit.h"
45 #include "cgroup-util.h"
49 #include "fileio-label.h"
50 #include "bus-common-errors.h"
/* Dispatch table mapping each concrete unit type to the vtable that
 * implements it. Indexed by the UnitType enum values visible below.
 * NOTE(review): the closing "};" of this initializer is elided from this
 * excerpt of the listing. */
56 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
57 [UNIT_SERVICE] = &service_vtable,
58 [UNIT_SOCKET] = &socket_vtable,
59 [UNIT_BUSNAME] = &busname_vtable,
60 [UNIT_TARGET] = &target_vtable,
61 [UNIT_SNAPSHOT] = &snapshot_vtable,
62 [UNIT_DEVICE] = &device_vtable,
63 [UNIT_MOUNT] = &mount_vtable,
64 [UNIT_AUTOMOUNT] = &automount_vtable,
65 [UNIT_SWAP] = &swap_vtable,
66 [UNIT_TIMER] = &timer_vtable,
67 [UNIT_PATH] = &path_vtable,
68 [UNIT_SLICE] = &slice_vtable,
69 [UNIT_SCOPE] = &scope_vtable
/* Forward declaration; used by merge_dependencies() below to warn when a
 * dependency collapses onto the unit itself during a merge. */
72 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency);
/* Allocate and minimally initialize a new Unit object of 'size' bytes.
 * Type-specific unit structs embed Unit as their first member, hence the
 * size >= sizeof(Unit) assertion. Sentinel values (_*_INVALID, -1) mark
 * fields as "not yet determined". Allocation, manager hookup and the
 * return statement are elided from this excerpt. */
74 Unit *unit_new(Manager *m, size_t size) {
78 assert(size >= sizeof(Unit));
84 u->names = set_new(&string_hash_ops);
91 u->type = _UNIT_TYPE_INVALID;
92 u->deserialized_job = _JOB_TYPE_INVALID;
93 u->default_dependencies = true;
94 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
95 u->unit_file_preset = -1;
96 u->on_failure_job_mode = JOB_REPLACE;
/* Returns true if 'name' is one of the names (aliases) of this unit.
 * The cast drops const for the set lookup only; nothing is modified. */
101 bool unit_has_name(Unit *u, const char *name) {
105 return !!set_get(u->names, (char*) name);
/* Initialize the type-specific parts of a freshly typed unit: seed the
 * cgroup, exec and kill contexts (if the unit type has them) and then give
 * the type's own init() hook a chance to run. NULL checks around the
 * *_init() calls are elided from this excerpt. */
108 static void unit_init(Unit *u) {
115 assert(u->type >= 0);
117 cc = unit_get_cgroup_context(u);
119 cgroup_context_init(cc);
121 /* Copy in the manager defaults into the cgroup
122 * context, _before_ the rest of the settings have
123 * been initialized */
125 cc->cpu_accounting = u->manager->default_cpu_accounting;
126 cc->blockio_accounting = u->manager->default_blockio_accounting;
127 cc->memory_accounting = u->manager->default_memory_accounting;
130 ec = unit_get_exec_context(u);
132 exec_context_init(ec);
134 kc = unit_get_kill_context(u);
136 kill_context_init(kc);
138 if (UNIT_VTABLE(u)->init)
139 UNIT_VTABLE(u)->init(u);
/* Register an additional name (alias) for the unit. Validates the name,
 * instantiates templates against u->instance, rejects type/instance
 * mismatches and alias-forbidding types, then records the name in both
 * u->names and the manager's global unit hashmap. On first real name the
 * unit acquires its type and is linked into the per-type list. Error
 * returns between the visible steps are elided from this excerpt. */
142 int unit_add_name(Unit *u, const char *text) {
143 _cleanup_free_ char *s = NULL, *i = NULL;
150 if (unit_name_is_template(text)) {
155 s = unit_name_replace_instance(text, u->instance);
161 if (!unit_name_is_valid(s, TEMPLATE_INVALID))
164 assert_se((t = unit_name_to_type(s)) >= 0);
166 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
169 r = unit_name_to_instance(s, &i);
173 if (i && unit_vtable[t]->no_instances)
176 /* Ensure that this unit is either instanced or not instanced,
178 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
181 if (unit_vtable[t]->no_alias &&
182 !set_isempty(u->names) &&
183 !set_get(u->names, s))
186 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
189 r = set_put(u->names, s)
197 r = hashmap_put(u->manager->units, s, u);
199 set_remove(u->names, s);
203 if (u->type == _UNIT_TYPE_INVALID) {
208 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
217 unit_add_to_dbus_queue(u);
/* Select one of the unit's already-registered names as its primary id.
 * Template names are instantiated first; the chosen name must already be
 * present in u->names (the set_get() lookup returns the canonical copy).
 * Assignment of u->id/u->instance is elided from this excerpt. */
221 int unit_choose_id(Unit *u, const char *name) {
222 _cleanup_free_ char *t = NULL;
229 if (unit_name_is_template(name)) {
234 t = unit_name_replace_instance(name, u->instance);
241 /* Selects one of the names of this unit as the id */
242 s = set_get(u->names, (char*) name);
246 r = unit_name_to_instance(s, &i);
255 unit_add_to_dbus_queue(u);
/* Replace the unit's human-readable description (freeing the old one) and
 * announce the property change on the bus queue. An empty description is
 * handled specially (branch body elided from this excerpt). */
260 int unit_set_description(Unit *u, const char *description) {
265 if (isempty(description))
268 s = strdup(description);
273 free(u->description);
276 unit_add_to_dbus_queue(u);
/* Decide whether the unit object needs to be kept around. As a side
 * effect, lets inactive/failed units release runtime resources via their
 * vtable hook. Returns "keep" for units that are not plain-inactive, that
 * opted out of GC (no_gc), or whose type-specific check_gc() says so.
 * The intermediate return statements are elided from this excerpt. */
280 bool unit_check_gc(Unit *u) {
281 UnitActiveState state;
290 state = unit_active_state(u);
292 /* If the unit is inactive and failed and no job is queued for
293 * it, then release its runtime resources */
294 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
295 UNIT_VTABLE(u)->release_resources)
296 UNIT_VTABLE(u)->release_resources(u);
298 /* But we keep the unit object around for longer when it is
299 * referenced or configured to not be gc'ed */
300 if (state != UNIT_INACTIVE)
303 if (UNIT_VTABLE(u)->no_gc)
312 if (UNIT_VTABLE(u)->check_gc)
313 if (UNIT_VTABLE(u)->check_gc(u))
/* Enqueue the unit for loading, unless it is already loaded past the STUB
 * state or already sitting in the load queue (idempotent). */
319 void unit_add_to_load_queue(Unit *u) {
321 assert(u->type != _UNIT_TYPE_INVALID);
323 if (u->load_state != UNIT_STUB || u->in_load_queue)
326 LIST_PREPEND(load_queue, u->manager->load_queue, u);
327 u->in_load_queue = true;
/* Enqueue the unit for destruction by the manager's cleanup pass
 * (idempotent via the in_cleanup_queue flag). */
330 void unit_add_to_cleanup_queue(Unit *u) {
333 if (u->in_cleanup_queue)
336 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
337 u->in_cleanup_queue = true;
/* Enqueue the unit for garbage collection, skipping units already queued
 * for GC or cleanup, and units that unit_check_gc() says must be kept.
 * Also bumps the manager's queued-units counter. */
340 void unit_add_to_gc_queue(Unit *u) {
343 if (u->in_gc_queue || u->in_cleanup_queue)
346 if (unit_check_gc(u))
349 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
350 u->in_gc_queue = true;
352 u->manager->n_in_gc_queue ++;
/* Enqueue the unit for D-Bus change notification. Skipped for stub units
 * and units already queued; short-circuited (marking the "new" signal as
 * sent) when no bus client is subscribed and no private bus exists. */
355 void unit_add_to_dbus_queue(Unit *u) {
357 assert(u->type != _UNIT_TYPE_INVALID);
359 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
362 /* Shortcut things if nobody cares */
363 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
364 set_isempty(u->manager->private_buses)) {
365 u->sent_dbus_new_signal = true;
369 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
370 u->in_dbus_queue = true;
/* Free a dependency set 's' of unit 'u', first removing 'u' from the
 * reverse-dependency sets of every peer so no dangling back-pointers
 * remain, and re-queueing each peer for GC (it may now be collectable).
 * The final set_free() call is elided from this excerpt. */
373 static void bidi_set_free(Unit *u, Set *s) {
379 /* Frees the set and makes sure we are dropped from the
380 * inverse pointers */
382 SET_FOREACH(other, s, i) {
385 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
386 set_remove(other->dependencies[d], u);
388 unit_add_to_gc_queue(other);
/* Delete the on-disk configuration of a transient unit: its fragment file
 * and each drop-in file, walking up to the drop-in's parent directory
 * (presumably to remove it when emptied — the remaining loop body is
 * elided from this excerpt; a transient-flag guard is likewise elided). */
394 static void unit_remove_transient(Unit *u) {
402 if (u->fragment_path)
403 unlink(u->fragment_path);
405 STRV_FOREACH(i, u->dropin_paths) {
406 _cleanup_free_ char *p = NULL;
411 r = path_get_parent(*i, &p);
/* Tear down the unit's RequiresMountsFor= bookkeeping: for every
 * registered path and each of its prefixes, drop the unit from the
 * manager's units_requiring_mounts_for multimap, removing map entries
 * whose unit set became empty; finally free the path list itself.
 * The per-prefix set_remove() call is elided from this excerpt. */
417 static void unit_free_requires_mounts_for(Unit *u) {
420 STRV_FOREACH(j, u->requires_mounts_for) {
421 char s[strlen(*j) + 1];
423 PATH_FOREACH_PREFIX_MORE(s, *j) {
427 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
433 if (set_isempty(x)) {
434 hashmap_remove(u->manager->units_requiring_mounts_for, y);
441 strv_free(u->requires_mounts_for);
442 u->requires_mounts_for = NULL;
/* Run the shutdown counterpart of unit_init(): invoke the type-specific
 * done() hook, then release the exec and cgroup contexts (NULL checks
 * around the *_done() calls are elided from this excerpt). */
445 static void unit_done(Unit *u) {
454 if (UNIT_VTABLE(u)->done)
455 UNIT_VTABLE(u)->done(u);
457 ec = unit_get_exec_context(u);
459 exec_context_done(ec);
461 cc = unit_get_cgroup_context(u);
463 cgroup_context_done(cc);
/* Destroy a Unit object completely: remove transient config (unless the
 * manager is mid-reload), send the bus "removed" signal, unhook the unit
 * from every manager-side index (name hashmap, per-type list, all work
 * queues, cgroup map, failed/startup sets), sever dependency back-
 * pointers, and free all owned memory. Several free() calls and the
 * trailing free(u) are elided from this excerpt. */
466 void unit_free(Unit *u) {
473 if (u->manager->n_reloading <= 0)
474 unit_remove_transient(u);
476 bus_unit_send_removed_signal(u);
480 unit_free_requires_mounts_for(u);
482 SET_FOREACH(t, u->names, i)
483 hashmap_remove_value(u->manager->units, t, u);
497 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
498 bidi_set_free(u, u->dependencies[d]);
500 if (u->type != _UNIT_TYPE_INVALID)
501 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
503 if (u->in_load_queue)
504 LIST_REMOVE(load_queue, u->manager->load_queue, u);
506 if (u->in_dbus_queue)
507 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
509 if (u->in_cleanup_queue)
510 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
512 if (u->in_gc_queue) {
513 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
514 u->manager->n_in_gc_queue--;
517 if (u->in_cgroup_queue)
518 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
520 if (u->cgroup_path) {
521 hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
522 free(u->cgroup_path);
525 set_remove(u->manager->failed_units, u);
526 set_remove(u->manager->startup_units, u);
528 free(u->description);
529 strv_free(u->documentation);
530 free(u->fragment_path);
531 free(u->source_path);
532 strv_free(u->dropin_paths);
535 free(u->job_timeout_reboot_arg);
537 set_free_free(u->names);
539 unit_unwatch_all_pids(u);
541 condition_free_list(u->conditions);
542 condition_free_list(u->asserts);
544 unit_ref_unset(&u->slice);
547 unit_ref_unset(u->refs);
/* Report the unit's current active state, delegating through merged units
 * to the surviving merge target, then to the type-specific hook. */
552 UnitActiveState unit_active_state(Unit *u) {
555 if (u->load_state == UNIT_MERGED)
556 return unit_active_state(unit_follow_merge(u));
558 /* After a reload it might happen that a unit is not correctly
559 * loaded but still has a process around. That's why we won't
560 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
562 return UNIT_VTABLE(u)->active_state(u);
/* Return the type-specific fine-grained sub-state as a string. */
565 const char* unit_sub_state_to_string(Unit *u) {
568 return UNIT_VTABLE(u)->sub_state_to_string(u);
/* Move all entries of *other into *s (helper for unit merging). The
 * surrounding NULL/ownership handling is elided from this excerpt. */
571 static int complete_move(Set **s, Set **other) {
581 r = set_move(*s, *other);
/* Transfer all of other's names into u, then repoint every name in the
 * manager's global unit hashmap at u. hashmap_replace() == 0 is asserted
 * because every name is already present as a key. */
592 static int merge_names(Unit *u, Unit *other) {
600 r = complete_move(&u->names, &other->names);
604 set_free_free(other->names);
608 SET_FOREACH(t, u->names, i)
609 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
/* Pre-reserve room in u's dependency set of kind 'd' so the later
 * merge_dependencies() move cannot fail with OOM. A dependency of other
 * on u itself is excluded from the count, since the merge drops it. */
614 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
619 assert(d < _UNIT_DEPENDENCY_MAX);
622 * If u does not have this dependency set allocated, there is no need
623 * to reserve anything. In that case other's set will be transferred
624 * as a whole to u by complete_move().
626 if (!u->dependencies[d])
629 /* merge_dependencies() will skip a u-on-u dependency */
630 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
632 return set_reserve(u->dependencies[d], n_reserve);
/* Fold other's dependency set of kind 'd' into u: first rewrite every
 * peer's back-pointers from 'other' to 'u' (dropping and warning about
 * dependencies that would now point from u to itself), then move the set
 * wholesale — which must succeed because reserve_dependencies() ran
 * first — and free other's emptied set. */
635 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
642 assert(d < _UNIT_DEPENDENCY_MAX);
644 /* Fix backwards pointers */
645 SET_FOREACH(back, other->dependencies[d], i) {
648 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
649 /* Do not add dependencies between u and itself */
651 if (set_remove(back->dependencies[k], other))
652 maybe_warn_about_dependency(u->id, other_id, k);
654 r = set_remove_and_put(back->dependencies[k], other, u);
656 set_remove(back->dependencies[k], other);
658 assert(r >= 0 || r == -ENOENT);
663 /* Also do not move dependencies on u to itself */
664 back = set_remove(other->dependencies[d], u);
666 maybe_warn_about_dependency(u->id, other_id, d);
668 /* The move cannot fail. The caller must have performed a reservation. */
669 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
671 set_free(other->dependencies[d]);
672 other->dependencies[d] = NULL;
/* Merge unit 'other' into 'u' so they become one object with u as the
 * survivor. Preconditions checked: same manager, same type, matching
 * instanced-ness, other still a stub or not-found, and other inactive.
 * other->id is copied onto the stack (strdupa) because merge_names()
 * frees other's name strings before merge_dependencies() needs the id
 * for warnings. Error-return bodies are elided from this excerpt. */
675 int unit_merge(Unit *u, Unit *other) {
677 const char *other_id = NULL;
682 assert(u->manager == other->manager);
683 assert(u->type != _UNIT_TYPE_INVALID);
685 other = unit_follow_merge(other);
690 if (u->type != other->type)
693 if (!u->instance != !other->instance)
696 if (other->load_state != UNIT_STUB &&
697 other->load_state != UNIT_NOT_FOUND)
706 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
710 other_id = strdupa(other->id);
712 /* Make reservations to ensure merge_dependencies() won't fail */
713 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
714 r = reserve_dependencies(u, other, d);
716 * We don't rollback reservations if we fail. We don't have
717 * a way to undo reservations. A reservation is not a leak.
724 r = merge_names(u, other);
728 /* Redirect all references */
730 unit_ref_set(other->refs, u);
732 /* Merge dependencies */
733 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
734 merge_dependencies(u, other, other_id, d);
736 other->load_state = UNIT_MERGED;
737 other->merged_into = u;
739 /* If there is still some data attached to the other node, we
740 * don't need it anymore, and can free it. */
741 if (other->load_state != UNIT_STUB)
742 if (UNIT_VTABLE(other)->done)
743 UNIT_VTABLE(other)->done(other);
745 unit_add_to_dbus_queue(u);
746 unit_add_to_cleanup_queue(other);
/* Merge the unit registered under 'name' into u; if no such unit exists
 * yet, simply register 'name' as an alias of u instead. Template names
 * are instantiated against u->instance first. */
751 int unit_merge_by_name(Unit *u, const char *name) {
754 _cleanup_free_ char *s = NULL;
759 if (unit_name_is_template(name)) {
763 s = unit_name_replace_instance(name, u->instance);
770 other = manager_get_unit(u->manager, name);
772 r = unit_add_name(u, name);
774 r = unit_merge(u, other);
/* Chase merged_into pointers until the surviving (non-merged) unit is
 * found; asserts the chain never contains a NULL link. */
779 Unit* unit_follow_merge(Unit *u) {
782 while (u->load_state == UNIT_MERGED)
783 assert_se(u = u->merged_into);
/* Derive implicit dependencies from an exec context: mount requirements
 * for WorkingDirectory=/RootDirectory=, and — for the system instance
 * only — /tmp and /var/tmp when PrivateTmp= is on, plus an After=
 * ordering on journald's socket when stdout/stderr go to syslog, kmsg or
 * the journal. The big condition returns early when NO logging output
 * needs the journal. Error-return bodies are elided from this excerpt. */
788 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
794 if (c->working_directory) {
795 r = unit_require_mounts_for(u, c->working_directory);
800 if (c->root_directory) {
801 r = unit_require_mounts_for(u, c->root_directory);
806 if (u->manager->running_as != SYSTEMD_SYSTEM)
809 if (c->private_tmp) {
810 r = unit_require_mounts_for(u, "/tmp");
814 r = unit_require_mounts_for(u, "/var/tmp");
819 if (c->std_output != EXEC_OUTPUT_KMSG &&
820 c->std_output != EXEC_OUTPUT_SYSLOG &&
821 c->std_output != EXEC_OUTPUT_JOURNAL &&
822 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
823 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
824 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
825 c->std_error != EXEC_OUTPUT_KMSG &&
826 c->std_error != EXEC_OUTPUT_SYSLOG &&
827 c->std_error != EXEC_OUTPUT_JOURNAL &&
828 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
829 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
830 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
833 /* If syslog or kernel logging is requested, make sure our own
834 * logging daemon is run first. */
836 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
/* Return the unit's description; fallback handling for a missing
 * description is elided from this excerpt. */
843 const char *unit_description(Unit *u) {
847 return u->description;
/* Dump the complete state of a unit to stream 'f' for debugging, every
 * line prefixed with 'prefix' plus one extra tab. Covers identity, load
 * and active state, timestamps, cgroup data, names, docs, paths, job
 * timeouts, conditions/asserts, dependencies, mount requirements and —
 * for loaded units — policy flags plus the type-specific dump() hook.
 * Jobs attached to the unit are dumped last. */
852 void unit_dump(Unit *u, FILE *f, const char *prefix) {
858 timestamp1[FORMAT_TIMESTAMP_MAX],
859 timestamp2[FORMAT_TIMESTAMP_MAX],
860 timestamp3[FORMAT_TIMESTAMP_MAX],
861 timestamp4[FORMAT_TIMESTAMP_MAX],
862 timespan[FORMAT_TIMESPAN_MAX];
864 _cleanup_set_free_ Set *following_set = NULL;
868 assert(u->type >= 0);
870 prefix = strempty(prefix);
871 prefix2 = strjoina(prefix, "\t");
875 "%s\tDescription: %s\n"
877 "%s\tUnit Load State: %s\n"
878 "%s\tUnit Active State: %s\n"
879 "%s\tInactive Exit Timestamp: %s\n"
880 "%s\tActive Enter Timestamp: %s\n"
881 "%s\tActive Exit Timestamp: %s\n"
882 "%s\tInactive Enter Timestamp: %s\n"
883 "%s\tGC Check Good: %s\n"
884 "%s\tNeed Daemon Reload: %s\n"
885 "%s\tTransient: %s\n"
888 "%s\tCGroup realized: %s\n"
889 "%s\tCGroup mask: 0x%x\n"
890 "%s\tCGroup members mask: 0x%x\n",
892 prefix, unit_description(u),
893 prefix, strna(u->instance),
894 prefix, unit_load_state_to_string(u->load_state),
895 prefix, unit_active_state_to_string(unit_active_state(u)),
896 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
897 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
898 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
899 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
900 prefix, yes_no(unit_check_gc(u)),
901 prefix, yes_no(unit_need_daemon_reload(u)),
902 prefix, yes_no(u->transient),
903 prefix, strna(unit_slice_name(u)),
904 prefix, strna(u->cgroup_path),
905 prefix, yes_no(u->cgroup_realized),
906 prefix, u->cgroup_realized_mask,
907 prefix, u->cgroup_members_mask);
909 SET_FOREACH(t, u->names, i)
910 fprintf(f, "%s\tName: %s\n", prefix, t);
912 STRV_FOREACH(j, u->documentation)
913 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
915 following = unit_following(u);
917 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
919 r = unit_following_set(u, &following_set);
923 SET_FOREACH(other, following_set, i)
924 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
927 if (u->fragment_path)
928 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
931 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
933 STRV_FOREACH(j, u->dropin_paths)
934 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
936 if (u->job_timeout > 0)
937 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
939 if (u->job_timeout_action != FAILURE_ACTION_NONE)
940 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
942 if (u->job_timeout_reboot_arg)
943 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
945 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
946 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
948 if (dual_timestamp_is_set(&u->condition_timestamp))
950 "%s\tCondition Timestamp: %s\n"
951 "%s\tCondition Result: %s\n",
952 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
953 prefix, yes_no(u->condition_result));
955 if (dual_timestamp_is_set(&u->assert_timestamp))
957 "%s\tAssert Timestamp: %s\n"
958 "%s\tAssert Result: %s\n",
959 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
960 prefix, yes_no(u->assert_result));
962 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
965 SET_FOREACH(other, u->dependencies[d], i)
966 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
969 if (!strv_isempty(u->requires_mounts_for)) {
971 "%s\tRequiresMountsFor:", prefix);
973 STRV_FOREACH(j, u->requires_mounts_for)
974 fprintf(f, " %s", *j);
979 if (u->load_state == UNIT_LOADED) {
982 "%s\tStopWhenUnneeded: %s\n"
983 "%s\tRefuseManualStart: %s\n"
984 "%s\tRefuseManualStop: %s\n"
985 "%s\tDefaultDependencies: %s\n"
986 "%s\tOnFailureJobMode: %s\n"
987 "%s\tIgnoreOnIsolate: %s\n"
988 "%s\tIgnoreOnSnapshot: %s\n",
989 prefix, yes_no(u->stop_when_unneeded),
990 prefix, yes_no(u->refuse_manual_start),
991 prefix, yes_no(u->refuse_manual_stop),
992 prefix, yes_no(u->default_dependencies),
993 prefix, job_mode_to_string(u->on_failure_job_mode),
994 prefix, yes_no(u->ignore_on_isolate),
995 prefix, yes_no(u->ignore_on_snapshot));
997 if (UNIT_VTABLE(u)->dump)
998 UNIT_VTABLE(u)->dump(u, f, prefix2);
1000 } else if (u->load_state == UNIT_MERGED)
1002 "%s\tMerged into: %s\n",
1003 prefix, u->merged_into->id);
1004 else if (u->load_state == UNIT_ERROR)
1005 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1009 job_dump(u->job, f, prefix2);
1012 job_dump(u->nop_job, f, prefix2);
1016 /* Common implementation for multiple backends */
/* Load the unit's fragment file; it MUST exist (a unit still in STUB
 * state afterwards is treated as an error — that branch is elided from
 * this excerpt), then layer drop-in data on top. */
1017 int unit_load_fragment_and_dropin(Unit *u) {
1022 /* Load a .{service,socket,...} file */
1023 r = unit_load_fragment(u);
1027 if (u->load_state == UNIT_STUB)
1030 /* Load drop-in directory data */
1031 r = unit_load_dropin(unit_follow_merge(u));
1038 /* Common implementation for multiple backends */
/* Like unit_load_fragment_and_dropin(), but a missing fragment is fine:
 * a unit still in STUB state is simply promoted to LOADED before the
 * drop-ins are applied. */
1039 int unit_load_fragment_and_dropin_optional(Unit *u) {
1044 /* Same as unit_load_fragment_and_dropin(), but whether
1045 * something can be loaded or not doesn't matter. */
1047 /* Load a .service file */
1048 r = unit_load_fragment(u);
1052 if (u->load_state == UNIT_STUB)
1053 u->load_state = UNIT_LOADED;
1055 /* Load drop-in directory data */
1056 r = unit_load_dropin(unit_follow_merge(u));
/* Add an automatic After= ordering from target unit 'target' onto 'u',
 * but only when both units are fully loaded, both permit default
 * dependencies, the target is actually a .target unit, and doing so
 * would not create an ordering loop. */
1063 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1067 if (target->type != UNIT_TARGET)
1070 /* Only add the dependency if both units are loaded, so that
1071 * that loop check below is reliable */
1072 if (u->load_state != UNIT_LOADED ||
1073 target->load_state != UNIT_LOADED)
1076 /* If either side wants no automatic dependencies, then let's
1078 if (!u->default_dependencies ||
1079 !target->default_dependencies)
1082 /* Don't create loops */
1083 if (set_get(target->dependencies[UNIT_BEFORE], u))
1086 return unit_add_dependency(target, UNIT_AFTER, u, true);
/* For each target that pulls this unit in (via the dependency kinds in
 * deps[] — the array is partially elided from this excerpt), add the
 * automatic After= ordering through
 * unit_add_default_target_dependency(). */
1089 static int unit_add_target_dependencies(Unit *u) {
1091 static const UnitDependency deps[] = {
1093 UNIT_REQUIRED_BY_OVERRIDABLE,
1105 for (k = 0; k < ELEMENTSOF(deps); k++)
1106 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1107 r = unit_add_default_target_dependency(u, target);
/* For units with a cgroup context, order them after (and make them want)
 * their configured slice — or the root slice when none is set, except for
 * the root slice itself. */
1115 static int unit_add_slice_dependencies(Unit *u) {
1118 if (!unit_get_cgroup_context(u))
1121 if (UNIT_ISSET(u->slice))
1122 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_WANTS, UNIT_DEREF(u->slice), true);
1124 if (streq(u->id, SPECIAL_ROOT_SLICE))
1127 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, SPECIAL_ROOT_SLICE, NULL, true);
/* Translate RequiresMountsFor= paths into concrete dependencies: for each
 * path prefix that maps to a loaded .mount unit, add After= plus — when
 * the mount has its own fragment file — Requires=. Error-return bodies
 * are elided from this excerpt. */
1130 static int unit_add_mount_dependencies(Unit *u) {
1136 STRV_FOREACH(i, u->requires_mounts_for) {
1137 char prefix[strlen(*i) + 1];
1139 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1142 r = manager_get_unit_by_path(u->manager, prefix, ".mount", &m);
1150 if (m->load_state != UNIT_LOADED)
1153 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1157 if (m->fragment_path) {
1158 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
/* Register the unit in the manager's startup_units set, but only when it
 * actually configures startup-time resource settings ((unsigned long)-1
 * is the "unset" sentinel for both fields). */
1168 static int unit_add_startup_units(Unit *u) {
1172 c = unit_get_cgroup_context(u);
1176 if (c->startup_cpu_shares == (unsigned long) -1 &&
1177 c->startup_blockio_weight == (unsigned long) -1)
1180 r = set_put(u->manager->startup_units, u);
/* Drive the loading of a unit: pop it off the load queue, run the
 * type-specific load() hook, then — on success — wire up the derived
 * dependencies (targets, slice, mounts, startup set), reject the invalid
 * OnFailureJobMode=isolate + multiple OnFailure= combination, refresh
 * cgroup masks and notify via dbus/GC queues. On failure the unit is
 * marked NOT_FOUND or ERROR and the error is logged. The goto label and
 * several returns are elided from this excerpt. */
1187 int unit_load(Unit *u) {
1192 if (u->in_load_queue) {
1193 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1194 u->in_load_queue = false;
1197 if (u->type == _UNIT_TYPE_INVALID)
1200 if (u->load_state != UNIT_STUB)
1203 if (UNIT_VTABLE(u)->load) {
1204 r = UNIT_VTABLE(u)->load(u);
1209 if (u->load_state == UNIT_STUB) {
1214 if (u->load_state == UNIT_LOADED) {
1216 r = unit_add_target_dependencies(u);
1220 r = unit_add_slice_dependencies(u);
1224 r = unit_add_mount_dependencies(u);
1228 r = unit_add_startup_units(u);
1232 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1233 log_unit_error(u->id, "More than one OnFailure= dependencies specified for %s but OnFailureJobMode=isolate set. Refusing.", u->id);
1238 unit_update_cgroup_members_masks(u);
1241 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1243 unit_add_to_dbus_queue(unit_follow_merge(u));
1244 unit_add_to_gc_queue(u);
1249 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1251 unit_add_to_dbus_queue(u);
1252 unit_add_to_gc_queue(u);
1254 log_unit_debug(u->id, "Failed to load configuration for %s: %s",
1255 u->id, strerror(-r));
/* Evaluate a Condition (or Assert) list: an empty list is true;
 * otherwise every non-trigger condition must hold AND, if any trigger
 * ("|") conditions exist, at least one must hold. Each result is logged.
 * Some format arguments and the early-return bodies are elided from this
 * excerpt. */
1260 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1267 /* If the condition list is empty, then it is true */
1271 /* Otherwise, if all of the non-trigger conditions apply and
1272 * if any of the trigger conditions apply (unless there are
1273 * none) we return true */
1274 LIST_FOREACH(conditions, c, first) {
1277 r = condition_test(c);
1279 log_unit_warning(u->id,
1280 "Couldn't determine result for %s=%s%s%s for %s, assuming failed: %s",
1282 c->trigger ? "|" : "",
1283 c->negate ? "!" : "",
1288 log_unit_debug(u->id,
1289 "%s=%s%s%s %s for %s.",
1291 c->trigger ? "|" : "",
1292 c->negate ? "!" : "",
1294 condition_result_to_string(c->result),
1297 if (!c->trigger && r <= 0)
1300 if (c->trigger && triggered <= 0)
1304 return triggered != 0;
/* Evaluate the unit's Condition*= list, stamping the evaluation time and
 * caching the result on the unit. */
1307 static bool unit_condition_test(Unit *u) {
1310 dual_timestamp_get(&u->condition_timestamp);
1311 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1313 return u->condition_result;
/* Evaluate the unit's Assert*= list (same machinery as conditions),
 * stamping the time and caching the result on the unit. */
1316 static bool unit_assert_test(Unit *u) {
1319 dual_timestamp_get(&u->assert_timestamp);
1320 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1322 return u->assert_result;
/* Look up the type-specific "Starting ..."/"Stopping ..." console message
 * format for job type 't'; only start and stop have such messages. The
 * NULL return for other job types is elided from this excerpt. */
1325 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1326 const UnitStatusMessageFormats *format_table;
1330 assert(t < _JOB_TYPE_MAX);
1332 if (t != JOB_START && t != JOB_STOP)
1335 format_table = &UNIT_VTABLE(u)->status_message_formats;
1339 return format_table->starting_stopping[t == JOB_STOP];
/* Like unit_get_status_message_format(), but falls back to generic
 * "Starting/Stopping/Reloading %s." strings when the unit type provides
 * no specific format. */
1342 _pure_ static const char *unit_get_status_message_format_try_harder(Unit *u, JobType t) {
1347 assert(t < _JOB_TYPE_MAX);
1349 format = unit_get_status_message_format(u, t);
1353 /* Return generic strings */
1355 return "Starting %s.";
1356 else if (t == JOB_STOP)
1357 return "Stopping %s.";
1358 else if (t == JOB_RELOAD)
1359 return "Reloading %s.";
/* Print a "Starting ..."/"Stopping ..." status line on the console for
 * this unit/job-type pair, but only when the unit type defines a format
 * for it. The format-nonliteral warning is disabled because the format
 * string comes from the vtable, not a literal. */
1364 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1369 /* We only print status messages for selected units on
1370 * selected operations. */
1372 format = unit_get_status_message_format(u, t);
1376 DISABLE_WARNING_FORMAT_NONLITERAL;
1377 unit_status_printf(u, "", format);
/* Emit a structured journal entry for a start/stop/reload transition,
 * tagged with the matching SD_MESSAGE_UNIT_* message id. Skipped when
 * logging already goes to the console (the console status line covers
 * that case). Trailing log fields are elided from this excerpt. */
1381 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1388 if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
1391 if (log_on_console())
1394 /* We log status messages for all units and all operations. */
1396 format = unit_get_status_message_format_try_harder(u, t);
1400 DISABLE_WARNING_FORMAT_NONLITERAL;
1401 snprintf(buf, sizeof(buf), format, unit_description(u));
1404 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1405 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1406 SD_MESSAGE_UNIT_RELOADING;
1408 log_unit_struct(u->id,
1410 LOG_MESSAGE_ID(mid),
1411 LOG_MESSAGE("%s", buf),
/* Errors (documented return codes for unit_start): */
1416 * -EBADR: This unit type does not support starting.
1417 * -EALREADY: Unit is already started.
1418 * -EAGAIN: An operation is already in progress. Retry later.
1419 * -ECANCELED: Too many requests for now.
1420 * -EPROTO: Assert failed
/* Start the unit: succeed immediately if already active/reloading, bail
 * out silently on failed conditions, fail hard (-EPROTO per the list
 * above) on failed asserts, redirect to the unit we follow, check type
 * support, then invoke the type's start() hook and log/print status if
 * it did something. Return statements between the visible steps are
 * elided from this excerpt. */
1422 int unit_start(Unit *u) {
1423 UnitActiveState state;
1429 if (u->load_state != UNIT_LOADED)
1432 /* If this is already started, then this will succeed. Note
1433 * that this will even succeed if this unit is not startable
1434 * by the user. This is relied on to detect when we need to
1435 * wait for units and when waiting is finished. */
1436 state = unit_active_state(u);
1437 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1440 /* If the conditions failed, don't do anything at all. If we
1441 * already are activating this call might still be useful to
1442 * speed up activation in case there is some hold-off time,
1443 * but we don't want to recheck the condition in that case. */
1444 if (state != UNIT_ACTIVATING &&
1445 !unit_condition_test(u)) {
1446 log_unit_debug(u->id, "Starting of %s requested but condition failed. Not starting unit.", u->id);
1450 /* If the asserts failed, fail the entire job */
1451 if (state != UNIT_ACTIVATING &&
1452 !unit_assert_test(u)) {
1453 log_unit_debug(u->id, "Starting of %s requested but asserts failed.", u->id);
1457 /* Forward to the main object, if we aren't it. */
1458 following = unit_following(u);
1460 log_unit_debug(u->id, "Redirecting start request from %s to %s.", u->id, following->id);
1461 return unit_start(following);
1464 if (UNIT_VTABLE(u)->supported && !UNIT_VTABLE(u)->supported(u->manager))
1467 /* If it is stopped, but we cannot start it, then fail */
1468 if (!UNIT_VTABLE(u)->start)
1471 /* We don't suppress calls to ->start() here when we are
1472 * already starting, to allow this request to be used as a
1473 * "hurry up" call, for example when the unit is in some "auto
1474 * restart" state where it waits for a holdoff timer to elapse
1475 * before it will start again. */
1477 unit_add_to_dbus_queue(u);
1479 r = UNIT_VTABLE(u)->start(u);
1483 /* Log if the start function actually did something */
1484 unit_status_log_starting_stopping_reloading(u, JOB_START);
1485 unit_status_print_starting_stopping(u, JOB_START);
/* True if the unit's type implements a start() operation. */
1489 bool unit_can_start(Unit *u) {
1492 return !!UNIT_VTABLE(u)->start;
/* True if the unit may be the target of an isolate job: it must be
 * startable (second operand of the && elided from this excerpt). */
1495 bool unit_can_isolate(Unit *u) {
1498 return unit_can_start(u) &&
/* Errors (documented return codes for unit_stop): */
1503 * -EBADR: This unit type does not support stopping.
1504 * -EALREADY: Unit is already stopped.
1505 * -EAGAIN: An operation is already in progress. Retry later.
/* Stop the unit: succeed immediately when already inactive/failed,
 * redirect to the followed unit, fail (-EBADR per the list above) when
 * the type has no stop() hook, else invoke it and log/print status.
 * Return statements between the visible steps are elided from this
 * excerpt. */
1507 int unit_stop(Unit *u) {
1508 UnitActiveState state;
1514 state = unit_active_state(u);
1515 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1518 following = unit_following(u);
1520 log_unit_debug(u->id, "Redirecting stop request from %s to %s.", u->id, following->id);
1521 return unit_stop(following);
1524 if (!UNIT_VTABLE(u)->stop)
1527 unit_add_to_dbus_queue(u);
1529 r = UNIT_VTABLE(u)->stop(u);
1533 unit_status_log_starting_stopping_reloading(u, JOB_STOP);
1534 unit_status_print_starting_stopping(u, JOB_STOP);
/* Errors (documented return codes for unit_reload): */
1539 * -EBADR: This unit type does not support reloading.
1540 * -ENOEXEC: Unit is not started.
1541 * -EAGAIN: An operation is already in progress. Retry later.
/* Reload the unit: requires loaded state and a reloadable type; a unit
 * already reloading succeeds immediately, an inactive unit is refused
 * with a warning, and requests are redirected to the followed unit
 * before the type's reload() hook runs. Return statements between the
 * visible steps are elided from this excerpt. */
1543 int unit_reload(Unit *u) {
1544 UnitActiveState state;
1550 if (u->load_state != UNIT_LOADED)
1553 if (!unit_can_reload(u))
1556 state = unit_active_state(u);
1557 if (state == UNIT_RELOADING)
1560 if (state != UNIT_ACTIVE) {
1561 log_unit_warning(u->id, "Unit %s cannot be reloaded because it is inactive.", u->id);
1565 following = unit_following(u);
1567 log_unit_debug(u->id, "Redirecting reload request from %s to %s.", u->id, following->id);
1568 return unit_reload(following);
1571 unit_add_to_dbus_queue(u);
1573 r = UNIT_VTABLE(u)->reload(u);
1577 unit_status_log_starting_stopping_reloading(u, JOB_RELOAD);
/* True if the unit supports reloading: the type must implement reload()
 * and, if it implements can_reload(), that must agree too. The returns
 * of the two guard branches are elided from this excerpt. */
1581 bool unit_can_reload(Unit *u) {
1584 if (!UNIT_VTABLE(u)->reload)
1587 if (!UNIT_VTABLE(u)->can_reload)
1590 return UNIT_VTABLE(u)->can_reload(u);
/* For a StopWhenUnneeded= unit that is active, check whether anything
 * that requires/wants/binds to it is still active or pending; if nothing
 * is, enqueue a stop job for it. The early returns inside the FOREACH
 * bodies are elided from this excerpt. */
1593 static void unit_check_unneeded(Unit *u) {
1599 /* If this service shall be shut down when unneeded then do
1602 if (!u->stop_when_unneeded)
1605 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1608 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY], i)
1609 if (unit_active_or_pending(other))
1612 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
1613 if (unit_active_or_pending(other))
1616 SET_FOREACH(other, u->dependencies[UNIT_WANTED_BY], i)
1617 if (unit_active_or_pending(other))
1620 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1621 if (unit_active_or_pending(other))
1624 log_unit_info(u->id, "Unit %s is not needed anymore. Stopping.", u->id);
1626 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1627 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* For an active unit, check its BindsTo= peers: if one of them has gone
 * inactive/failed (further qualifying checks are elided from this
 * excerpt), enqueue a stop job for this unit too. */
1630 static void unit_check_binds_to(Unit *u) {
1640 if (unit_active_state(u) != UNIT_ACTIVE)
1643 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1647 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1656 log_unit_info(u->id, "Unit %s is bound to inactive service. Stopping, too.", u->id);
1658 /* A unit we need to run is gone. Sniff. Let's stop this. */
1659 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* When a unit became active outside of a job, start its hard and soft
 * forward dependencies that are not yet running — but only those NOT
 * ordered After= this unit (those would have been started by a proper
 * transaction) — and stop everything it conflicts with. Hard deps
 * (Requires=, BindsTo=) use JOB_REPLACE/required; soft deps
 * (RequiresOverridable=, Wants=) use JOB_FAIL/best-effort. */
1662 static void retroactively_start_dependencies(Unit *u) {
1667 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1669 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1670 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1671 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1672 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1674 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1675 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1676 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1677 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1679 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1680 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1681 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1682 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1684 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1685 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1686 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1687 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1689 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1690 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1691 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1693 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1694 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1695 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* Called when u went down without a job requesting it: stop all units
 * that are bound to us (BoundBy=, i.e. the inverse of BindsTo=). */
1698 static void retroactively_stop_dependencies(Unit *u) {
1703 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1705 /* Pull down units which are bound to us recursively if enabled */
1706 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1707 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1708 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* After u went down, re-evaluate every still-running unit that u pulled
 * in (Requires=, Wants=, Requisite=, BindsTo=, and the overridable
 * variants): they may have just lost their last reason to run and can
 * be garbage-collected via unit_check_unneeded(). */
1711 static void check_unneeded_dependencies(Unit *u) {
1716 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1718 /* Garbage collect services that might not be needed anymore, if enabled */
1719 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1720 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1721 unit_check_unneeded(other);
1722 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1723 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1724 unit_check_unneeded(other);
1725 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1726 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1727 unit_check_unneeded(other);
1728 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1729 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1730 unit_check_unneeded(other);
1731 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1732 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1733 unit_check_unneeded(other);
1734 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1735 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1736 unit_check_unneeded(other);
/* Enqueue start jobs for every unit listed in this unit's OnFailure=
 * dependencies, using the unit's configured OnFailureJobMode=. Failures
 * to enqueue are logged but do not abort the loop. */
1739 void unit_start_on_failure(Unit *u) {
/* Nothing to do if no OnFailure= dependencies are configured. */
1745 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1748 log_unit_info(u->id, "Triggering OnFailure= dependencies of %s.", u->id);
1750 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1753 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, true, NULL, NULL);
1755 log_unit_error_errno(u->id, r, "Failed to enqueue OnFailure= job: %m");
/* Notify every unit that is triggered by u (TriggeredBy=) about a state
 * change of u, via the per-type trigger_notify() vtable callback. */
1759 void unit_trigger_notify(Unit *u) {
1765 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1766 if (UNIT_VTABLE(other)->trigger_notify)
1767 UNIT_VTABLE(other)->trigger_notify(other, u);
/* Central state-change hook, invoked by the per-type implementations
 * whenever a unit transitions from active-state 'os' to 'ns'. Updates
 * timestamps, completes/invalidates pending jobs, retroactively starts
 * or stops dependencies, handles failure bookkeeping, audit/plymouth
 * notifications, and GC rechecks. 'reload_success' qualifies the result
 * of a finished reload. Note os == ns is a legitimate call. */
1770 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1775 assert(os < _UNIT_ACTIVE_STATE_MAX);
1776 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1778 /* Note that this is called for all low-level state changes,
1779 * even if they might map to the same high-level
1780 * UnitActiveState! That means that ns == os is an expected
1781 * behavior here. For example: if a mount point is remounted
1782 * this function will be called too! */
1786 /* Update timestamps for state changes */
/* Skip timestamp updates while deserializing during a reload. */
1787 if (m->n_reloading <= 0) {
1790 dual_timestamp_get(&ts);
1792 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1793 u->inactive_exit_timestamp = ts;
1794 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1795 u->inactive_enter_timestamp = ts;
1797 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1798 u->active_enter_timestamp = ts;
1799 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1800 u->active_exit_timestamp = ts;
1803 /* Keep track of failed units */
1804 if (ns == UNIT_FAILED)
1805 set_put(u->manager->failed_units, u);
1807 set_remove(u->manager->failed_units, u);
1809 /* Make sure the cgroup is always removed when we become inactive */
1810 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1811 unit_destroy_cgroup_if_empty(u);
1813 /* Note that this doesn't apply to RemainAfterExit services exiting
1814 * successfully, since there's no change of state in that case. Which is
1815 * why it is handled in service_set_state() */
/* Console accounting: only units whose exec context may touch the
 * console participate; release it when they go inactive/failed. */
1816 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1819 ec = unit_get_exec_context(u);
1820 if (ec && exec_context_may_touch_console(ec)) {
1821 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1824 if (m->n_on_console == 0)
1825 /* unset no_console_output flag, since the console is free */
1826 m->no_console_output = false;
/* A waiting job may have become runnable after this state change. */
1835 if (u->job->state == JOB_WAITING)
1837 /* So we reached a different state for this
1838 * job. Let's see if we can run it now if it
1839 * failed previously due to EAGAIN. */
1840 job_add_to_run_queue(u->job);
1842 /* Let's check whether this state change constitutes a
1843 * finished job, or maybe contradicts a running job and
1844 * hence needs to invalidate jobs. */
1846 switch (u->job->type) {
1849 case JOB_VERIFY_ACTIVE:
1851 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1852 job_finish_and_invalidate(u->job, JOB_DONE, true);
1853 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1856 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1857 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1863 case JOB_RELOAD_OR_START:
1865 if (u->job->state == JOB_RUNNING) {
1866 if (ns == UNIT_ACTIVE)
/* Reload finished: outcome depends on reload_success. */
1867 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1868 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1871 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1872 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1880 case JOB_TRY_RESTART:
1882 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1883 job_finish_and_invalidate(u->job, JOB_DONE, true);
1884 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1886 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1892 assert_not_reached("Job type unknown");
/* Retroactive dependency handling — skipped during reload/deserialize
 * so no spurious jobs are created for already-running units. */
1898 if (m->n_reloading <= 0) {
1900 /* If this state change happened without being
1901 * requested by a job, then let's retroactively start
1902 * or stop dependencies. We skip that step when
1903 * deserializing, since we don't want to create any
1904 * additional jobs just because something is already
1908 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1909 retroactively_start_dependencies(u);
1910 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1911 retroactively_stop_dependencies(u);
1914 /* stop unneeded units regardless if going down was expected or not */
1915 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1916 check_unneeded_dependencies(u);
/* Entering UNIT_FAILED triggers the OnFailure= dependencies. */
1918 if (ns != os && ns == UNIT_FAILED) {
1919 log_unit_notice(u->id, "Unit %s entered failed state.", u->id);
1920 unit_start_on_failure(u);
1924 /* Some names are special */
1925 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1927 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1928 /* The bus might have just become available,
1929 * hence try to connect to it, if we aren't
/* Audit record for a service that just finished starting up. */
1933 if (u->type == UNIT_SERVICE &&
1934 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1935 m->n_reloading <= 0) {
1936 /* Write audit record if we have just finished starting up */
1937 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1941 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1942 manager_send_unit_plymouth(m, u);
1946 /* We don't care about D-Bus here, since we'll get an
1947 * asynchronous notification for it anyway. */
1949 if (u->type == UNIT_SERVICE &&
1950 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1951 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1952 m->n_reloading <= 0) {
1954 /* Hmm, if there was no start record written
1955 * write it now, so that we always have a nice
1958 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1960 if (ns == UNIT_INACTIVE)
1961 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1963 /* Write audit record if we have just finished shutting down */
1964 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
1966 u->in_audit = false;
1970 manager_recheck_journal(m);
1971 unit_trigger_notify(u);
/* Final GC-style rechecks, again suppressed during reload. */
1973 if (u->manager->n_reloading <= 0) {
1974 /* Maybe we finished startup and are now ready for
1975 * being stopped because unneeded? */
1976 unit_check_unneeded(u);
1978 /* Maybe we finished startup, but something we needed
1979 * has vanished? Let's die then. (This happens when
1980 * something BindsTo= to a Type=oneshot unit, as these
1981 * units go directly from starting to inactive,
1982 * without ever entering started.) */
1983 unit_check_binds_to(u);
1986 unit_add_to_dbus_queue(u);
1987 unit_add_to_gc_queue(u);
/* Register 'pid' as belonging to this unit. The PID is recorded in the
 * unit's own pid set and in one of the manager's two watch_pids hashmaps
 * (two maps so that up to two units can watch the same PID). Returns a
 * negative errno on allocation/insert failure — TODO confirm, error
 * paths are elided here. */
1990 int unit_watch_pid(Unit *u, pid_t pid) {
1996 /* Watch a specific PID. We only support one or two units
1997 * watching each PID for now, not more. */
1999 r = set_ensure_allocated(&u->pids, NULL);
2003 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
/* First slot; on conflict the code below falls back to watch_pids2. */
2007 r = hashmap_put(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
2009 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2013 r = hashmap_put(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2016 q = set_put(u->pids, LONG_TO_PTR(pid));
/* Stop watching 'pid' for this unit: drop it from both manager-level
 * watch maps (only where it maps to this unit) and from the unit's set. */
2023 void unit_unwatch_pid(Unit *u, pid_t pid) {
2027 hashmap_remove_value(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
2028 hashmap_remove_value(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2029 set_remove(u->pids, LONG_TO_PTR(pid));
/* Unwatch every PID currently tracked for this unit, draining u->pids
 * one entry at a time via unit_unwatch_pid(). */
2032 void unit_unwatch_all_pids(Unit *u) {
2035 while (!set_isempty(u->pids))
2036 unit_unwatch_pid(u, PTR_TO_LONG(set_first(u->pids)));
/* Recursively add every PID found in cgroup 'path' (and its subgroups)
 * to the unit's watched-PID set. Collects the first error seen in 'ret'
 * but keeps going, so as many PIDs as possible get watched. */
2042 static int unit_watch_pids_in_path(Unit *u, const char *path) {
2043 _cleanup_closedir_ DIR *d = NULL;
2044 _cleanup_fclose_ FILE *f = NULL;
2050 /* Adds all PIDs from a specific cgroup path to the set of PIDs we watch. */
2052 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2056 while ((r = cg_read_pid(f, &pid)) > 0) {
2057 r = unit_watch_pid(u, pid);
/* Remember only the first failure; continue with remaining PIDs. */
2058 if (r < 0 && ret >= 0)
2061 if (r < 0 && ret >= 0)
2064 } else if (ret >= 0)
/* Now recurse into child cgroups. */
2067 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2071 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2072 _cleanup_free_ char *p = NULL;
2074 p = strjoin(path, "/", fn, NULL);
2080 r = unit_watch_pids_in_path(u, p);
2081 if (r < 0 && ret >= 0)
2084 if (r < 0 && ret >= 0)
2087 } else if (ret >= 0)
/* Watch all PIDs in the unit's own cgroup (recursively). No-op result
 * when the unit has no cgroup path assigned. */
2093 int unit_watch_all_pids(Unit *u) {
2096 /* Adds all PIDs from our cgroup to the set of PIDs we watch */
2098 if (!u->cgroup_path)
2101 return unit_watch_pids_in_path(u, u->cgroup_path);
/* Drop watched PIDs that are already dead (no longer waitable),
 * keeping 'except1' and 'except2' (typically main/control PID —
 * presumably; confirm against callers) regardless of state. */
2104 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2110 /* Cleans dead PIDs from our list */
2112 SET_FOREACH(e, u->pids, i) {
2113 pid_t pid = PTR_TO_LONG(e);
2115 if (pid == except1 || pid == except2)
/* pid_is_unwaited() false => process gone; stop watching it. */
2118 if (!pid_is_unwaited(pid))
2119 unit_unwatch_pid(u, pid);
/* Return whether job type 'j' makes sense for unit 'u', based on the
 * unit's capabilities (startable, reloadable). Elided cases exist for
 * the remaining job types in this view. */
2123 bool unit_job_is_applicable(Unit *u, JobType j) {
2125 assert(j >= 0 && j < _JOB_TYPE_MAX);
2129 case JOB_VERIFY_ACTIVE:
2136 case JOB_TRY_RESTART:
2137 return unit_can_start(u);
2140 return unit_can_reload(u);
2142 case JOB_RELOAD_OR_START:
2143 return unit_can_reload(u) && unit_can_start(u);
2146 assert_not_reached("Invalid job type");
/* Log a warning when a dependency is dropped because it points at the
 * unit itself (possibly after a merge). Only the dependency types that
 * are interesting to the user produce a warning; the rest fall through
 * silently (the elided cases). */
2150 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency) {
2153 switch (dependency) {
/* Requirement/ordering-style dependencies: warn below. */
2155 case UNIT_REQUIRES_OVERRIDABLE:
2157 case UNIT_REQUISITE:
2158 case UNIT_REQUISITE_OVERRIDABLE:
2161 case UNIT_REQUIRED_BY:
2162 case UNIT_REQUIRED_BY_OVERRIDABLE:
2163 case UNIT_WANTED_BY:
2165 case UNIT_CONSISTS_OF:
2166 case UNIT_REFERENCES:
2167 case UNIT_REFERENCED_BY:
2168 case UNIT_PROPAGATES_RELOAD_TO:
2169 case UNIT_RELOAD_PROPAGATED_FROM:
2170 case UNIT_JOINS_NAMESPACE_OF:
2173 case UNIT_CONFLICTS:
2174 case UNIT_CONFLICTED_BY:
2177 case UNIT_ON_FAILURE:
2179 case UNIT_TRIGGERED_BY:
/* Same id on both sides: a plain self-dependency. Otherwise the
 * self-reference arose from merging two units. */
2180 if (streq_ptr(id, other))
2181 log_unit_warning(id, "Dependency %s=%s dropped from unit %s",
2182 unit_dependency_to_string(dependency), id, other);
2184 log_unit_warning(id, "Dependency %s=%s dropped from unit %s merged into %s",
2185 unit_dependency_to_string(dependency), id,
2189 case _UNIT_DEPENDENCY_MAX:
2190 case _UNIT_DEPENDENCY_INVALID:
2194 assert_not_reached("Invalid dependency type");
/* Add dependency 'd' from u to other, automatically registering the
 * inverse dependency on 'other' (per inverse_table) and, when
 * add_reference is set, a References=/ReferencedBy= pair as well.
 * Both units are first resolved through unit_follow_merge();
 * self-dependencies are dropped with a warning, not an error. On
 * failure, previously inserted set entries are rolled back (the elided
 * 'fail' path at the bottom). */
2197 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2199 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2200 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2201 [UNIT_REQUIRES_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2202 [UNIT_WANTS] = UNIT_WANTED_BY,
2203 [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
2204 [UNIT_REQUISITE_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2205 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2206 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
/* _UNIT_DEPENDENCY_INVALID entries have no inverse to register. */
2207 [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
2208 [UNIT_REQUIRED_BY_OVERRIDABLE] = _UNIT_DEPENDENCY_INVALID,
2209 [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
2210 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2211 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2212 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2213 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2214 [UNIT_BEFORE] = UNIT_AFTER,
2215 [UNIT_AFTER] = UNIT_BEFORE,
2216 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2217 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2218 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2219 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2220 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2221 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2222 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
/* JoinsNamespaceOf= is symmetric: it is its own inverse. */
2223 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2225 int r, q = 0, v = 0, w = 0;
2226 Unit *orig_u = u, *orig_other = other;
2229 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2232 u = unit_follow_merge(u);
2233 other = unit_follow_merge(other);
2235 /* We won't allow dependencies on ourselves. We will not
2236 * consider them an error however. */
2238 maybe_warn_about_dependency(orig_u->id, orig_other->id, d);
/* Pre-allocate all target sets before mutating anything. */
2242 r = set_ensure_allocated(&u->dependencies[d], NULL);
2246 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2247 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2252 if (add_reference) {
2253 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2257 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
/* Insert forward edge, inverse edge (unless symmetric), references. */
2262 q = set_put(u->dependencies[d], other);
2266 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2267 v = set_put(other->dependencies[inverse_table[d]], u);
2274 if (add_reference) {
2275 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2281 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2286 unit_add_to_dbus_queue(u);
/* Failure rollback: undo the insertions performed above. */
2291 set_remove(u->dependencies[d], other);
2294 set_remove(other->dependencies[inverse_table[d]], u);
2297 set_remove(u->dependencies[UNIT_REFERENCES], other);
/* Convenience wrapper: add both dependency types 'd' and 'e' from u to
 * other in one call (e.g. After= plus Requires=). */
2302 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2307 r = unit_add_dependency(u, d, other, add_reference);
2311 r = unit_add_dependency(u, e, other, add_reference);
/* Resolve 'name' (or, if NULL, the basename of 'path') into a concrete
 * unit name. Template names ("foo@.service") get the instance of 'u'
 * filled in — either u->instance or, lacking one, the prefix of u->id.
 * Any allocated result is returned through *p so the caller can free it;
 * the function's return value is the name to use. */
2318 static const char *resolve_template(Unit *u, const char *name, const char*path, char **p) {
2322 assert(name || path);
2326 name = basename(path);
/* Non-template names pass through unchanged. */
2328 if (!unit_name_is_template(name)) {
2334 s = unit_name_replace_instance(name, u->instance);
2336 _cleanup_free_ char *i = NULL;
/* No instance on u: derive one from u's own name prefix. */
2338 i = unit_name_to_prefix(u->id);
2342 s = unit_name_replace_instance(name, i);
/* Like unit_add_dependency(), but looks the target unit up (loading it
 * if necessary) by name/path, resolving template names first. */
2352 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2355 _cleanup_free_ char *s = NULL;
2358 assert(name || path);
2360 name = resolve_template(u, name, path, &s);
2364 r = manager_load_unit(u->manager, name, path, NULL, &other);
2368 return unit_add_dependency(u, d, other, add_reference);
/* Like unit_add_two_dependencies(), but looks the target unit up
 * (loading it if necessary) by name/path, resolving templates first. */
2371 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2372 _cleanup_free_ char *s = NULL;
2377 assert(name || path);
2379 name = resolve_template(u, name, path, &s);
2383 r = manager_load_unit(u->manager, name, path, NULL, &other);
2387 return unit_add_two_dependencies(u, d, e, other, add_reference);
/* Inverse form: the looked-up unit becomes the *subject* of the
 * dependency, i.e. other gets 'd' on u (note argument order below). */
2390 int unit_add_dependency_by_name_inverse(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2393 _cleanup_free_ char *s = NULL;
2396 assert(name || path);
2398 name = resolve_template(u, name, path, &s);
2402 r = manager_load_unit(u->manager, name, path, NULL, &other);
2406 return unit_add_dependency(other, d, u, add_reference);
/* Inverse form of unit_add_two_dependencies_by_name(): the looked-up
 * unit gets dependencies 'd' and 'e' on u. */
2409 int unit_add_two_dependencies_by_name_inverse(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2412 _cleanup_free_ char *s = NULL;
2415 assert(name || path);
2417 name = resolve_template(u, name, path, &s);
2421 r = manager_load_unit(u->manager, name, path, NULL, &other);
2425 r = unit_add_two_dependencies(other, d, e, u, add_reference);
/* Debug helper: export SYSTEMD_UNIT_PATH so subsequent unit lookups use
 * path 'p'. Does not overwrite an existing value (setenv overwrite=0). */
2432 int set_unit_path(const char *p) {
2433 /* This is mostly for debug purposes */
2434 if (setenv("SYSTEMD_UNIT_PATH", p, 0) < 0)
/* Return the newly allocated D-Bus object path for this unit, derived
 * from its primary name. Caller frees. */
2440 char *unit_dbus_path(Unit *u) {
2446 return unit_dbus_path_from_name(u->id);
/* Compute the default cgroup path for this unit: the manager's cgroup
 * root, optionally the (escaped) slice path, then the escaped unit name.
 * The root slice itself maps directly to the cgroup root. Returns a
 * newly allocated string (caller frees), NULL on OOM. */
2449 char *unit_default_cgroup_path(Unit *u) {
2450 _cleanup_free_ char *escaped = NULL, *slice = NULL;
2455 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2456 return strdup(u->manager->cgroup_root);
/* Units in a non-root slice get the slice's path as a prefix. */
2458 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
2459 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
2464 escaped = cg_escape(u->id);
2469 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
2471 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
/* Assign a default slice to a unit that has none: instantiated units
 * get a per-template slice (escaped prefix, "system-" prefixed for the
 * system instance), others fall back to system.slice / the root slice
 * depending on running_as. The chosen slice unit is loaded and stored
 * in u->slice. */
2474 int unit_add_default_slice(Unit *u, CGroupContext *c) {
2475 _cleanup_free_ char *b = NULL;
2476 const char *slice_name;
/* Respect an explicitly configured slice. */
2483 if (UNIT_ISSET(u->slice))
2487 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2489 /* Implicitly place all instantiated units in their
2490 * own per-template slice */
2492 prefix = unit_name_to_prefix(u->id);
2496 /* The prefix is already escaped, but it might include
2497 * "-" which has a special meaning for slice units,
2498 * hence escape it here extra. */
2499 escaped = strreplace(prefix, "-", "\\x2d");
2503 if (u->manager->running_as == SYSTEMD_SYSTEM)
2504 b = strjoin("system-", escaped, ".slice", NULL);
2506 b = strappend(escaped, ".slice");
/* Non-instantiated default: system.slice for the system manager,
 * the root slice otherwise. */
2513 u->manager->running_as == SYSTEMD_SYSTEM
2514 ? SPECIAL_SYSTEM_SLICE
2515 : SPECIAL_ROOT_SLICE;
2517 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2521 unit_ref_set(&u->slice, slice);
/* Return the id of the unit's slice, or (per the elided branch) a
 * fallback when no slice is set. */
2525 const char *unit_slice_name(Unit *u) {
2528 if (!UNIT_ISSET(u->slice))
2531 return UNIT_DEREF(u->slice)->id;
/* Load the unit with the same name as 'u' but suffix 'type' (e.g. the
 * ".socket" sibling of a ".service"), returning it in *_found. The
 * derived name must differ from u's own names. */
2534 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2535 _cleanup_free_ char *t = NULL;
2542 t = unit_name_change_suffix(u->id, type);
2546 assert(!unit_has_name(u, t));
2548 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
/* A successful load must never hand back u itself. */
2549 assert(r < 0 || *_found != u);
/* Register interest in bus name 'name' for this unit; only a single
 * unit can watch a given name (hashmap_put fails on duplicates). */
2553 int unit_watch_bus_name(Unit *u, const char *name) {
2557 /* Watch a specific name on the bus. We only support one unit
2558 * watching each name for now. */
2560 return hashmap_put(u->manager->watch_bus, name, u);
/* Drop the bus-name watch, but only if it is held by this unit. */
2563 void unit_unwatch_bus_name(Unit *u, const char *name) {
2567 hashmap_remove_value(u->manager->watch_bus, name, u);
/* A unit type supports serialization iff it implements both the
 * serialize and deserialize_item vtable callbacks. */
2570 bool unit_can_serialize(Unit *u) {
2573 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
/* Write the unit's state to 'f' for daemon re-execution: per-type
 * state, exec runtime, timestamps, condition/assert results, transient
 * flag, cgroup path, and (optionally) the pending job and nop job.
 * File descriptors to be preserved are pushed into 'fds'. */
2576 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2583 if (unit_can_serialize(u)) {
/* Per-type state first... */
2586 r = UNIT_VTABLE(u)->serialize(u, f, fds);
/* ...then the shared exec runtime, if any. */
2590 rt = unit_get_exec_runtime(u);
2592 r = exec_runtime_serialize(rt, u, f, fds);
2598 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2599 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2600 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2601 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2602 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2603 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
/* Results are only meaningful once the checks actually ran. */
2605 if (dual_timestamp_is_set(&u->condition_timestamp))
2606 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2608 if (dual_timestamp_is_set(&u->assert_timestamp))
2609 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2611 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2614 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2616 if (serialize_jobs) {
2618 fprintf(f, "job\n");
2619 job_serialize(u->job, f, fds);
2623 fprintf(f, "job\n");
2624 job_serialize(u->nop_job, f, fds);
/* printf-style variant of unit_serialize_item(): write "key=" followed
 * by the formatted value to the serialization stream. */
2633 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2644 va_start(ap, format);
2645 vfprintf(f, format, ap);
/* Write a single "key=value" line into the serialization stream. */
2651 void unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2657 fprintf(f, "%s=%s\n", key, value);
/* Inverse of unit_serialize(): read "key=value" lines from 'f' and
 * restore unit state after a daemon re-exec. Jobs are reconstructed and
 * re-installed; unknown keys fall through to the per-type
 * deserialize_item callback. Parse failures of individual values are
 * logged at debug level and skipped rather than aborting. */
2660 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2661 ExecRuntime **rt = NULL;
/* Locate the type's embedded ExecRuntime* (offset 0 = not supported). */
2669 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2671 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2674 char line[LINE_MAX], *l, *v;
2677 if (!fgets(line, sizeof(line), f)) {
/* Split the line at the first '=' into key l and value v. */
2690 k = strcspn(l, "=");
2698 if (streq(l, "job")) {
2700 /* new-style serialized job */
2707 r = job_deserialize(j, f, fds);
2713 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2719 r = job_install_deserialized(j);
/* Installation failed: take the job out of the registry again. */
2721 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
/* old-style job serialization: only the type was recorded. */
2729 type = job_type_from_string(v);
2731 log_debug("Failed to parse job type value %s", v);
2733 u->deserialized_job = type;
2736 } else if (streq(l, "inactive-exit-timestamp")) {
2737 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2739 } else if (streq(l, "active-enter-timestamp")) {
2740 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2742 } else if (streq(l, "active-exit-timestamp")) {
2743 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2745 } else if (streq(l, "inactive-enter-timestamp")) {
2746 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2748 } else if (streq(l, "condition-timestamp")) {
2749 dual_timestamp_deserialize(v, &u->condition_timestamp);
2751 } else if (streq(l, "assert-timestamp")) {
2752 dual_timestamp_deserialize(v, &u->assert_timestamp);
2754 } else if (streq(l, "condition-result")) {
2757 b = parse_boolean(v);
2759 log_debug("Failed to parse condition result value %s", v);
2761 u->condition_result = b;
2765 } else if (streq(l, "assert-result")) {
2768 b = parse_boolean(v);
2770 log_debug("Failed to parse assert result value %s", v);
2772 u->assert_result = b;
2776 } else if (streq(l, "transient")) {
2779 b = parse_boolean(v);
2781 log_debug("Failed to parse transient bool %s", v);
2786 } else if (streq(l, "cgroup")) {
/* Replacing an existing cgroup path: unregister the old mapping. */
2793 if (u->cgroup_path) {
2796 p = hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2797 log_info("Removing cgroup_path %s from hashmap (%p)",
2799 free(u->cgroup_path);
2803 assert(hashmap_put(u->manager->cgroup_unit, s, u) == 1);
/* Anything else belongs to the type-specific deserializer (with the
 * exec runtime getting a first chance, per the surrounding code). */
2808 if (unit_can_serialize(u)) {
2810 r = exec_runtime_deserialize_item(rt, u, l, v, fds);
2817 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
/* Tie this unit to the device unit backing device node 'what':
 * After= + BindsTo= on the device, and a Wants= from the device back to
 * us (the 'wants' parameter presumably gates that last edge — the
 * conditional is elided here, confirm upstream). Non-device paths are
 * ignored. */
2824 int unit_add_node_link(Unit *u, const char *what, bool wants) {
2826 _cleanup_free_ char *e = NULL;
2834 /* Adds in links to the device node that this unit is based on */
2836 if (!is_device_path(what))
2839 e = unit_name_from_path(what, ".device");
2843 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2848 r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_BINDS_TO, device, true);
2853 r = unit_add_dependency(device, UNIT_WANTS, u, false);
/* Re-attach the unit to runtime state after deserialization: run the
 * per-type coldplug callback, then revive either the deserialized job
 * object or (old-style) re-enqueue the recorded job type. */
2861 int unit_coldplug(Unit *u) {
2866 if (UNIT_VTABLE(u)->coldplug)
2867 if ((r = UNIT_VTABLE(u)->coldplug(u)) < 0)
2871 r = job_coldplug(u->job);
2874 } else if (u->deserialized_job >= 0) {
/* Old-style serialization: recreate the job, then clear the marker. */
2876 r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL);
2880 u->deserialized_job = _JOB_TYPE_INVALID;
/* Print a console status line for this unit, substituting the unit's
 * description into the caller-provided format (hence the suppressed
 * format-nonliteral warning). */
2886 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
2887 DISABLE_WARNING_FORMAT_NONLITERAL;
2888 manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
2889 status, unit_status_msg_format, unit_description(u));
/* Return true if the unit's on-disk configuration changed since it was
 * loaded: fragment or source file vanished or has a newer mtime, or the
 * set of drop-in files differs (count, membership, or mtime). */
2893 bool unit_need_daemon_reload(Unit *u) {
2894 _cleanup_strv_free_ char **t = NULL;
2897 unsigned loaded_cnt, current_cnt;
2901 if (u->fragment_path) {
2903 if (stat(u->fragment_path, &st) < 0)
2904 /* What, cannot access this anymore? */
/* Only compare mtimes if one was recorded at load time. */
2907 if (u->fragment_mtime > 0 &&
2908 timespec_load(&st.st_mtim) != u->fragment_mtime)
2912 if (u->source_path) {
2914 if (stat(u->source_path, &st) < 0)
2917 if (u->source_mtime > 0 &&
2918 timespec_load(&st.st_mtim) != u->source_mtime)
/* Compare the drop-ins currently on disk against the loaded set. */
2922 (void) unit_find_dropin_paths(u, &t);
2923 loaded_cnt = strv_length(t);
2924 current_cnt = strv_length(u->dropin_paths);
2926 if (loaded_cnt == current_cnt) {
2927 if (loaded_cnt == 0)
2930 if (strv_overlap(u->dropin_paths, t)) {
2931 STRV_FOREACH(path, u->dropin_paths) {
2933 if (stat(*path, &st) < 0)
2936 if (u->dropin_mtime > 0 &&
2937 timespec_load(&st.st_mtim) > u->dropin_mtime)
/* Clear the unit's failed state via the per-type reset_failed callback,
 * if the type implements one. */
2948 void unit_reset_failed(Unit *u) {
2951 if (UNIT_VTABLE(u)->reset_failed)
2952 UNIT_VTABLE(u)->reset_failed(u);
/* Return the unit whose state this unit follows (per-type following()
 * callback), or the elided default when the type has none. */
2955 Unit *unit_following(Unit *u) {
2958 if (UNIT_VTABLE(u)->following)
2959 return UNIT_VTABLE(u)->following(u);
/* True if a stop job is queued for this unit. Deliberately looks only
 * at the job queue, not the current unit state. */
2964 bool unit_stop_pending(Unit *u) {
2967 /* This call does check the current state of the unit. It's
2968 * hence useful to be called from state change calls of the
2969 * unit itself, where the state isn't updated yet. This is
2970 * different from unit_inactive_or_pending() which checks both
2971 * the current state and for a queued job. */
2973 return u->job && u->job->type == JOB_STOP;
/* True if the unit is already inactive/deactivating or has a stop job
 * queued — i.e. it is going down one way or another. */
2976 bool unit_inactive_or_pending(Unit *u) {
2979 /* Returns true if the unit is inactive or going down */
2981 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
2984 if (unit_stop_pending(u))
/* True if the unit is active/activating or has a job queued that will
 * (re)start it. */
2990 bool unit_active_or_pending(Unit *u) {
2993 /* Returns true if the unit is active or going up */
2995 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
2999 (u->job->type == JOB_START ||
3000 u->job->type == JOB_RELOAD_OR_START ||
3001 u->job->type == JOB_RESTART))
/* Send signal 'signo' to the processes of this unit selected by 'w',
 * delegating to the per-type kill() callback; types without one cannot
 * be killed this way. */
3007 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3009 assert(w >= 0 && w < _KILL_WHO_MAX);
3011 assert(signo < _NSIG);
3013 if (!UNIT_VTABLE(u)->kill)
3016 return UNIT_VTABLE(u)->kill(u, w, signo, error);
/* Build a set containing the main and control PID (each only when > 0),
 * used as the exclusion list when killing a whole cgroup. Returns NULL
 * on allocation failure (elided error paths). */
3019 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3023 pid_set = set_new(NULL);
3027 /* Exclude the main/control pids from being killed via the cgroup */
3029 r = set_put(pid_set, LONG_TO_PTR(main_pid));
3034 if (control_pid > 0) {
3035 r = set_put(pid_set, LONG_TO_PTR(control_pid));
/* Shared kill implementation used by unit types: signal the control
 * and/or main PID depending on 'who', and for KILL_ALL additionally
 * signal every other process in the unit's cgroup (excluding
 * main/control). Reports friendly bus errors when the requested target
 * does not exist. */
3047 int unit_kill_common(
3053 sd_bus_error *error) {
3057 if (who == KILL_MAIN && main_pid <= 0) {
/* main_pid < 0 presumably means "type has no main process" vs.
 * 0 meaning "none right now" — confirm against callers. */
3059 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3061 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3064 if (who == KILL_CONTROL && control_pid <= 0) {
3065 if (control_pid < 0)
3066 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3068 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3071 if (who == KILL_CONTROL || who == KILL_ALL)
3072 if (control_pid > 0)
3073 if (kill(control_pid, signo) < 0)
3076 if (who == KILL_MAIN || who == KILL_ALL)
3078 if (kill(main_pid, signo) < 0)
3081 if (who == KILL_ALL && u->cgroup_path) {
3082 _cleanup_set_free_ Set *pid_set = NULL;
3085 /* Exclude the main/control pids from being killed via the cgroup */
3086 pid_set = unit_pid_set(main_pid, control_pid);
3090 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, true, false, pid_set);
/* EAGAIN/ESRCH/ENOENT from the cgroup sweep are not reported. */
3091 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
/* Collect the set of units following this one, via the per-type
 * following_set() callback when implemented. */
3098 int unit_following_set(Unit *u, Set **s) {
3102 if (UNIT_VTABLE(u)->following_set)
3103 return UNIT_VTABLE(u)->following_set(u, s);
/* Lazily query and cache the enable/disable state of the unit file
 * backing this unit (only when a fragment path exists). */
3109 UnitFileState unit_get_unit_file_state(Unit *u) {
3112 if (u->unit_file_state < 0 && u->fragment_path)
3113 u->unit_file_state = unit_file_get_state(
3114 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3115 NULL, basename(u->fragment_path));
3117 return u->unit_file_state;
/* Lazily query and cache the preset policy (enable/disable) for the
 * unit file backing this unit (only when a fragment path exists). */
3120 int unit_get_unit_file_preset(Unit *u) {
3123 if (u->unit_file_preset < 0 && u->fragment_path)
3124 u->unit_file_preset = unit_file_query_preset(
3125 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3126 NULL, basename(u->fragment_path));
3128 return u->unit_file_preset;
/* Point 'ref' at unit 'u': drop any previous target first, then link
 * the ref into u's list of references. */
3131 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3136 unit_ref_unset(ref);
3139 LIST_PREPEND(refs, u->refs, ref);
/* Detach 'ref' from its target unit's reference list (no-op handling of
 * an unset ref is in the elided lines). */
3143 void unit_ref_unset(UnitRef *ref) {
3149 LIST_REMOVE(refs, ref->unit->refs, ref);
/* After all configuration is parsed, fold manager-level defaults and
 * derived settings into the unit's exec and cgroup contexts: default
 * rlimits, a working directory for user instances, NoNewPrivileges for
 * user-level seccomp users, and device-policy tightening when
 * PrivateDevices= is on. */
3153 int unit_patch_contexts(Unit *u) {
3161 /* Patch in the manager defaults into the exec and cgroup
3162 * contexts, _after_ the rest of the settings have been
3165 ec = unit_get_exec_context(u);
3167 /* This only copies in the ones that need memory */
3168 for (i = 0; i < _RLIMIT_MAX; i++)
3169 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3170 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
/* User instances default WorkingDirectory= to $HOME. */
3175 if (u->manager->running_as == SYSTEMD_USER &&
3176 !ec->working_directory) {
3178 r = get_home_dir(&ec->working_directory);
/* Any seccomp/address-family filtering in user mode implies
 * NoNewPrivileges=, since the filters require it there. */
3183 if (u->manager->running_as == SYSTEMD_USER &&
3184 (ec->syscall_whitelist ||
3185 !set_isempty(ec->syscall_filter) ||
3186 !set_isempty(ec->syscall_archs) ||
3187 ec->address_families_whitelist ||
3188 !set_isempty(ec->address_families)))
3189 ec->no_new_privileges = true;
/* PrivateDevices= implies dropping CAP_MKNOD... */
3191 if (ec->private_devices)
3192 ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3195 cc = unit_get_cgroup_context(u);
/* ...and tightening an automatic device policy to "closed". */
3199 ec->private_devices &&
3200 cc->device_policy == CGROUP_AUTO)
3201 cc->device_policy = CGROUP_CLOSED;
/* Return the type's embedded ExecContext via its vtable offset, or the
 * elided NULL result when the type declares none (offset 0). */
3207 ExecContext *unit_get_exec_context(Unit *u) {
3214 offset = UNIT_VTABLE(u)->exec_context_offset;
3218 return (ExecContext*) ((uint8_t*) u + offset);
/* Return the type's embedded KillContext via its vtable offset, or the
 * elided NULL result when the type declares none (offset 0). */
3221 KillContext *unit_get_kill_context(Unit *u) {
3228 offset = UNIT_VTABLE(u)->kill_context_offset;
3232 return (KillContext*) ((uint8_t*) u + offset);
/* Return the type's embedded CGroupContext via its vtable offset, or
 * the elided NULL result when the type declares none (offset 0). */
3235 CGroupContext *unit_get_cgroup_context(Unit *u) {
3241 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3245 return (CGroupContext*) ((uint8_t*) u + offset);
/* Return the type's ExecRuntime pointer (note: the field is itself a
 * pointer, hence the extra dereference), or NULL when unsupported. */
3248 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3254 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3258 return *(ExecRuntime**) ((uint8_t*) u + offset);
/* Pick the directory for drop-in snippets: for user managers the config
 * home (persistent) or runtime dir; for the system manager
 * /etc/systemd/system (persistent) or /run/systemd/system. Transient
 * units always get the runtime location, even in persistent mode. */
3261 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3262 if (u->manager->running_as == SYSTEMD_USER) {
3265 if (mode == UNIT_PERSISTENT && !transient)
3266 r = user_config_home(dir);
3268 r = user_runtime_dir(dir);
3275 if (mode == UNIT_PERSISTENT && !transient)
3276 *dir = strdup("/etc/systemd/system");
3278 *dir = strdup("/run/systemd/system");
/* Compute the path (and temp path) of drop-in 'name' for this unit in
 * the directory chosen by unit_drop_in_dir(); priority prefix is 50. */
3285 static int unit_drop_in_file(Unit *u,
3286 UnitSetPropertiesMode mode, const char *name, char **p, char **q) {
3287 _cleanup_free_ char *dir = NULL;
3292 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3296 return drop_in_file(dir, u->id, 50, name, p, q);
/* Write drop-in 'name' with contents 'data' for this unit, record the
 * resulting path in u->dropin_paths (sorted, deduplicated) and bump the
 * drop-in mtime so unit_need_daemon_reload() sees the change. */
3299 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3301 _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
/* Only the persistent/runtime modes actually write anything. */
3306 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3309 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3313 r = write_drop_in(dir, u->id, 50, name, data);
3317 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3321 r = strv_extend(&u->dropin_paths, q);
3325 strv_sort(u->dropin_paths);
3326 strv_uniq(u->dropin_paths);
3328 u->dropin_mtime = now(CLOCK_REALTIME);
/* printf-style convenience wrapper around unit_write_drop_in(). */
3333 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3334 _cleanup_free_ char *p = NULL;
3342 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3345 va_start(ap, format);
3346 r = vasprintf(&p, format, ap);
3352 return unit_write_drop_in(u, mode, name, p);
/* Like unit_write_drop_in(), but wraps 'data' in the unit type's
 * private section header ("[Service]" etc.); types without a private
 * section are rejected. */
3355 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3356 _cleanup_free_ char *ndata = NULL;
3362 if (!UNIT_VTABLE(u)->private_section)
3365 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3368 ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3372 return unit_write_drop_in(u, mode, name, ndata);
/* printf-style convenience wrapper around unit_write_drop_in_private():
 * formats the varargs with vasprintf() and delegates. As with the
 * non-private variant, va_end() and the allocation-failure check are
 * elided from this excerpt. */
3375 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3376 _cleanup_free_ char *p = NULL;
3384 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3387 va_start(ap, format);
3388 r = vasprintf(&p, format, ap);
3394 return unit_write_drop_in_private(u, mode, name, p);
/* Removes a previously written drop-in snippet 'name' for this unit.
 * The path is computed via unit_drop_in_file(); the unlink itself is
 * elided here, but the visible errno handling shows a missing file
 * (ENOENT) is treated as success — removal is idempotent. */
3397 int unit_remove_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name) {
3398 _cleanup_free_ char *p = NULL, *q = NULL;
3403 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3406 r = unit_drop_in_file(u, mode, name, &p, &q);
3411 r = errno == ENOENT ? 0 : -errno;
/* Converts this unit into a transient one: resets its load state to
 * UNIT_STUB, marks it transient, discards any on-disk fragment path,
 * and writes a stub fragment file ("# Transient stub") into the runtime
 * unit directory (user runtime dir for user managers, otherwise
 * /run/systemd/system). Being under /run, the stub — and hence the
 * unit's configuration — disappears on reboot. */
3419 int unit_make_transient(Unit *u) {
3424 u->load_state = UNIT_STUB;
3426 u->transient = true;
/* Drop any previous fragment; a transient unit gets a fresh stub. */
3428 free(u->fragment_path);
3429 u->fragment_path = NULL;
3431 if (u->manager->running_as == SYSTEMD_USER) {
3432 _cleanup_free_ char *c = NULL;
3434 r = user_runtime_dir(&c);
3440 u->fragment_path = strjoin(c, "/", u->id, NULL);
3441 if (!u->fragment_path)
3446 u->fragment_path = strappend("/run/systemd/system/", u->id);
3447 if (!u->fragment_path)
/* Best effort: ensure the runtime unit dir exists before writing. */
3450 mkdir_p("/run/systemd/system", 0755);
3453 return write_string_file_atomic_label(u->fragment_path, "# Transient stub");
/* Kills the processes belonging to this unit according to its
 * KillContext 'c' and the requested KillOperation 'k': signals the main
 * PID and control PID individually, optionally follows up with SIGHUP,
 * and — for KILL_CONTROL_GROUP (or KILL_MIXED on a final kill) — sweeps
 * the whole cgroup. Returns a bool-as-int: whether the caller should
 * wait for SIGCHLD from the signalled processes.
 * NOTE(review): this excerpt is elided (parameter list, the KILL_KILL
 * switch arm choosing final_kill_signal, several closing braces are not
 * visible); comments below describe only what the visible lines show. */
3456 int unit_kill_context(
3462 bool main_pid_alien) {
3464 int sig, wait_for_exit = false, r;
/* KillMode=none: signal nothing at all. */
3469 if (c->kill_mode == KILL_NONE)
/* Pick the signal for this operation; TERMINATE uses the configured
 * KillSignal=. The KILL_KILL arm is elided here. */
3479 case KILL_TERMINATE:
3480 sig = c->kill_signal;
3483 assert_not_reached("KillOperation unknown");
/* --- Main process: SIGCONT is sent along so a stopped process can
 * actually react to the signal. ESRCH (already gone) is not an error. */
3487 r = kill_and_sigcont(main_pid, sig);
3489 if (r < 0 && r != -ESRCH) {
3490 _cleanup_free_ char *comm = NULL;
3491 get_process_comm(main_pid, &comm);
3493 log_unit_warning_errno(u->id, r, "Failed to kill main process " PID_FMT " (%s): %m", main_pid, strna(comm));
/* Only wait for processes we forked ourselves; alien main PIDs are
 * not our children, so no SIGCHLD will arrive for them. */
3495 if (!main_pid_alien)
3496 wait_for_exit = true;
/* SendSIGHUP= follow-up, pointless after SIGKILL. */
3498 if (c->send_sighup && k != KILL_KILL)
3499 kill(main_pid, SIGHUP);
/* --- Control process: same treatment, but we always wait for it. */
3503 if (control_pid > 0) {
3504 r = kill_and_sigcont(control_pid, sig);
3506 if (r < 0 && r != -ESRCH) {
3507 _cleanup_free_ char *comm = NULL;
3508 get_process_comm(control_pid, &comm);
3510 log_unit_warning_errno(u->id, r, "Failed to kill control process " PID_FMT " (%s): %m", control_pid, strna(comm));
3512 wait_for_exit = true;
3514 if (c->send_sighup && k != KILL_KILL)
3515 kill(control_pid, SIGHUP);
/* --- Whole cgroup: for KillMode=control-group always, for
 * KillMode=mixed only on the final SIGKILL pass. */
3519 if ((c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL)) && u->cgroup_path) {
3520 _cleanup_set_free_ Set *pid_set = NULL;
3522 /* Exclude the main/control pids from being killed via the cgroup */
3523 pid_set = unit_pid_set(main_pid, control_pid);
3527 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, true, false, pid_set);
/* EAGAIN/ESRCH/ENOENT are expected when the cgroup is empty or gone. */
3529 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3530 log_unit_warning_errno(u->id, r, "Failed to kill control group: %m");
3533 /* FIXME: For now, we will not wait for the
3534 * cgroup members to die, simply because
3535 * cgroup notification is unreliable. It
3536 * doesn't work at all in containers, and
3537 * outside of containers it can be confused
3538 * easily by leaving directories in the
3541 /* wait_for_exit = true; */
/* SIGHUP sweep over the cgroup mirrors the per-PID SendSIGHUP= above;
 * result deliberately ignored (best effort). */
3543 if (c->send_sighup && k != KILL_KILL) {
3546 pid_set = unit_pid_set(main_pid, control_pid);
3550 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3555 return wait_for_exit;
/* Registers 'path' as a mount dependency of this unit
 * (RequiresMountsFor=): stores the normalized path in
 * u->requires_mounts_for and indexes the unit in the manager-wide
 * units_requiring_mounts_for hashmap under the path and every prefix of
 * it, so newly appearing mount units can find dependents cheaply.
 * NOTE(review): the excerpt elides the strdup of 'path' into 'p', the
 * per-prefix set insertion, and most error returns. */
3558 int unit_require_mounts_for(Unit *u, const char *path) {
/* VLA scratch buffer for PATH_FOREACH_PREFIX_MORE below; sized to hold
 * any prefix of 'path' plus the NUL. */
3559 char prefix[strlen(path) + 1], *p;
3565 /* Registers a unit for requiring a certain path and all its
3566 * prefixes. We keep a simple array of these paths in the
3567 * unit, since its usually short. However, we build a prefix
3568 * table for all possible prefixes so that new appearing mount
3569 * units can easily determine which units to make themselves a
/* Only absolute paths make sense as mount-point requirements. */
3572 if (!path_is_absolute(path))
/* Normalize (collapse duplicate slashes) and reject unsafe paths. */
3579 path_kill_slashes(p);
3581 if (!path_is_safe(p)) {
/* Already registered: nothing to do (elided branch presumably frees p
 * and returns 0 — confirm). */
3586 if (strv_contains(u->requires_mounts_for, p)) {
/* strv_consume takes ownership of p on success. */
3591 r = strv_consume(&u->requires_mounts_for, p);
/* Index this unit under every prefix of p, creating the manager's
 * hashmap lazily on first use. */
3595 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3598 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3602 if (!u->manager->units_requiring_mounts_for) {
3603 u->manager->units_requiring_mounts_for = hashmap_new(&string_hash_ops);
3604 if (!u->manager->units_requiring_mounts_for)
3618 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
/* Ensures this unit has an ExecRuntime: if a unit we share a namespace
 * with (JoinsNamespaceOf=) already has one, take a reference on that
 * shared runtime; otherwise allocate a fresh one via exec_runtime_make().
 * The runtime slot is found through the vtable's exec_runtime_offset,
 * matching unit_get_exec_runtime() above. */
3634 int unit_setup_exec_runtime(Unit *u) {
3640 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3643 /* Check if there already is an ExecRuntime for this unit? */
3644 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3648 /* Try to get it from somebody else */
3649 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3651 *rt = unit_get_exec_runtime(other);
/* Found a shared runtime: bump its refcount and reuse it. The early
 * return after this is elided from the excerpt. */
3653 exec_runtime_ref(*rt);
/* No peer had one: create a new runtime for this unit. */
3658 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
/* Canonical string names for UnitActiveState values, as exposed on
 * D-Bus and in systemctl output. DEFINE_STRING_TABLE_LOOKUP generates
 * the unit_active_state_to_string()/unit_active_state_from_string()
 * converter pair from this table. */
3661 static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
3662 [UNIT_ACTIVE] = "active",
3663 [UNIT_RELOADING] = "reloading",
3664 [UNIT_INACTIVE] = "inactive",
3665 [UNIT_FAILED] = "failed",
3666 [UNIT_ACTIVATING] = "activating",
3667 [UNIT_DEACTIVATING] = "deactivating"
3670 DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState);