1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
26 #include <sys/timerfd.h>
33 #include "sd-messages.h"
38 #include "path-util.h"
39 #include "load-fragment.h"
40 #include "load-dropin.h"
42 #include "unit-name.h"
43 #include "dbus-unit.h"
45 #include "cgroup-util.h"
49 #include "fileio-label.h"
50 #include "bus-common-errors.h"
/* Dispatch table mapping each UnitType to its type-specific vtable of
 * operations (load, start, stop, dump, ...).  Indexed by the UNIT_*
 * enum via designated initializers.
 * NOTE(review): this excerpt is sampled; the closing "};" of the
 * initializer is not visible here. */
56 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
57 [UNIT_SERVICE] = &service_vtable,
58 [UNIT_SOCKET] = &socket_vtable,
59 [UNIT_BUSNAME] = &busname_vtable,
60 [UNIT_TARGET] = &target_vtable,
61 [UNIT_SNAPSHOT] = &snapshot_vtable,
62 [UNIT_DEVICE] = &device_vtable,
63 [UNIT_MOUNT] = &mount_vtable,
64 [UNIT_AUTOMOUNT] = &automount_vtable,
65 [UNIT_SWAP] = &swap_vtable,
66 [UNIT_TIMER] = &timer_vtable,
67 [UNIT_PATH] = &path_vtable,
68 [UNIT_SLICE] = &slice_vtable,
69 [UNIT_SCOPE] = &scope_vtable
/* Forward declaration: used by merge_dependencies() below before its
 * definition (the definition is outside this excerpt). */
72 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency);
/* Allocate and minimally initialize a Unit object of at least
 * sizeof(Unit) bytes (subtypes embed Unit as their first member and
 * pass their own larger size).  Fields are set to their "invalid /
 * not yet known" sentinels so later loading can detect what has been
 * configured.
 * NOTE(review): the allocation itself, its error handling, the
 * u->manager assignment and the return statement are elided in this
 * sampled excerpt. */
74 Unit *unit_new(Manager *m, size_t size) {
78 assert(size >= sizeof(Unit));
/* Set of all names (aliases) of this unit; owns its strings. */
84 u->names = set_new(&string_hash_ops);
/* Sentinel values: type/state unknown until a name is added and the
 * unit is loaded.  default_dependencies defaults to on, matching the
 * DefaultDependencies= unit-file default. */
91 u->type = _UNIT_TYPE_INVALID;
92 u->deserialized_job = _JOB_TYPE_INVALID;
93 u->default_dependencies = true;
94 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
95 u->unit_file_preset = -1;
96 u->on_failure_job_mode = JOB_REPLACE;
/* Returns true if 'name' is one of this unit's names (primary id or
 * alias).  The cast drops const for the set lookup only; the set is
 * not modified. */
101 bool unit_has_name(Unit *u, const char *name) {
105 return !!set_get(u->names, (char*) name);
/* Initialize the type-specific contexts of a freshly typed unit:
 * cgroup, exec and kill contexts (where the unit type has them), then
 * the per-type init() hook.  Called once the unit's type is known.
 * NOTE(review): the NULL-checks guarding each *_init() call appear to
 * be elided in this sampled excerpt — unit_get_*_context() returns
 * NULL for types without that context. */
108 static void unit_init(Unit *u) {
115 assert(u->type >= 0);
117 cc = unit_get_cgroup_context(u);
119 cgroup_context_init(cc);
121 /* Copy in the manager defaults into the cgroup
122 * context, _before_ the rest of the settings have
123 * been initialized */
125 cc->cpu_accounting = u->manager->default_cpu_accounting;
126 cc->blockio_accounting = u->manager->default_blockio_accounting;
127 cc->memory_accounting = u->manager->default_memory_accounting;
130 ec = unit_get_exec_context(u);
132 exec_context_init(ec);
134 kc = unit_get_kill_context(u);
136 kill_context_init(kc);
/* Finally give the unit type a chance to do its own setup. */
138 if (UNIT_VTABLE(u)->init)
139 UNIT_VTABLE(u)->init(u);
/* Register an additional name (alias) for this unit.  Validates the
 * name, instantiates templates against u->instance, enforces
 * per-type restrictions (no_instances/no_alias), registers the name
 * in both u->names and the manager's global unit hashmap, and — for a
 * unit whose type was not yet known — records type/id/instance and
 * calls unit_init().
 * NOTE(review): most early-return error paths (-EINVAL/-ENOMEM/
 * -E2BIG/-EEXIST etc.) are elided in this sampled excerpt. */
142 int unit_add_name(Unit *u, const char *text) {
143 _cleanup_free_ char *s = NULL, *i = NULL;
/* Template names ("foo@.service") are only acceptable if we already
 * have an instance string to fill in. */
150 if (unit_name_is_template(text)) {
155 s = unit_name_replace_instance(text, u->instance);
161 if (!unit_name_is_valid(s, TEMPLATE_INVALID))
164 assert_se((t = unit_name_to_type(s)) >= 0);
/* A unit cannot carry names of two different types. */
166 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
169 r = unit_name_to_instance(s, &i)ADGE;
169 r = unit_name_to_instance(s, &i);
173 if (i && unit_vtable[t]->no_instances)
176 /* Ensure that this unit is either instanced or not instanced,
/* !u->instance != !i: both names must agree on having an instance. */
178 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
181 if (unit_vtable[t]->no_alias &&
182 !set_isempty(u->names) &&
183 !set_get(u->names, s))
/* Refuse to grow the global name table without bound. */
186 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
189 r = set_put(u->names, s);
197 r = hashmap_put(u->manager->units, s, u);
/* Keep u->names and the manager map consistent on failure. */
199 set_remove(u->names, s);
203 if (u->type == _UNIT_TYPE_INVALID) {
208 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
217 unit_add_to_dbus_queue(u);
/* Select one of the unit's existing names as its primary id.
 * Template names are instantiated against u->instance first; the name
 * must already be in u->names.  Updates u->id/u->instance and
 * announces the change on the bus queue.
 * NOTE(review): the error paths and the actual id assignment are
 * elided in this sampled excerpt. */
221 int unit_choose_id(Unit *u, const char *name) {
222 _cleanup_free_ char *t = NULL;
229 if (unit_name_is_template(name)) {
234 t = unit_name_replace_instance(name, u->instance);
241 /* Selects one of the names of this unit as the id */
242 s = set_get(u->names, (char*) name);
246 r = unit_name_to_instance(s, &i);
255 unit_add_to_dbus_queue(u);
/* Replace the unit's human-readable description.  An empty string is
 * treated specially (presumably stored as NULL — the branch body is
 * elided here).  Frees the previous description and notifies the bus
 * queue.
 * NOTE(review): the strdup() OOM check and the u->description
 * assignment are elided in this sampled excerpt. */
260 int unit_set_description(Unit *u, const char *description) {
265 if (isempty(description))
268 s = strdup(description);
273 free(u->description);
276 unit_add_to_dbus_queue(u);
/* Decide whether this unit object must be kept around.  Returns true
 * when the unit is still needed (and therefore must NOT be garbage
 * collected).  As a side effect, lets an inactive/failed unit release
 * its runtime resources.
 * NOTE(review): several conditions (pending jobs, references,
 * cgroup emptiness) and the return statements are elided in this
 * sampled excerpt. */
280 bool unit_check_gc(Unit *u) {
281 UnitActiveState state;
290 state = unit_active_state(u);
292 /* If the unit is inactive and failed and no job is queued for
293 * it, then release its runtime resources */
294 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
295 UNIT_VTABLE(u)->release_resources)
296 UNIT_VTABLE(u)->release_resources(u);
298 /* But we keep the unit object around for longer when it is
299 * referenced or configured to not be gc'ed */
300 if (state != UNIT_INACTIVE)
303 if (UNIT_VTABLE(u)->no_gc)
/* Give the unit type a veto over collection. */
312 if (UNIT_VTABLE(u)->check_gc)
313 if (UNIT_VTABLE(u)->check_gc(u))
/* Queue the unit for configuration loading.  No-op if it is already
 * queued or past the STUB state. */
319 void unit_add_to_load_queue(Unit *u) {
321 assert(u->type != _UNIT_TYPE_INVALID);
323 if (u->load_state != UNIT_STUB || u->in_load_queue)
326 LIST_PREPEND(load_queue, u->manager->load_queue, u);
327 u->in_load_queue = true;
/* Queue the unit for destruction by the manager's cleanup pass.
 * Idempotent: a no-op if already queued. */
330 void unit_add_to_cleanup_queue(Unit *u) {
333 if (u->in_cleanup_queue)
336 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
337 u->in_cleanup_queue = true;
/* Queue the unit for a garbage-collection check.  Skipped if already
 * queued (for gc or for cleanup, which supersedes gc) or if
 * unit_check_gc() says the unit is still needed. */
340 void unit_add_to_gc_queue(Unit *u) {
343 if (u->in_gc_queue || u->in_cleanup_queue)
346 if (unit_check_gc(u))
349 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
350 u->in_gc_queue = true;
/* Counter lets the manager know whether a gc sweep is pending. */
352 u->manager->n_in_gc_queue ++;
/* Queue the unit so that a D-Bus change/new signal is sent for it.
 * Skipped while the unit is still a stub or already queued; if no
 * bus client is subscribed at all we just pretend the "new" signal
 * was already sent. */
355 void unit_add_to_dbus_queue(Unit *u) {
357 assert(u->type != _UNIT_TYPE_INVALID);
359 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
362 /* Shortcut things if nobody cares */
363 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
364 set_isempty(u->manager->private_buses)) {
365 u->sent_dbus_new_signal = true;
369 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
370 u->in_dbus_queue = true;
/* Free one of u's dependency sets while removing the corresponding
 * back-pointers to u from every unit in the set, then let each of
 * those units be considered for gc.
 * NOTE(review): the final set_free() call is elided in this sampled
 * excerpt. */
373 static void bidi_set_free(Unit *u, Set *s) {
379 /* Frees the set and makes sure we are dropped from the
380 * inverse pointers */
382 SET_FOREACH(other, s, i) {
/* Drop u from every dependency direction of the other unit, not
 * just the inverse of this set's direction. */
385 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
386 set_remove(other->dependencies[d], u);
388 unit_add_to_gc_queue(other);
/* Delete the on-disk configuration of a transient unit: its fragment
 * file and each of its drop-in files (plus, presumably, the then-empty
 * drop-in directories — that part is elided in this sampled excerpt).
 * NOTE(review): the guard checking u->transient appears to be elided
 * here as well. */
394 static void unit_remove_transient(Unit *u) {
402 if (u->fragment_path)
403 unlink(u->fragment_path);
405 STRV_FOREACH(i, u->dropin_paths) {
406 _cleanup_free_ char *p = NULL;
/* Parent directory of the drop-in, so it can be cleaned up too. */
411 r = path_get_parent(*i, &p);
/* Unregister this unit from the manager's path -> units-requiring-
 * that-mount index for every prefix of every RequiresMountsFor= path,
 * then release the path list itself.
 * NOTE(review): the removal of u from set x and the freeing of the
 * emptied set are elided in this sampled excerpt. */
417 static void unit_free_requires_mounts_for(Unit *u) {
420 STRV_FOREACH(j, u->requires_mounts_for) {
/* VLA copy of the path, iterated prefix by prefix ("/", "/a",
 * "/a/b", ...). */
421 char s[strlen(*j) + 1];
423 PATH_FOREACH_PREFIX_MORE(s, *j) {
/* x = set of units registered under prefix s; y = the hashmap's own
 * key string for s. */
427 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
433 if (set_isempty(x)) {
434 hashmap_remove(u->manager->units_requiring_mounts_for, y);
441 strv_free(u->requires_mounts_for);
442 u->requires_mounts_for = NULL;
/* Tear down the type-specific parts of a unit: per-type done() hook,
 * then the exec and cgroup contexts (where present).
 * NOTE(review): the NULL checks guarding exec_context_done()/
 * cgroup_context_done() are elided in this sampled excerpt. */
445 static void unit_done(Unit *u) {
454 if (UNIT_VTABLE(u)->done)
455 UNIT_VTABLE(u)->done(u);
457 ec = unit_get_exec_context(u);
459 exec_context_done(ec);
461 cc = unit_get_cgroup_context(u);
463 cgroup_context_done(cc);
/* Fully destroy a unit object: remove transient config from disk
 * (unless a reload is in progress), announce removal on the bus,
 * unregister every name and dependency back-pointer, unlink the unit
 * from all manager queues and indexes, and free all owned memory.
 * NOTE(review): unit_done(), job freeing, and the final free(u) are
 * elided in this sampled excerpt. */
466 void unit_free(Unit *u) {
/* During a reload units are recreated; do not delete transient
 * config files then. */
473 if (u->manager->n_reloading <= 0)
474 unit_remove_transient(u);
476 bus_unit_send_removed_signal(u);
480 unit_free_requires_mounts_for(u);
/* Drop every alias of this unit from the global name table, but only
 * the entries that still point at u. */
482 SET_FOREACH(t, u->names, i)
483 hashmap_remove_value(u->manager->units, t, u);
/* Remove u from the inverse dependency sets of all related units. */
497 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
498 bidi_set_free(u, u->dependencies[d]);
500 if (u->type != _UNIT_TYPE_INVALID)
501 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
/* Unlink from each manager work queue the unit is still on. */
503 if (u->in_load_queue)
504 LIST_REMOVE(load_queue, u->manager->load_queue, u);
506 if (u->in_dbus_queue)
507 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
509 if (u->in_cleanup_queue)
510 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
512 if (u->in_gc_queue) {
513 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
514 u->manager->n_in_gc_queue--;
517 if (u->in_cgroup_queue)
518 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
520 if (u->cgroup_path) {
521 hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
522 free(u->cgroup_path);
525 set_remove(u->manager->failed_units, u);
526 set_remove(u->manager->startup_units, u);
/* Release owned strings, lists and conditions. */
528 free(u->description);
529 strv_free(u->documentation);
530 free(u->fragment_path);
531 free(u->source_path);
532 strv_free(u->dropin_paths);
535 free(u->job_timeout_reboot_arg);
537 set_free_free(u->names);
539 unit_unwatch_all_pids(u);
541 condition_free_list(u->conditions);
542 condition_free_list(u->asserts);
/* Drop our reference on the slice, and detach everyone who still
 * references us. */
544 unit_ref_unset(&u->slice);
547 unit_ref_unset(u->refs);
/* Return the unit's current active state.  A merged unit delegates to
 * the unit it was merged into; otherwise the type-specific
 * active_state() hook decides. */
552 UnitActiveState unit_active_state(Unit *u) {
555 if (u->load_state == UNIT_MERGED)
556 return unit_active_state(unit_follow_merge(u));
558 /* After a reload it might happen that a unit is not correctly
559 * loaded but still has a process around. That's why we won't
560 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
562 return UNIT_VTABLE(u)->active_state(u);
/* Return the type-specific fine-grained state string (e.g. a
 * service's "running"/"start-pre"). */
565 const char* unit_sub_state_to_string(Unit *u) {
568 return UNIT_VTABLE(u)->sub_state_to_string(u);
/* Move all entries of *other into *s (used by the merge logic).
 * NOTE(review): the surrounding handling for the cases where either
 * set is NULL, and the return statements, are elided in this sampled
 * excerpt. */
571 static int complete_move(Set **s, Set **other) {
581 r = set_move(*s, *other);
/* Transfer all names of 'other' into 'u' and repoint the manager's
 * global name table entries at u.  The assert_se() relies on
 * hashmap_replace() returning 0 because every key already exists. */
592 static int merge_names(Unit *u, Unit *other) {
600 r = complete_move(&u->names, &other->names);
/* other no longer owns any name strings after the move. */
604 set_free_free(other->names);
608 SET_FOREACH(t, u->names, i)
609 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
/* Pre-reserve room in u's dependency set 'd' for everything that
 * merge_dependencies() will move over from 'other', so the later move
 * cannot fail with OOM. */
614 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
619 assert(d < _UNIT_DEPENDENCY_MAX);
622 * If u does not have this dependency set allocated, there is no need
623 * to reserve anything. In that case other's set will be transferred
624 * as a whole to u by complete_move().
626 if (!u->dependencies[d])
629 /* merge_dependencies() will skip a u-on-u dependency */
/* Subtract 1 if other depends on u itself; that entry is dropped
 * rather than moved. */
630 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
632 return set_reserve(u->dependencies[d], n_reserve);
/* Move dependency set 'd' from 'other' into 'u' during a unit merge.
 * First rewrites the back-pointers of every unit that 'other' depends
 * on (replacing other -> u, dropping would-be self-dependencies with
 * a warning), then moves the set itself.  Must be preceded by
 * reserve_dependencies() so the move cannot fail. */
635 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
642 assert(d < _UNIT_DEPENDENCY_MAX);
644 /* Fix backwards pointers */
645 SET_FOREACH(back, other->dependencies[d], i) {
648 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
649 /* Do not add dependencies between u and itself */
/* When back == u: just drop 'other' (a u->other dependency would
 * become u->u). */
651 if (set_remove(back->dependencies[k], other))
652 maybe_warn_about_dependency(u->id, other_id, k);
/* Otherwise swap the entry from other to u in place. */
654 r = set_remove_and_put(back->dependencies[k], other, u);
/* -EEXIST presumably: u was already present, so just drop other. */
656 set_remove(back->dependencies[k], other);
658 assert(r >= 0 || r == -ENOENT);
663 /* Also do not move dependencies on u to itself */
664 back = set_remove(other->dependencies[d], u);
666 maybe_warn_about_dependency(u->id, other_id, d);
668 /* The move cannot fail. The caller must have performed a reservation. */
669 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
671 set_free(other->dependencies[d]);
672 other->dependencies[d] = NULL;
/* Merge unit 'other' into 'u': both must be of the same type, belong
 * to the same manager, agree on being instanced, and 'other' must be
 * an unloaded stub (or not-found) that is inactive.  All names and
 * dependencies migrate to u; other is left in the MERGED state and
 * queued for cleanup.
 * NOTE(review): several early -EINVAL/-EEXIST returns and the final
 * return 0 are elided in this sampled excerpt. */
675 int unit_merge(Unit *u, Unit *other) {
677 const char *other_id = NULL;
682 assert(u->manager == other->manager);
683 assert(u->type != _UNIT_TYPE_INVALID);
/* If other was itself merged, operate on its merge target. */
685 other = unit_follow_merge(other);
690 if (u->type != other->type)
693 if (!u->instance != !other->instance)
696 if (other->load_state != UNIT_STUB &&
697 other->load_state != UNIT_NOT_FOUND)
706 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
/* Stack copy: other->id's storage is freed during merge_names(). */
710 other_id = strdupa(other->id);
712 /* Make reservations to ensure merge_dependencies() won't fail */
713 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
714 r = reserve_dependencies(u, other, d);
716 * We don't rollback reservations if we fail. We don't have
717 * a way to undo reservations. A reservation is not a leak.
724 r = merge_names(u, other);
728 /* Redirect all references */
730 unit_ref_set(other->refs, u);
732 /* Merge dependencies */
733 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
734 merge_dependencies(u, other, other_id, d);
736 other->load_state = UNIT_MERGED;
737 other->merged_into = u;
739 /* If there is still some data attached to the other node, we
740 * don't need it anymore, and can free it. */
741 if (other->load_state != UNIT_STUB)
742 if (UNIT_VTABLE(other)->done)
743 UNIT_VTABLE(other)->done(other);
745 unit_add_to_dbus_queue(u);
746 unit_add_to_cleanup_queue(other);
/* Ensure 'name' refers to this unit: if no unit of that name exists
 * yet, simply add it as an alias of u; otherwise merge the existing
 * unit into u.  Template names are instantiated against u->instance.
 * NOTE(review): error handling and the branch structure around the
 * manager_get_unit() result are elided in this sampled excerpt. */
751 int unit_merge_by_name(Unit *u, const char *name) {
754 _cleanup_free_ char *s = NULL;
759 if (unit_name_is_template(name)) {
763 s = unit_name_replace_instance(name, u->instance);
770 other = manager_get_unit(u->manager, name);
772 r = unit_add_name(u, name);
774 r = unit_merge(u, other);
/* Follow the merged_into chain to the unit this one was (transitively)
 * merged into; returns u itself if it was never merged.  assert_se
 * guarantees a MERGED unit always has a merge target. */
779 Unit* unit_follow_merge(Unit *u) {
782 while (u->load_state == UNIT_MERGED)
783 assert_se(u = u->merged_into);
/* Add the implicit dependencies every unit with an ExecContext needs:
 * mount dependencies for its working/root directory and (for system
 * instances) /tmp + /var/tmp when PrivateTmp= is on, plus an ordering
 * dependency on the journald socket when output goes to the
 * journal/syslog/kmsg.
 * NOTE(review): the early-return error checks after each call are
 * elided in this sampled excerpt. */
788 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
794 if (c->working_directory) {
795 r = unit_require_mounts_for(u, c->working_directory);
800 if (c->root_directory) {
801 r = unit_require_mounts_for(u, c->root_directory);
/* The remaining dependencies only make sense for the system
 * instance of systemd. */
806 if (u->manager->running_as != SYSTEMD_SYSTEM)
809 if (c->private_tmp) {
810 r = unit_require_mounts_for(u, "/tmp");
814 r = unit_require_mounts_for(u, "/var/tmp");
/* Skip the journald dependency when neither stdout nor stderr is
 * connected to any logging transport. */
819 if (c->std_output != EXEC_OUTPUT_KMSG &&
820 c->std_output != EXEC_OUTPUT_SYSLOG &&
821 c->std_output != EXEC_OUTPUT_JOURNAL &&
822 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
823 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
824 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
825 c->std_error != EXEC_OUTPUT_KMSG &&
826 c->std_error != EXEC_OUTPUT_SYSLOG &&
827 c->std_error != EXEC_OUTPUT_JOURNAL &&
828 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
829 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
830 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
833 /* If syslog or kernel logging is requested, make sure our own
834 * logging daemon is run first. */
836 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
/* Human-readable description of the unit; the elided portion
 * presumably falls back to the unit id when none is set. */
843 const char *unit_description(Unit *u) {
847 return u->description;
/* Dump a full human-readable description of the unit's state to 'f',
 * each line prefixed with 'prefix' (used by e.g. systemd-analyze
 * dump).  Covers generic Unit state, names, paths, conditions,
 * dependencies and then delegates to the type-specific dump() hook
 * and the job dumps. */
852 void unit_dump(Unit *u, FILE *f, const char *prefix) {
/* Scratch buffers for formatted timestamps/timespans. */
858 timestamp1[FORMAT_TIMESTAMP_MAX],
859 timestamp2[FORMAT_TIMESTAMP_MAX],
860 timestamp3[FORMAT_TIMESTAMP_MAX],
861 timestamp4[FORMAT_TIMESTAMP_MAX],
862 timespan[FORMAT_TIMESPAN_MAX];
864 _cleanup_set_free_ Set *following_set = NULL;
868 assert(u->type >= 0);
/* prefix2 = prefix + one extra tab for nested sections. */
870 prefix = strempty(prefix);
871 prefix2 = strappenda(prefix, "\t");
875 "%s\tDescription: %s\n"
877 "%s\tUnit Load State: %s\n"
878 "%s\tUnit Active State: %s\n"
879 "%s\tInactive Exit Timestamp: %s\n"
880 "%s\tActive Enter Timestamp: %s\n"
881 "%s\tActive Exit Timestamp: %s\n"
882 "%s\tInactive Enter Timestamp: %s\n"
883 "%s\tGC Check Good: %s\n"
884 "%s\tNeed Daemon Reload: %s\n"
885 "%s\tTransient: %s\n"
888 "%s\tCGroup realized: %s\n"
889 "%s\tCGroup mask: 0x%x\n"
890 "%s\tCGroup members mask: 0x%x\n",
892 prefix, unit_description(u),
893 prefix, strna(u->instance),
894 prefix, unit_load_state_to_string(u->load_state),
895 prefix, unit_active_state_to_string(unit_active_state(u)),
896 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
897 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
898 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
899 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
900 prefix, yes_no(unit_check_gc(u)),
901 prefix, yes_no(unit_need_daemon_reload(u)),
902 prefix, yes_no(u->transient),
903 prefix, strna(unit_slice_name(u)),
904 prefix, strna(u->cgroup_path),
905 prefix, yes_no(u->cgroup_realized),
906 prefix, u->cgroup_realized_mask,
907 prefix, u->cgroup_members_mask);
/* One line per name/alias and documentation URI. */
909 SET_FOREACH(t, u->names, i)
910 fprintf(f, "%s\tName: %s\n", prefix, t);
912 STRV_FOREACH(j, u->documentation)
913 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
915 following = unit_following(u);
917 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
919 r = unit_following_set(u, &following_set);
923 SET_FOREACH(other, following_set, i)
924 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
927 if (u->fragment_path)
928 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
931 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
933 STRV_FOREACH(j, u->dropin_paths)
934 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
936 if (u->job_timeout > 0)
937 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
939 if (u->job_timeout_action != FAILURE_ACTION_NONE)
940 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
942 if (u->job_timeout_reboot_arg)
943 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
945 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
946 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
948 if (dual_timestamp_is_set(&u->condition_timestamp))
950 "%s\tCondition Timestamp: %s\n"
951 "%s\tCondition Result: %s\n",
952 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
953 prefix, yes_no(u->condition_result)),
953 prefix, yes_no(u->condition_result));
955 if (dual_timestamp_is_set(&u->assert_timestamp))
957 "%s\tAssert Timestamp: %s\n"
958 "%s\tAssert Result: %s\n",
959 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
960 prefix, yes_no(u->assert_result));
/* One line per dependency, across all dependency directions. */
962 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
965 SET_FOREACH(other, u->dependencies[d], i)
966 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
969 if (!strv_isempty(u->requires_mounts_for)) {
971 "%s\tRequiresMountsFor:", prefix);
973 STRV_FOREACH(j, u->requires_mounts_for)
974 fprintf(f, " %s", *j);
/* The loaded-only settings section, or merge/error info instead. */
979 if (u->load_state == UNIT_LOADED) {
982 "%s\tStopWhenUnneeded: %s\n"
983 "%s\tRefuseManualStart: %s\n"
984 "%s\tRefuseManualStop: %s\n"
985 "%s\tDefaultDependencies: %s\n"
986 "%s\tOnFailureJobMode: %s\n"
987 "%s\tIgnoreOnIsolate: %s\n"
988 "%s\tIgnoreOnSnapshot: %s\n",
989 prefix, yes_no(u->stop_when_unneeded),
990 prefix, yes_no(u->refuse_manual_start),
991 prefix, yes_no(u->refuse_manual_stop),
992 prefix, yes_no(u->default_dependencies),
993 prefix, job_mode_to_string(u->on_failure_job_mode),
994 prefix, yes_no(u->ignore_on_isolate),
995 prefix, yes_no(u->ignore_on_snapshot));
997 if (UNIT_VTABLE(u)->dump)
998 UNIT_VTABLE(u)->dump(u, f, prefix2);
1000 } else if (u->load_state == UNIT_MERGED)
1002 "%s\tMerged into: %s\n",
1003 prefix, u->merged_into->id);
1004 else if (u->load_state == UNIT_ERROR)
1005 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1009 job_dump(u->job, f, prefix2);
1012 job_dump(u->nop_job, f, prefix2);
1016 /* Common implementation for multiple backends */
/* Load the unit's fragment (.service/.socket/... file) and its
 * drop-in snippets; unlike the _optional variant below, a unit that
 * is still a stub after fragment loading is an error (-ENOENT in the
 * elided branch). */
1017 int unit_load_fragment_and_dropin(Unit *u) {
1022 /* Load a .{service,socket,...} file */
1023 r = unit_load_fragment(u);
1027 if (u->load_state == UNIT_STUB)
1030 /* Load drop-in directory data */
1031 r = unit_load_dropin(unit_follow_merge(u));
1038 /* Common implementation for multiple backends */
/* Like unit_load_fragment_and_dropin(), but a missing fragment is
 * tolerated: a stub unit is simply promoted to LOADED so drop-ins
 * alone can configure it. */
1039 int unit_load_fragment_and_dropin_optional(Unit *u) {
1044 /* Same as unit_load_fragment_and_dropin(), but whether
1045 * something can be loaded or not doesn't matter. */
1047 /* Load a .service file */
1048 r = unit_load_fragment(u);
1052 if (u->load_state == UNIT_STUB)
1053 u->load_state = UNIT_LOADED;
1055 /* Load drop-in directory data */
1056 r = unit_load_dropin(unit_follow_merge(u));
/* Add the implicit target->After=unit ordering dependency for a unit
 * pulled in by a target, unless either side disabled default
 * dependencies, either side is not fully loaded, or the target is
 * already ordered Before= the unit (which would create a loop). */
1063 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1067 if (target->type != UNIT_TARGET)
1070 /* Only add the dependency if both units are loaded, so that
1071 * that loop check below is reliable */
1072 if (u->load_state != UNIT_LOADED ||
1073 target->load_state != UNIT_LOADED)
1076 /* If either side wants no automatic dependencies, then let's
1078 if (!u->default_dependencies ||
1079 !target->default_dependencies)
1082 /* Don't create loops */
1083 if (set_get(target->dependencies[UNIT_BEFORE], u))
1086 return unit_add_dependency(target, UNIT_AFTER, u, true);
/* For every target that pulls this unit in (via the dependency kinds
 * listed in deps[] — RequiredBy, WantedBy, etc.; the full list is
 * elided in this sampled excerpt), add the default target ordering
 * dependency. */
1089 static int unit_add_target_dependencies(Unit *u) {
1091 static const UnitDependency deps[] = {
1093 UNIT_REQUIRED_BY_OVERRIDABLE,
1105 for (k = 0; k < ELEMENTSOF(deps); k++)
1106 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1107 r = unit_add_default_target_dependency(u, target);
/* Order a cgroup-managing unit After= and Wants= its configured
 * slice, defaulting to the root slice ("-.slice") when no slice was
 * set — except for the root slice itself. */
1115 static int unit_add_slice_dependencies(Unit *u) {
1118 if (!unit_get_cgroup_context(u))
1121 if (UNIT_ISSET(u->slice))
1122 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_WANTS, UNIT_DEREF(u->slice), true);
1124 if (streq(u->id, SPECIAL_ROOT_SLICE))
1127 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, SPECIAL_ROOT_SLICE, NULL, true);
/* For each RequiresMountsFor= path, find the mount unit of every path
 * prefix and add After= (plus Requires= when the mount has its own
 * fragment, i.e. is not synthesized purely from /proc/self/mountinfo)
 * dependencies on it.
 * NOTE(review): the error checks after each call are elided in this
 * sampled excerpt. */
1130 static int unit_add_mount_dependencies(Unit *u) {
1136 STRV_FOREACH(i, u->requires_mounts_for) {
/* VLA scratch buffer iterated prefix by prefix. */
1137 char prefix[strlen(*i) + 1];
1139 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1142 r = manager_get_unit_by_path(u->manager, prefix, ".mount", &m);
1150 if (m->load_state != UNIT_LOADED)
1153 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1157 if (m->fragment_path) {
1158 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
/* Register the unit in the manager's startup_units set if it
 * configures any Startup*= cgroup parameter ((unsigned long)-1 is
 * the "unset" sentinel), so boot-time resource settings can be
 * flushed once startup finishes. */
1168 static int unit_add_startup_units(Unit *u) {
1172 c = unit_get_cgroup_context(u);
1176 if (c->startup_cpu_shares == (unsigned long) -1 &&
1177 c->startup_blockio_weight == (unsigned long) -1)
1180 r = set_put(u->manager->startup_units, u);
/* Load the unit's configuration: dequeue it from the load queue, run
 * the type-specific load() hook, then — if it loaded — add the
 * implicit target/slice/mount/startup bookkeeping and validate
 * OnFailureJobMode=isolate constraints.  On failure, jump to the
 * 'fail' path that marks the unit NOT_FOUND or ERROR.
 * NOTE(review): several jumps to the fail label and return statements
 * are elided in this sampled excerpt. */
1187 int unit_load(Unit *u) {
1192 if (u->in_load_queue) {
1193 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1194 u->in_load_queue = false;
1197 if (u->type == _UNIT_TYPE_INVALID)
1200 if (u->load_state != UNIT_STUB)
1203 if (UNIT_VTABLE(u)->load) {
1204 r = UNIT_VTABLE(u)->load(u);
1209 if (u->load_state == UNIT_STUB) {
1214 if (u->load_state == UNIT_LOADED) {
1216 r = unit_add_target_dependencies(u);
1220 r = unit_add_slice_dependencies(u);
1224 r = unit_add_mount_dependencies(u);
1228 r = unit_add_startup_units(u);
/* isolate can only sensibly target a single unit. */
1232 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1233 log_unit_error(u->id, "More than one OnFailure= dependencies specified for %s but OnFailureJobMode=isolate set. Refusing.", u->id);
1238 unit_update_cgroup_members_masks(u);
/* A unit is MERGED iff it has a merge target. */
1241 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1243 unit_add_to_dbus_queue(unit_follow_merge(u));
1244 unit_add_to_gc_queue(u);
/* fail path: a stub that failed to load was simply not found;
 * anything else is a hard load error. */
1249 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1251 unit_add_to_dbus_queue(u);
1252 unit_add_to_gc_queue(u);
1254 log_unit_debug(u->id, "Failed to load configuration for %s: %s",
1255 u->id, strerror(-r));
/* Evaluate a Condition*=/Assert*= list: all non-trigger conditions
 * must hold, and at least one trigger ("|") condition must hold if
 * any exist.  An empty list is true.  Evaluation failures are logged
 * and treated as failed conditions.
 * NOTE(review): some logging arguments and the early returns inside
 * the loop are elided in this sampled excerpt. */
1260 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1267 /* If the condition list is empty, then it is true */
1271 /* Otherwise, if all of the non-trigger conditions apply and
1272 * if any of the trigger conditions apply (unless there are
1273 * none) we return true */
1274 LIST_FOREACH(conditions, c, first) {
1277 r = condition_test(c);
1279 log_unit_warning(u->id,
1280 "Couldn't determine result for %s=%s%s%s for %s, assuming failed: %s",
1282 c->trigger ? "|" : "",
1283 c->negate ? "!" : "",
1288 log_unit_debug(u->id,
1289 "%s=%s%s%s %s for %s.",
1291 c->trigger ? "|" : "",
1292 c->negate ? "!" : "",
1294 condition_result_to_string(c->result),
/* A failed non-trigger condition fails the whole list. */
1297 if (!c->trigger && r <= 0)
/* Track whether any trigger condition has succeeded so far. */
1300 if (c->trigger && triggered <= 0)
/* triggered < 0 means there were no trigger conditions at all. */
1304 return triggered != 0;
/* Evaluate the unit's Condition*= list, recording the evaluation
 * timestamp and caching the result in u->condition_result. */
1307 static bool unit_condition_test(Unit *u) {
1310 dual_timestamp_get(&u->condition_timestamp);
1311 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1313 return u->condition_result;
/* Evaluate the unit's Assert*= list (same machinery as conditions,
 * but a failure fails the start job rather than skipping it). */
1316 static bool unit_assert_test(Unit *u) {
1319 dual_timestamp_get(&u->assert_timestamp);
1320 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1322 return u->assert_result;
/* Look up the unit type's console status-message format string for a
 * start or stop job; non-start/stop jobs have none (the elided branch
 * presumably returns NULL). */
1325 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1326 const UnitStatusMessageFormats *format_table;
1330 assert(t < _JOB_TYPE_MAX);
1332 if (t != JOB_START && t != JOB_STOP)
1335 format_table = &UNIT_VTABLE(u)->status_message_formats;
/* Index 0 = starting, 1 = stopping. */
1339 return format_table->starting_stopping[t == JOB_STOP];
/* Like unit_get_status_message_format(), but falls back to generic
 * "Starting/Stopping/Reloading %s." strings when the unit type does
 * not define its own. */
1342 _pure_ static const char *unit_get_status_message_format_try_harder(Unit *u, JobType t) {
1347 assert(t < _JOB_TYPE_MAX);
1349 format = unit_get_status_message_format(u, t);
1353 /* Return generic strings */
1355 return "Starting %s.";
1356 else if (t == JOB_STOP)
1357 return "Stopping %s.";
1358 else if (t == JOB_RELOAD)
1359 return "Reloading %s.";
/* Print a "Starting/Stopping ..." console status message for this
 * unit, but only when its type provides a format string (selected
 * units/operations only).  The non-literal-format warning is disabled
 * deliberately: the format comes from the trusted vtable table. */
1364 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1369 /* We only print status messages for selected units on
1370 * selected operations. */
1372 format = unit_get_status_message_format(u, t);
1376 DISABLE_WARNING_FORMAT_NONLITERAL;
1377 unit_status_printf(u, "", format);
/* Log a structured journal entry for a start/stop/reload transition
 * with the matching SD_MESSAGE_UNIT_* message id.  Skipped when we
 * are logging to the console anyway (the message would be shown twice
 * next to the status line). */
1381 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1388 if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
1391 if (log_on_console())
1394 /* We log status messages for all units and all operations. */
1396 format = unit_get_status_message_format_try_harder(u, t);
/* Format comes from the trusted vtable/fallback table, hence the
 * suppressed non-literal-format warning. */
1400 DISABLE_WARNING_FORMAT_NONLITERAL;
1401 snprintf(buf, sizeof(buf), format, unit_description(u));
1405 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1406 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1407 SD_MESSAGE_UNIT_RELOADING;
1409 log_unit_struct(u->id,
1411 LOG_MESSAGE_ID(mid),
1412 LOG_MESSAGE("%s", buf),
/* Errors (documented return codes of unit_start): */
1417 * -EBADR: This unit type does not support starting.
1418 * -EALREADY: Unit is already started.
1419 * -EAGAIN: An operation is already in progress. Retry later.
1420 * -ECANCELED: Too many requests for now.
1421 * -EPROTO: Assert failed
/* Start the unit: verify it is loaded and startable, evaluate
 * conditions/asserts (unless already activating), redirect to the
 * unit we follow (if any), and invoke the type-specific start() hook.
 * NOTE(review): several early-return statements and the 'following'
 * NULL check are elided in this sampled excerpt. */
1423 int unit_start(Unit *u) {
1424 UnitActiveState state;
1430 if (u->load_state != UNIT_LOADED)
1433 /* If this is already started, then this will succeed. Note
1434 * that this will even succeed if this unit is not startable
1435 * by the user. This is relied on to detect when we need to
1436 * wait for units and when waiting is finished. */
1437 state = unit_active_state(u);
1438 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1441 /* If the conditions failed, don't do anything at all. If we
1442 * already are activating this call might still be useful to
1443 * speed up activation in case there is some hold-off time,
1444 * but we don't want to recheck the condition in that case. */
1445 if (state != UNIT_ACTIVATING &&
1446 !unit_condition_test(u)) {
1447 log_unit_debug(u->id, "Starting of %s requested but condition failed. Not starting unit.", u->id);
1451 /* If the asserts failed, fail the entire job */
1452 if (state != UNIT_ACTIVATING &&
1453 !unit_assert_test(u)) {
1454 log_unit_debug(u->id, "Starting of %s requested but asserts failed.", u->id);
1458 /* Forward to the main object, if we aren't it. */
1459 following = unit_following(u);
1461 log_unit_debug(u->id, "Redirecting start request from %s to %s.", u->id, following->id);
1462 return unit_start(following);
/* Types may declare themselves unsupported on this system (e.g.
 * missing kernel features). */
1465 if (UNIT_VTABLE(u)->supported && !UNIT_VTABLE(u)->supported(u->manager))
1468 /* If it is stopped, but we cannot start it, then fail */
1469 if (!UNIT_VTABLE(u)->start)
1472 /* We don't suppress calls to ->start() here when we are
1473 * already starting, to allow this request to be used as a
1474 * "hurry up" call, for example when the unit is in some "auto
1475 * restart" state where it waits for a holdoff timer to elapse
1476 * before it will start again. */
1478 unit_add_to_dbus_queue(u);
1480 r = UNIT_VTABLE(u)->start(u);
1484 /* Log if the start function actually did something */
1485 unit_status_log_starting_stopping_reloading(u, JOB_START);
1486 unit_status_print_starting_stopping(u, JOB_START);
/* True if this unit type implements a start() operation. */
1490 bool unit_can_start(Unit *u) {
1493 return !!UNIT_VTABLE(u)->start;
/* True if the unit may be used as an isolate target: it must be
 * startable and (per the condition elided in this sampled excerpt,
 * presumably) have AllowIsolate= set. */
1496 bool unit_can_isolate(Unit *u) {
1499 return unit_can_start(u) &&
/* Errors (documented return codes of unit_stop): */
1504 * -EBADR: This unit type does not support stopping.
1505 * -EALREADY: Unit is already stopped.
1506 * -EAGAIN: An operation is already in progress. Retry later.
/* Stop the unit: no-op if already inactive/failed, redirect to the
 * followed unit, then invoke the type-specific stop() hook.
 * NOTE(review): several early-return statements and the 'following'
 * NULL check are elided in this sampled excerpt. */
1508 int unit_stop(Unit *u) {
1509 UnitActiveState state;
1515 state = unit_active_state(u);
1516 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1519 following = unit_following(u);
1521 log_unit_debug(u->id, "Redirecting stop request from %s to %s.", u->id, following->id);
1522 return unit_stop(following);
1525 if (!UNIT_VTABLE(u)->stop)
1528 unit_add_to_dbus_queue(u);
1530 r = UNIT_VTABLE(u)->stop(u);
1534 unit_status_log_starting_stopping_reloading(u, JOB_STOP);
1535 unit_status_print_starting_stopping(u, JOB_STOP);
/* Errors (documented return codes of unit_reload): */
1540 * -EBADR: This unit type does not support reloading.
1541 * -ENOEXEC: Unit is not started.
1542 * -EAGAIN: An operation is already in progress. Retry later.
/* Reload the unit: requires it to be loaded, reloadable and active
 * (a reload of an already-reloading unit is a no-op), redirects to
 * the followed unit, then invokes the type-specific reload() hook.
 * NOTE(review): early-return statements and the 'following' NULL
 * check are elided in this sampled excerpt. */
1544 int unit_reload(Unit *u) {
1545 UnitActiveState state;
1551 if (u->load_state != UNIT_LOADED)
1554 if (!unit_can_reload(u))
1557 state = unit_active_state(u);
1558 if (state == UNIT_RELOADING)
1561 if (state != UNIT_ACTIVE) {
1562 log_unit_warning(u->id, "Unit %s cannot be reloaded because it is inactive.", u->id);
1566 following = unit_following(u);
1568 log_unit_debug(u->id, "Redirecting reload request from %s to %s.", u->id, following->id);
1569 return unit_reload(following);
1572 unit_add_to_dbus_queue(u);
1574 r = UNIT_VTABLE(u)->reload(u);
1578 unit_status_log_starting_stopping_reloading(u, JOB_RELOAD);
/* True if this unit can currently be reloaded: the type must
 * implement reload(), and — if it provides a can_reload() predicate —
 * that predicate decides (a missing predicate means always
 * reloadable, per the elided branch). */
1582 bool unit_can_reload(Unit *u) {
1585 if (!UNIT_VTABLE(u)->reload)
1588 if (!UNIT_VTABLE(u)->can_reload)
1591 return UNIT_VTABLE(u)->can_reload(u);
/* If the unit has StopWhenUnneeded= set, is active/activating, and no
 * unit that requires/wants/binds to it is active or pending, enqueue
 * a stop job for it.
 * NOTE(review): the early 'return' statements inside the SET_FOREACH
 * checks are elided in this sampled excerpt. */
1594 static void unit_check_unneeded(Unit *u) {
1600 /* If this service shall be shut down when unneeded then do
1603 if (!u->stop_when_unneeded)
1606 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
/* Any active or pending dependent keeps us alive. */
1609 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY], i)
1610 if (unit_active_or_pending(other))
1613 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
1614 if (unit_active_or_pending(other))
1617 SET_FOREACH(other, u->dependencies[UNIT_WANTED_BY], i)
1618 if (unit_active_or_pending(other))
1621 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1622 if (unit_active_or_pending(other))
1625 log_unit_info(u->id, "Unit %s is not needed anymore. Stopping.", u->id)
1625 log_unit_info(u->id, "Unit %s is not needed anymore. Stopping.", u->id);
1627 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1628 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* Enforce BindsTo= semantics: if any unit this one binds to has gone
 * inactive/failed, stop this unit too.
 * NOTE(review): the guards (u->ignore_on_*, pending-job checks) and
 * the "nothing inactive found" early return are elided in this
 * sampled excerpt. */
1631 static void unit_check_binds_to(Unit *u) {
1641 if (unit_active_state(u) != UNIT_ACTIVE)
1644 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
/* Skip partners that are still up. */
1648 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1657 log_unit_info(u->id, "Unit %s is bound to inactive service. Stopping, too.", u->id);
1659 /* A unit we need to run is gone. Sniff. Let's stop this. */
1660 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* When a unit became active outside of a transaction (e.g. a device
 * appeared), start its Requires=/BindsTo= dependencies (hard: replace
 * existing jobs), its RequiresOverridable=/Wants= dependencies (soft:
 * fail rather than replace), and stop everything it conflicts with.
 * Dependencies that are also ordered After= us are skipped — the
 * ordering would have started them through a normal transaction. */
1663 static void retroactively_start_dependencies(Unit *u) {
1668 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1670 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1671 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1672 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1673 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1675 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1676 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1677 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1678 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1680 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1681 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1682 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1683 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1685 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1686 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1687 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1688 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
/* Conflicting units are stopped unconditionally. */
1690 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1691 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1692 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1694 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1695 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1696 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* Counterpart of retroactively_start_dependencies(): when this unit
 * went down without a job, stop every still-running unit that is
 * BoundBy= this one. */
1699 static void retroactively_stop_dependencies(Unit *u) {
1704 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1706 /* Pull down units which are bound to us recursively if enabled */
1707 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1708 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1709 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* After this unit stopped, re-evaluate every still-running unit it
 * depended on (Requires=, Wants=, Requisite=, BindsTo= and their
 * overridable variants) — they may now be unneeded and eligible for
 * stop via unit_check_unneeded(). */
1712 static void check_unneeded_dependencies(Unit *u) {
1717 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1719 /* Garbage collect services that might not be needed anymore, if enabled */
1720 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1721 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1722 unit_check_unneeded(other);
1723 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1724 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1725 unit_check_unneeded(other);
1726 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1727 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1728 unit_check_unneeded(other);
1729 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1730 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1731 unit_check_unneeded(other);
1732 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1733 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1734 unit_check_unneeded(other);
1735 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1736 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1737 unit_check_unneeded(other);
/* Enqueue start jobs for all OnFailure= dependencies of this unit,
 * using the unit's configured OnFailureJobMode=. Enqueue errors are
 * logged but do not abort the loop. */
1740 void unit_start_on_failure(Unit *u) {
1746 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1749 log_unit_info(u->id, "Triggering OnFailure= dependencies of %s.", u->id);
1751 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1754 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, true, NULL, NULL);
1756 log_unit_error_errno(u->id, r, "Failed to enqueue OnFailure= job: %m");
/* Inform every unit that is TriggeredBy= this one about a state change,
 * via the per-type trigger_notify() vtable hook (if implemented). */
1760 void unit_trigger_notify(Unit *u) {
1766 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1767 if (UNIT_VTABLE(other)->trigger_notify)
1768 UNIT_VTABLE(other)->trigger_notify(other, u);
/* Central state-change notification, invoked by the per-type code for
 * every low-level state transition (os = old state, ns = new state).
 * Responsibilities visible here: timestamp bookkeeping, failed-unit
 * tracking, cgroup cleanup, finishing/invalidating the pending job,
 * retroactive dependency start/stop, OnFailure= triggering, audit and
 * plymouth notification, and re-queueing for D-Bus/GC. */
1771 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1776 assert(os < _UNIT_ACTIVE_STATE_MAX);
1777 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1779 /* Note that this is called for all low-level state changes,
1780 * even if they might map to the same high-level
1781 * UnitActiveState! That means that ns == os is an expected
1782 * behavior here. For example: if a mount point is remounted
1783 * this function will be called too! */
/* Timestamps are only recorded when not deserializing during reload. */
1787 /* Update timestamps for state changes */
1788 if (m->n_reloading <= 0) {
1791 dual_timestamp_get(&ts);
1793 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1794 u->inactive_exit_timestamp = ts;
1795 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1796 u->inactive_enter_timestamp = ts;
1798 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1799 u->active_enter_timestamp = ts;
1800 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1801 u->active_exit_timestamp = ts;
1804 /* Keep track of failed units */
1805 if (ns == UNIT_FAILED)
1806 set_put(u->manager->failed_units, u);
1808 set_remove(u->manager->failed_units, u);
1810 /* Make sure the cgroup is always removed when we become inactive */
1811 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1812 unit_destroy_cgroup_if_empty(u);
1814 /* Note that this doesn't apply to RemainAfterExit services exiting
1815 * successfully, since there's no change of state in that case. Which is
1816 * why it is handled in service_set_state() */
1817 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
/* Console bookkeeping: units whose ExecContext may touch the console
 * are counted; when the count reaches zero the console is free. */
1820 ec = unit_get_exec_context(u);
1821 if (ec && exec_context_may_touch_console(ec)) {
1822 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1825 if (m->n_on_console == 0)
1826 /* unset no_console_output flag, since the console is free */
1827 m->no_console_output = false;
/* Job handling: a waiting job may become runnable; a running job may be
 * finished or invalidated by this state change. */
1836 if (u->job->state == JOB_WAITING)
1838 /* So we reached a different state for this
1839 * job. Let's see if we can run it now if it
1840 * failed previously due to EAGAIN. */
1841 job_add_to_run_queue(u->job);
1843 /* Let's check whether this state change constitutes a
1844 * finished job, or maybe contradicts a running job and
1845 * hence needs to invalidate jobs. */
1847 switch (u->job->type) {
1850 case JOB_VERIFY_ACTIVE:
1852 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1853 job_finish_and_invalidate(u->job, JOB_DONE, true);
1854 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1857 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1858 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1864 case JOB_RELOAD_OR_START:
1866 if (u->job->state == JOB_RUNNING) {
1867 if (ns == UNIT_ACTIVE)
1868 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1869 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1872 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1873 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1881 case JOB_TRY_RESTART:
1883 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1884 job_finish_and_invalidate(u->job, JOB_DONE, true);
1885 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1887 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1893 assert_not_reached("Job type unknown");
1899 if (m->n_reloading <= 0) {
1901 /* If this state change happened without being
1902 * requested by a job, then let's retroactively start
1903 * or stop dependencies. We skip that step when
1904 * deserializing, since we don't want to create any
1905 * additional jobs just because something is already
1909 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1910 retroactively_start_dependencies(u);
1911 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1912 retroactively_stop_dependencies(u);
1915 /* stop unneeded units regardless if going down was expected or not */
1916 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1917 check_unneeded_dependencies(u);
1919 if (ns != os && ns == UNIT_FAILED) {
1920 log_unit_notice(u->id, "Unit %s entered failed state.", u->id);
1921 unit_start_on_failure(u);
1925 /* Some names are special */
1926 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1928 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1929 /* The bus might have just become available,
1930 * hence try to connect to it, if we aren't
1934 if (u->type == UNIT_SERVICE &&
1935 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1936 m->n_reloading <= 0) {
1937 /* Write audit record if we have just finished starting up */
1938 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1942 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1943 manager_send_unit_plymouth(m, u);
1947 /* We don't care about D-Bus here, since we'll get an
1948 * asynchronous notification for it anyway. */
1950 if (u->type == UNIT_SERVICE &&
1951 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1952 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1953 m->n_reloading <= 0) {
1955 /* Hmm, if there was no start record written
1956 * write it now, so that we always have a nice
1959 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1961 if (ns == UNIT_INACTIVE)
1962 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1964 /* Write audit record if we have just finished shutting down */
1965 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
1967 u->in_audit = false;
/* Finally: journal recheck, trigger propagation, and — outside of
 * reload — a check whether we became unneeded or lost a BindsTo=. */
1971 manager_recheck_journal(m);
1972 unit_trigger_notify(u);
1974 if (u->manager->n_reloading <= 0) {
1975 /* Maybe we finished startup and are now ready for
1976 * being stopped because unneeded? */
1977 unit_check_unneeded(u);
1979 /* Maybe we finished startup, but something we needed
1980 * has vanished? Let's die then. (This happens when
1981 * something BindsTo= to a Type=oneshot unit, as these
1982 * units go directly from starting to inactive,
1983 * without ever entering started.) */
1984 unit_check_binds_to(u);
1987 unit_add_to_dbus_queue(u);
1988 unit_add_to_gc_queue(u);
/* Register interest in a PID: record it in u->pids and in the manager's
 * watch_pids1 hashmap, falling back to watch_pids2 when another unit
 * already watches the same PID in watch_pids1. Returns negative errno
 * on allocation failure. */
1991 int unit_watch_pid(Unit *u, pid_t pid) {
1997 /* Watch a specific PID. We only support one or two units
1998 * watching each PID for now, not more. */
2000 r = set_ensure_allocated(&u->pids, NULL);
2004 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2008 r = hashmap_put(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
/* First map was taken by another unit — try the secondary map. */
2010 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2014 r = hashmap_put(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2017 q = set_put(u->pids, LONG_TO_PTR(pid));
/* Drop a single watched PID from both manager maps and the unit set. */
2024 void unit_unwatch_pid(Unit *u, pid_t pid) {
2028 hashmap_remove_value(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
2029 hashmap_remove_value(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2030 set_remove(u->pids, LONG_TO_PTR(pid));
/* Unwatch every PID this unit registered, draining u->pids. */
2033 void unit_unwatch_all_pids(Unit *u) {
2036 while (!set_isempty(u->pids))
2037 unit_unwatch_pid(u, PTR_TO_LONG(set_first(u->pids)));
/* Recursively walk a cgroup path: watch every PID found in it, then
 * recurse into each subgroup. The first error encountered is remembered
 * in 'ret' but the walk continues (best effort). */
2043 static int unit_watch_pids_in_path(Unit *u, const char *path) {
2044 _cleanup_closedir_ DIR *d = NULL;
2045 _cleanup_fclose_ FILE *f = NULL;
2051 /* Adds all PIDs from a specific cgroup path to the set of PIDs we watch. */
2053 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2057 while ((r = cg_read_pid(f, &pid)) > 0) {
2058 r = unit_watch_pid(u, pid);
2059 if (r < 0 && ret >= 0)
2062 if (r < 0 && ret >= 0)
2065 } else if (ret >= 0)
2068 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2072 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2073 _cleanup_free_ char *p = NULL;
2075 p = strjoin(path, "/", fn, NULL);
/* Recurse into the child cgroup. */
2081 r = unit_watch_pids_in_path(u, p);
2082 if (r < 0 && ret >= 0)
2085 if (r < 0 && ret >= 0)
2088 } else if (ret >= 0)
/* Convenience wrapper: watch every PID in this unit's own cgroup (no-op
 * when the unit has no cgroup path). */
2094 int unit_watch_all_pids(Unit *u) {
2097 /* Adds all PIDs from our cgroup to the set of PIDs we watch */
2099 if (!u->cgroup_path)
2102 return unit_watch_pids_in_path(u, u->cgroup_path);
/* Drop watches for PIDs that have already been waited for, keeping the
 * two given exception PIDs (e.g. main/control) untouched. */
2105 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2111 /* Cleans dead PIDs from our list */
2113 SET_FOREACH(e, u->pids, i) {
2114 pid_t pid = PTR_TO_LONG(e);
2116 if (pid == except1 || pid == except2)
2119 if (!pid_is_unwaited(pid))
2120 unit_unwatch_pid(u, pid);
/* Whether job type j makes sense for this unit: start-like jobs need
 * unit_can_start(), reload jobs need unit_can_reload(), combined jobs
 * need both. */
2124 bool unit_job_is_applicable(Unit *u, JobType j) {
2126 assert(j >= 0 && j < _JOB_TYPE_MAX);
2130 case JOB_VERIFY_ACTIVE:
2137 case JOB_TRY_RESTART:
2138 return unit_can_start(u);
2141 return unit_can_reload(u);
2143 case JOB_RELOAD_OR_START:
2144 return unit_can_reload(u) && unit_can_start(u);
2147 assert_not_reached("Invalid job type");
/* Log a warning when a dependency between 'id' and 'other' is dropped
 * (typically because the two units were merged or refer to the same
 * unit). Ordering-only and reference-only dependency types fall through
 * the first case groups silently. */
2151 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency) {
2154 switch (dependency) {
/* Dependency types worth warning about. */
2156 case UNIT_REQUIRES_OVERRIDABLE:
2158 case UNIT_REQUISITE:
2159 case UNIT_REQUISITE_OVERRIDABLE:
2162 case UNIT_REQUIRED_BY:
2163 case UNIT_REQUIRED_BY_OVERRIDABLE:
2164 case UNIT_WANTED_BY:
2166 case UNIT_CONSISTS_OF:
2167 case UNIT_REFERENCES:
2168 case UNIT_REFERENCED_BY:
2169 case UNIT_PROPAGATES_RELOAD_TO:
2170 case UNIT_RELOAD_PROPAGATED_FROM:
2171 case UNIT_JOINS_NAMESPACE_OF:
2174 case UNIT_CONFLICTS:
2175 case UNIT_CONFLICTED_BY:
2178 case UNIT_ON_FAILURE:
2180 case UNIT_TRIGGERED_BY:
/* Same name on both sides: dependency on itself, dropped outright;
 * otherwise it was dropped because the units were merged. */
2181 if (streq_ptr(id, other))
2182 log_unit_warning(id, "Dependency %s=%s dropped from unit %s",
2183 unit_dependency_to_string(dependency), id, other);
2185 log_unit_warning(id, "Dependency %s=%s dropped from unit %s merged into %s",
2186 unit_dependency_to_string(dependency), id,
2190 case _UNIT_DEPENDENCY_MAX:
2191 case _UNIT_DEPENDENCY_INVALID:
2195 assert_not_reached("Invalid dependency type");
/* Register dependency d from u on other, plus its inverse on the other
 * side (per inverse_table) and, optionally, References=/ReferencedBy=
 * bookkeeping. Both units are first resolved through unit_follow_merge().
 * On failure, the partially-applied set insertions are rolled back (the
 * set_remove() calls near the end). */
2198 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
/* Maps each dependency type to the type stored on the target unit;
 * _UNIT_DEPENDENCY_INVALID means "no inverse is recorded". */
2200 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2201 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2202 [UNIT_REQUIRES_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2203 [UNIT_WANTS] = UNIT_WANTED_BY,
2204 [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
2205 [UNIT_REQUISITE_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2206 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2207 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2208 [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
2209 [UNIT_REQUIRED_BY_OVERRIDABLE] = _UNIT_DEPENDENCY_INVALID,
2210 [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
2211 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2212 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2213 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2214 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2215 [UNIT_BEFORE] = UNIT_AFTER,
2216 [UNIT_AFTER] = UNIT_BEFORE,
2217 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2218 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2219 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2220 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2221 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2222 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2223 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2224 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2226 int r, q = 0, v = 0, w = 0;
2227 Unit *orig_u = u, *orig_other = other;
2230 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2233 u = unit_follow_merge(u);
2234 other = unit_follow_merge(other);
2236 /* We won't allow dependencies on ourselves. We will not
2237 * consider them an error however. */
2239 maybe_warn_about_dependency(orig_u->id, orig_other->id, d);
/* Pre-allocate every set we may touch before mutating any of them. */
2243 r = set_ensure_allocated(&u->dependencies[d], NULL);
2247 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2248 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2253 if (add_reference) {
2254 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2258 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2263 q = set_put(u->dependencies[d], other);
/* Self-inverse types (e.g. JoinsNamespaceOf=) skip the second insert. */
2267 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2268 v = set_put(other->dependencies[inverse_table[d]], u);
2275 if (add_reference) {
2276 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2282 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2287 unit_add_to_dbus_queue(u);
/* Error path: undo the insertions performed above. */
2292 set_remove(u->dependencies[d], other);
2295 set_remove(other->dependencies[inverse_table[d]], u);
2298 set_remove(u->dependencies[UNIT_REFERENCES], other);
/* Convenience: add two dependency types (d and e) at once, short-
 * circuiting on the first failure. */
2303 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2308 r = unit_add_dependency(u, d, other, add_reference);
2312 r = unit_add_dependency(u, e, other, add_reference);
/* Resolve a possibly-templated unit name: when 'name' (or basename of
 * 'path') is a template ("foo@.service"), substitute this unit's
 * instance (or its prefix when it has no instance). The allocated
 * result is stored in *p; the returned pointer is the name to use. */
2319 static const char *resolve_template(Unit *u, const char *name, const char*path, char **p) {
2323 assert(name || path);
2327 name = basename(path);
2329 if (!unit_name_is_template(name)) {
2335 s = unit_name_replace_instance(name, u->instance);
2337 _cleanup_free_ char *i = NULL;
/* No instance of our own: fall back to our unit-name prefix. */
2339 i = unit_name_to_prefix(u->id);
2343 s = unit_name_replace_instance(name, i);
/* Load the named unit (template names are resolved against u first) and
 * add dependency d from u on it. */
2353 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2356 _cleanup_free_ char *s = NULL;
2359 assert(name || path);
2361 name = resolve_template(u, name, path, &s);
2365 r = manager_load_unit(u->manager, name, path, NULL, &other);
2369 return unit_add_dependency(u, d, other, add_reference);
/* Same as above, but adds both dependency types d and e at once. */
2372 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2373 _cleanup_free_ char *s = NULL;
2378 assert(name || path);
2380 name = resolve_template(u, name, path, &s);
2384 r = manager_load_unit(u->manager, name, path, NULL, &other);
2388 return unit_add_two_dependencies(u, d, e, other, add_reference);
/* Inverse direction: the loaded unit gets dependency d on u. */
2391 int unit_add_dependency_by_name_inverse(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2394 _cleanup_free_ char *s = NULL;
2397 assert(name || path);
2399 name = resolve_template(u, name, path, &s);
2403 r = manager_load_unit(u->manager, name, path, NULL, &other);
2407 return unit_add_dependency(other, d, u, add_reference);
/* Inverse direction with two dependency types at once. */
2410 int unit_add_two_dependencies_by_name_inverse(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2413 _cleanup_free_ char *s = NULL;
2416 assert(name || path);
2418 name = resolve_template(u, name, path, &s);
2422 r = manager_load_unit(u->manager, name, path, NULL, &other);
2426 r = unit_add_two_dependencies(other, d, e, u, add_reference);
/* Override the unit search path via the SYSTEMD_UNIT_PATH environment
 * variable (third setenv() arg is 0: an existing value is kept). */
2433 int set_unit_path(const char *p) {
2434 /* This is mostly for debug purposes */
2435 if (setenv("SYSTEMD_UNIT_PATH", p, 0) < 0)
/* Return the D-Bus object path for this unit, derived from its id.
 * Caller owns the returned string. */
2441 char *unit_dbus_path(Unit *u) {
2447 return unit_dbus_path_from_name(u->id);
/* Compute the default cgroup path for this unit:
 * <cgroup_root>[/<slice-path>]/<escaped-unit-id>, or just the cgroup
 * root for the root slice itself. Returns a newly allocated string
 * (NULL on OOM). */
2450 char *unit_default_cgroup_path(Unit *u) {
2451 _cleanup_free_ char *escaped = NULL, *slice = NULL;
2456 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2457 return strdup(u->manager->cgroup_root);
2459 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
2460 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
/* The unit id must be escaped for use as a cgroup path component. */
2465 escaped = cg_escape(u->id);
2470 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
2472 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
/* Pick and attach the default slice for a unit that has none yet:
 * instantiated units get a per-template slice (derived from the escaped
 * prefix), otherwise system.slice (system manager) or -.slice (user
 * manager). The chosen slice unit is loaded and stored in u->slice. */
2475 int unit_add_default_slice(Unit *u, CGroupContext *c) {
2476 _cleanup_free_ char *b = NULL;
2477 const char *slice_name;
2484 if (UNIT_ISSET(u->slice))
2488 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2490 /* Implicitly place all instantiated units in their
2491 * own per-template slice */
2493 prefix = unit_name_to_prefix(u->id);
2497 /* The prefix is already escaped, but it might include
2498 * "-" which has a special meaning for slice units,
2499 * hence escape it here extra. */
2500 escaped = strreplace(prefix, "-", "\\x2d");
2504 if (u->manager->running_as == SYSTEMD_SYSTEM)
2505 b = strjoin("system-", escaped, ".slice", NULL);
2507 b = strappend(escaped, ".slice");
2514 u->manager->running_as == SYSTEMD_SYSTEM
2515 ? SPECIAL_SYSTEM_SLICE
2516 : SPECIAL_ROOT_SLICE;
2518 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2522 unit_ref_set(&u->slice, slice);
/* Accessor: id of the slice this unit sits in, or fall through when
 * none is set. */
2526 const char *unit_slice_name(Unit *u) {
2529 if (!UNIT_ISSET(u->slice))
2532 return UNIT_DEREF(u->slice)->id;
/* Load the unit that shares this unit's name but carries a different
 * type suffix (e.g. "foo.service" -> "foo.socket") and return it in
 * *_found. The asserts guard against resolving back to u itself. */
2535 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2536 _cleanup_free_ char *t = NULL;
2543 t = unit_name_change_suffix(u->id, type);
2547 assert(!unit_has_name(u, t));
2549 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2550 assert(r < 0 || *_found != u);
/* Register this unit as the (single) watcher of a bus name in the
 * manager's watch_bus hashmap. */
2554 int unit_watch_bus_name(Unit *u, const char *name) {
2558 /* Watch a specific name on the bus. We only support one unit
2559 * watching each name for now. */
2561 return hashmap_put(u->manager->watch_bus, name, u);
/* Remove our bus-name watch (only if we are the registered watcher). */
2564 void unit_unwatch_bus_name(Unit *u, const char *name) {
2568 hashmap_remove_value(u->manager->watch_bus, name, u);
/* A unit type supports serialization iff it implements both serialize()
 * and deserialize_item(). */
2571 bool unit_can_serialize(Unit *u) {
2574 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
/* Write this unit's state to f (key=value lines) for daemon re-exec /
 * reload: type-specific state via the vtable, exec runtime, all state
 * timestamps, condition/assert results, the transient flag, the cgroup
 * path, and (optionally) the pending job(s). fds collects file
 * descriptors that are passed across the re-exec. */
2577 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2584 if (unit_can_serialize(u)) {
2587 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2591 rt = unit_get_exec_runtime(u);
2593 r = exec_runtime_serialize(rt, u, f, fds);
2599 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2600 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2601 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2602 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2603 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2604 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2606 if (dual_timestamp_is_set(&u->condition_timestamp))
2607 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2609 if (dual_timestamp_is_set(&u->assert_timestamp))
2610 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2612 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2615 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2617 if (serialize_jobs) {
/* Jobs are serialized inline, introduced by a bare "job" line. */
2619 fprintf(f, "job\n");
2620 job_serialize(u->job, f, fds);
2624 fprintf(f, "job\n");
2625 job_serialize(u->nop_job, f, fds);
/* printf-style variant of unit_serialize_item(): key, then formatted
 * value. */
2634 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2645 va_start(ap, format);
2646 vfprintf(f, format, ap);
/* Emit one "key=value" serialization line. */
2652 void unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2658 fprintf(f, "%s=%s\n", key, value);
/* Inverse of unit_serialize(): read "key=value" lines from f until the
 * record ends, restoring jobs, timestamps, condition/assert results,
 * the transient flag and the cgroup path; unknown keys are handed to
 * the exec runtime and then the type-specific deserialize_item(). */
2661 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2662 ExecRuntime **rt = NULL;
/* Locate the type's embedded ExecRuntime* via its vtable offset. */
2670 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2672 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2675 char line[LINE_MAX], *l, *v;
2678 if (!fgets(line, sizeof(line), f)) {
/* Split the line at the first '=' into key l and value v. */
2691 k = strcspn(l, "=");
2699 if (streq(l, "job")) {
2701 /* new-style serialized job */
2708 r = job_deserialize(j, f, fds);
2714 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2720 r = job_install_deserialized(j);
/* Installation failed: back out the hashmap registration. */
2722 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
/* Old-style job serialization: just a job type, applied later in
 * unit_coldplug(). */
2730 type = job_type_from_string(v);
2732 log_debug("Failed to parse job type value %s", v);
2734 u->deserialized_job = type;
2737 } else if (streq(l, "inactive-exit-timestamp")) {
2738 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2740 } else if (streq(l, "active-enter-timestamp")) {
2741 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2743 } else if (streq(l, "active-exit-timestamp")) {
2744 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2746 } else if (streq(l, "inactive-enter-timestamp")) {
2747 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2749 } else if (streq(l, "condition-timestamp")) {
2750 dual_timestamp_deserialize(v, &u->condition_timestamp);
2752 } else if (streq(l, "assert-timestamp")) {
2753 dual_timestamp_deserialize(v, &u->assert_timestamp);
2755 } else if (streq(l, "condition-result")) {
2758 b = parse_boolean(v);
2760 log_debug("Failed to parse condition result value %s", v);
2762 u->condition_result = b;
2766 } else if (streq(l, "assert-result")) {
2769 b = parse_boolean(v);
2771 log_debug("Failed to parse assert result value %s", v);
2773 u->assert_result = b;
2777 } else if (streq(l, "transient")) {
2780 b = parse_boolean(v);
2782 log_debug("Failed to parse transient bool %s", v);
2787 } else if (streq(l, "cgroup")) {
/* Replace any previously-recorded cgroup path, keeping the
 * manager's cgroup_unit hashmap consistent. */
2794 if (u->cgroup_path) {
2797 p = hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2798 log_info("Removing cgroup_path %s from hashmap (%p)",
2800 free(u->cgroup_path);
2804 assert(hashmap_put(u->manager->cgroup_unit, s, u) == 1);
/* Unrecognized key: offer it to the exec runtime first, then to the
 * unit type's own deserializer. */
2809 if (unit_can_serialize(u)) {
2811 r = exec_runtime_deserialize_item(rt, u, l, v, fds);
2818 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
/* Tie this unit to the device unit backing the device node 'what':
 * After=/BindsTo= on the device, and — when 'wants' — a Wants= from the
 * device back to us. Non-device paths are ignored. */
2825 int unit_add_node_link(Unit *u, const char *what, bool wants) {
2827 _cleanup_free_ char *e = NULL;
2835 /* Adds in links to the device node that this unit is based on */
2837 if (!is_device_path(what))
2840 e = unit_name_from_path(what, ".device");
2844 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2849 r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_BINDS_TO, device, true);
2854 r = unit_add_dependency(device, UNIT_WANTS, u, false);
/* After deserialization: let the unit type re-attach to its existing
 * state (coldplug hook), then restore the job — either a fully
 * deserialized job object, or an old-style job type re-enqueued as a
 * fresh job with requirements ignored. */
2862 int unit_coldplug(Unit *u) {
2867 if (UNIT_VTABLE(u)->coldplug)
2868 if ((r = UNIT_VTABLE(u)->coldplug(u)) < 0)
2872 r = job_coldplug(u->job);
2875 } else if (u->deserialized_job >= 0) {
2877 r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL);
2881 u->deserialized_job = _JOB_TYPE_INVALID;
/* Print a status line for this unit on the console, substituting the
 * unit's description into the caller-supplied format string (hence the
 * suppressed format-nonliteral warning). */
2887 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
2888 DISABLE_WARNING_FORMAT_NONLITERAL;
2889 manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
2890 status, unit_status_msg_format, unit_description(u));
/* Decide whether the on-disk configuration of this unit changed since
 * it was loaded, by comparing mtimes of the fragment, the source file
 * and the set of drop-in files against the recorded load-time mtimes.
 * A changed drop-in *set* (different count or non-overlapping paths)
 * also counts as needing a reload. */
2894 bool unit_need_daemon_reload(Unit *u) {
2895 _cleanup_strv_free_ char **t = NULL;
2898 unsigned loaded_cnt, current_cnt;
2902 if (u->fragment_path) {
2904 if (stat(u->fragment_path, &st) < 0)
2905 /* What, cannot access this anymore? */
2908 if (u->fragment_mtime > 0 &&
2909 timespec_load(&st.st_mtim) != u->fragment_mtime)
2913 if (u->source_path) {
2915 if (stat(u->source_path, &st) < 0)
2918 if (u->source_mtime > 0 &&
2919 timespec_load(&st.st_mtim) != u->source_mtime)
/* Compare the drop-in files currently on disk with those recorded at
 * load time. */
2923 (void) unit_find_dropin_paths(u, &t);
2924 loaded_cnt = strv_length(t);
2925 current_cnt = strv_length(u->dropin_paths);
2927 if (loaded_cnt == current_cnt) {
2928 if (loaded_cnt == 0)
2931 if (strv_overlap(u->dropin_paths, t)) {
2932 STRV_FOREACH(path, u->dropin_paths) {
2934 if (stat(*path, &st) < 0)
2937 if (u->dropin_mtime > 0 &&
2938 timespec_load(&st.st_mtim) > u->dropin_mtime)
/* Clear the unit's failed state via the type-specific hook, if any. */
2949 void unit_reset_failed(Unit *u) {
2952 if (UNIT_VTABLE(u)->reset_failed)
2953 UNIT_VTABLE(u)->reset_failed(u);
/* Return the unit this unit "follows" (per-type delegation hook), or
 * fall through when the type defines none. */
2956 Unit *unit_following(Unit *u) {
2959 if (UNIT_VTABLE(u)->following)
2960 return UNIT_VTABLE(u)->following(u);
/* True iff a stop job is queued for this unit. */
2965 bool unit_stop_pending(Unit *u) {
2968 /* This call does check the current state of the unit. It's
2969 * hence useful to be called from state change calls of the
2970 * unit itself, where the state isn't updated yet. This is
2971 * different from unit_inactive_or_pending() which checks both
2972 * the current state and for a queued job. */
2974 return u->job && u->job->type == JOB_STOP;
/* True iff the unit is inactive/deactivating or has a stop job queued. */
2977 bool unit_inactive_or_pending(Unit *u) {
2980 /* Returns true if the unit is inactive or going down */
2982 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
2985 if (unit_stop_pending(u))
/* True iff the unit is active/activating or has a start-ish job queued. */
2991 bool unit_active_or_pending(Unit *u) {
2994 /* Returns true if the unit is active or going up */
2996 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3000 (u->job->type == JOB_START ||
3001 u->job->type == JOB_RELOAD_OR_START ||
3002 u->job->type == JOB_RESTART))
/* Dispatch a kill request (who/signal) to the unit type's kill()
 * implementation; types without one do not support killing. */
3008 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3010 assert(w >= 0 && w < _KILL_WHO_MAX);
3012 assert(signo < _NSIG);
3014 if (!UNIT_VTABLE(u)->kill)
3017 return UNIT_VTABLE(u)->kill(u, w, signo, error);
/* Build a Set containing the main and control PIDs (when > 0), used to
 * exclude them from cgroup-wide kills. Returns NULL on failure. */
3020 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3024 pid_set = set_new(NULL);
3028 /* Exclude the main/control pids from being killed via the cgroup */
3030 r = set_put(pid_set, LONG_TO_PTR(main_pid));
3035 if (control_pid > 0) {
3036 r = set_put(pid_set, LONG_TO_PTR(control_pid));
/* Shared kill implementation used by unit types: validates that the
 * requested target (main/control process) exists, signals the matching
 * PID(s), and for KILL_ALL additionally sweeps the unit's cgroup while
 * excluding the main/control PIDs (already signalled directly). */
3048 int unit_kill_common(
3054 sd_bus_error *error) {
3058 if (who == KILL_MAIN && main_pid <= 0) {
/* main_pid < 0 means the type has no main process at all; == 0 means
 * there is simply none right now. NOTE(review): inferred from the
 * parallel control_pid < 0 check below — confirm. */
3060 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3062 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3065 if (who == KILL_CONTROL && control_pid <= 0) {
3066 if (control_pid < 0)
3067 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3069 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3072 if (who == KILL_CONTROL || who == KILL_ALL)
3073 if (control_pid > 0)
3074 if (kill(control_pid, signo) < 0)
3077 if (who == KILL_MAIN || who == KILL_ALL)
3079 if (kill(main_pid, signo) < 0)
3082 if (who == KILL_ALL && u->cgroup_path) {
3083 _cleanup_set_free_ Set *pid_set = NULL;
3086 /* Exclude the main/control pids from being killed via the cgroup */
3087 pid_set = unit_pid_set(main_pid, control_pid);
/* Benign races (-ESRCH, -ENOENT, -EAGAIN) are not reported. */
3091 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, true, false, pid_set);
3092 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
/* Collect the units following this one via the per-type hook; types
 * without the hook contribute nothing. */
3099 int unit_following_set(Unit *u, Set **s) {
3103 if (UNIT_VTABLE(u)->following_set)
3104 return UNIT_VTABLE(u)->following_set(u, s);
/* Lazily determine (and cache) the enable/disable state of this unit's
 * fragment file via the unit-file database. */
3110 UnitFileState unit_get_unit_file_state(Unit *u) {
3113 if (u->unit_file_state < 0 && u->fragment_path)
3114 u->unit_file_state = unit_file_get_state(
3115 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3116 NULL, basename(u->fragment_path));
3118 return u->unit_file_state;
/* Lazily determine (and cache) whether presets would enable this unit. */
3121 int unit_get_unit_file_preset(Unit *u) {
3124 if (u->unit_file_preset < 0 && u->fragment_path)
3125 u->unit_file_preset = unit_file_query_preset(
3126 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3127 NULL, basename(u->fragment_path));
3129 return u->unit_file_preset;
/* Point a UnitRef at unit u: any previous target is released first,
 * then the ref is linked into u's refs list (so merges can rewrite it). */
3132 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3137 unit_ref_unset(ref);
3140 LIST_PREPEND(refs, u->refs, ref);
/* Release a UnitRef: unlink it from the target unit's refs list. */
3144 void unit_ref_unset(UnitRef *ref) {
3150 LIST_REMOVE(refs, ref->unit->refs, ref);
/* Apply manager-level defaults and cross-setting implications to the
 * unit's exec and cgroup contexts after all settings were parsed:
 * default rlimits, user-manager working directory and NoNewPrivileges
 * implications, PrivateDevices= capability/device-policy fallout. */
3154 int unit_patch_contexts(Unit *u) {
3162 /* Patch in the manager defaults into the exec and cgroup
3163 * contexts, _after_ the rest of the settings have been
3166 ec = unit_get_exec_context(u);
3168 /* This only copies in the ones that need memory */
3169 for (i = 0; i < _RLIMIT_MAX; i++)
3170 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3171 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3176 if (u->manager->running_as == SYSTEMD_USER &&
3177 !ec->working_directory) {
3179 r = get_home_dir(&ec->working_directory);
/* A user manager cannot install seccomp filters without
 * NoNewPrivileges, hence force it on when any filtering is used. */
3184 if (u->manager->running_as == SYSTEMD_USER &&
3185 (ec->syscall_whitelist ||
3186 !set_isempty(ec->syscall_filter) ||
3187 !set_isempty(ec->syscall_archs) ||
3188 ec->address_families_whitelist ||
3189 !set_isempty(ec->address_families)))
3190 ec->no_new_privileges = true;
/* PrivateDevices= implies dropping CAP_MKNOD ... */
3192 if (ec->private_devices)
3193 ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3196 cc = unit_get_cgroup_context(u);
/* ... and tightening an "auto" device policy to "closed". */
3200 ec->private_devices &&
3201 cc->device_policy == CGROUP_AUTO)
3202 cc->device_policy = CGROUP_CLOSED;
/* Accessors for the per-type embedded contexts: each unit type records
 * the byte offset of its context structure in its vtable; an offset of
 * zero (checked in the elided lines) means the type has none. */
3208 ExecContext *unit_get_exec_context(Unit *u) {
3215 offset = UNIT_VTABLE(u)->exec_context_offset;
3219 return (ExecContext*) ((uint8_t*) u + offset);
3222 KillContext *unit_get_kill_context(Unit *u) {
3229 offset = UNIT_VTABLE(u)->kill_context_offset;
3233 return (KillContext*) ((uint8_t*) u + offset);
3236 CGroupContext *unit_get_cgroup_context(Unit *u) {
3242 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3246 return (CGroupContext*) ((uint8_t*) u + offset);
/* Note the extra dereference: the vtable offset locates an
 * ExecRuntime* field, not the runtime structure itself. */
3249 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3255 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3259 return *(ExecRuntime**) ((uint8_t*) u + offset);
/* Choose the directory for drop-in snippets: persistent vs runtime,
 * user vs system manager. Transient units always go to the runtime
 * location. *dir receives a newly allocated path. */
3262 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3263 if (u->manager->running_as == SYSTEMD_USER) {
3266 if (mode == UNIT_PERSISTENT && !transient)
3267 r = user_config_home(dir);
3269 r = user_runtime_dir(dir);
3276 if (mode == UNIT_PERSISTENT && !transient)
3277 *dir = strdup("/etc/systemd/system");
3279 *dir = strdup("/run/systemd/system");
/* Compute the paths (file and its directory) of a named drop-in for
 * this unit, at priority prefix 50. */
3286 static int unit_drop_in_file(Unit *u,
3287 UnitSetPropertiesMode mode, const char *name, char **p, char **q) {
3288 _cleanup_free_ char *dir = NULL;
3293 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3297 return drop_in_file(dir, u->id, 50, name, p, q);
3300 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3302 _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
3307 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3310 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3314 r = write_drop_in(dir, u->id, 50, name, data);
3318 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3322 r = strv_extend(&u->dropin_paths, q);
3326 strv_sort(u->dropin_paths);
3327 strv_uniq(u->dropin_paths);
3329 u->dropin_mtime = now(CLOCK_REALTIME);
3334 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3335 _cleanup_free_ char *p = NULL;
3343 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3346 va_start(ap, format);
3347 r = vasprintf(&p, format, ap);
3353 return unit_write_drop_in(u, mode, name, p);
/* Like unit_write_drop_in(), but prefixes the payload with the unit
 * type's private configuration section header ("[Service]" etc., taken
 * from the vtable). Types without a private_section are rejected. */
3356 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3357         _cleanup_free_ char *ndata = NULL;
3363         if (!UNIT_VTABLE(u)->private_section)
3366         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3369         ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3373         return unit_write_drop_in(u, mode, name, ndata);
/* printf-style convenience wrapper around unit_write_drop_in_private().
 * NOTE(review): va_end() and the vasprintf error check are elided in
 * this excerpt. */
3376 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3377         _cleanup_free_ char *p = NULL;
3385         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3388         va_start(ap, format);
3389         r = vasprintf(&p, format, ap);
3395         return unit_write_drop_in_private(u, mode, name, p);
/* Delete the named drop-in file (and, presumably, its temp companion)
 * for this unit. A file that does not exist is not an error: ENOENT is
 * mapped to success, any other errno is returned negated.
 * NOTE(review): the unlink() call itself is elided in this excerpt. */
3398 int unit_remove_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name) {
3399         _cleanup_free_ char *p = NULL, *q = NULL;
3404         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3407         r = unit_drop_in_file(u, mode, name, &p, &q);
3412         r = errno == ENOENT ? 0 : -errno;
/* Convert this unit into a transient one: reset it to UNIT_STUB, mark
 * it transient, drop any previous fragment path and point it at a stub
 * fragment file in the runtime directory (per-user runtime dir for user
 * instances, /run/systemd/system for the system instance), which is
 * then created atomically with an SELinux label.
 * NOTE(review): OOM/error returns and else lines are elided here. */
3420 int unit_make_transient(Unit *u) {
3425         u->load_state = UNIT_STUB;
3427         u->transient = true;
3429         free(u->fragment_path);
3430         u->fragment_path = NULL;
3432         if (u->manager->running_as == SYSTEMD_USER) {
3433                 _cleanup_free_ char *c = NULL;
3435                 r = user_runtime_dir(&c);
3441                 u->fragment_path = strjoin(c, "/", u->id, NULL);
3442                 if (!u->fragment_path)
3447                 u->fragment_path = strappend("/run/systemd/system/", u->id);
3448                 if (!u->fragment_path)
3451                 mkdir_p("/run/systemd/system", 0755);
3454         return write_string_file_atomic_label(u->fragment_path, "# Transient stub");
/* Deliver the kill signal chosen by the KillContext 'c' and the kill
 * operation 'k' to the unit's main PID, control PID, and — depending on
 * KillMode= — the whole control group. Returns a boolean-ish int:
 * whether the caller should wait for SIGCHLD from the killed processes.
 * NOTE(review): the full parameter list, switch cases, and several
 * braces are elided in this excerpt — parameters u, c, k, main_pid,
 * control_pid are inferred from the visible uses; confirm against the
 * complete source. */
3457 int unit_kill_context(
3463                 bool main_pid_alien) {
3465         int sig, wait_for_exit = false, r;
             /* KillMode=none: touch nothing. */
3470         if (c->kill_mode == KILL_NONE)
             /* Pick the signal for this operation (only the terminate
              * case is visible here). */
3480         case KILL_TERMINATE:
3481                 sig = c->kill_signal;
3484                 assert_not_reached("KillOperation unknown");
             /* Signal the main process; SIGCONT is sent too so a
              * stopped process can act on the signal. */
3488                 r = kill_and_sigcont(main_pid, sig);
3490                 if (r < 0 && r != -ESRCH) {
3491                         _cleanup_free_ char *comm = NULL;
3492                         get_process_comm(main_pid, &comm);
3494                         log_unit_warning_errno(u->id, r, "Failed to kill main process " PID_FMT " (%s): %m", main_pid, strna(comm));
                         /* Don't wait for processes we did not fork
                          * ourselves — no SIGCHLD will arrive. */
3496                         if (!main_pid_alien)
3497                                 wait_for_exit = true;
                         /* SIGHUP accompanies the signal unless we are
                          * already sending SIGKILL. */
3499                         if (c->send_sighup && k != KILL_KILL)
3500                                 kill(main_pid, SIGHUP);
             /* Same treatment for the control process. */
3504         if (control_pid > 0) {
3505                 r = kill_and_sigcont(control_pid, sig);
3507                 if (r < 0 && r != -ESRCH) {
3508                         _cleanup_free_ char *comm = NULL;
3509                         get_process_comm(control_pid, &comm);
3511                         log_unit_warning_errno(u->id, r, "Failed to kill control process " PID_FMT " (%s): %m", control_pid, strna(comm));
3513                         wait_for_exit = true;
3515                 if (c->send_sighup && k != KILL_KILL)
3516                         kill(control_pid, SIGHUP);
             /* Kill the rest of the cgroup: always for
              * KillMode=control-group, and for KillMode=mixed only on
              * the final SIGKILL pass. */
3520         if ((c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL)) && u->cgroup_path) {
3521                 _cleanup_set_free_ Set *pid_set = NULL;
3523                 /* Exclude the main/control pids from being killed via the cgroup */
3524                 pid_set = unit_pid_set(main_pid, control_pid);
3528                 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, true, false, pid_set);
3530                         if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3531                                 log_unit_warning_errno(u->id, r, "Failed to kill control group: %m");
3534                         /* FIXME: For now, we will not wait for the
3535                          * cgroup members to die, simply because
3536                          * cgroup notification is unreliable. It
3537                          * doesn't work at all in containers, and
3538                          * outside of containers it can be confused
3539                          * easily by leaving directories in the
3542                         /* wait_for_exit = true; */
3544                         if (c->send_sighup && k != KILL_KILL) {
3547                                 pid_set = unit_pid_set(main_pid, control_pid);
3551                                 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3556         return wait_for_exit;
/* Register 'path' as a mount dependency of this unit. The cleaned path
 * is stored in u->requires_mounts_for, and every prefix of it is
 * entered into the manager-wide units_requiring_mounts_for hashmap so
 * that newly appearing mount units can find dependent units quickly.
 * NOTE(review): error returns, the Set handling for 'x', and the
 * allocation of 'q' are elided in this excerpt. */
3559 int unit_require_mounts_for(Unit *u, const char *path) {
3560         char prefix[strlen(path) + 1], *p;
3566         /* Registers a unit for requiring a certain path and all its
3567          * prefixes. We keep a simple array of these paths in the
3568          * unit, since its usually short. However, we build a prefix
3569          * table for all possible prefixes so that new appearing mount
3570          * units can easily determine which units to make themselves a
             /* Only absolute, safe, non-duplicate paths are recorded. */
3573         if (!path_is_absolute(path))
3580         path_kill_slashes(p);
3582         if (!path_is_safe(p)) {
3587         if (strv_contains(u->requires_mounts_for, p)) {
             /* strv_consume takes ownership of p on success. */
3592         r = strv_consume(&u->requires_mounts_for, p);
3596         PATH_FOREACH_PREFIX_MORE(prefix, p) {
3599                 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                     /* Lazily allocate the manager-wide prefix table. */
3603                         if (!u->manager->units_requiring_mounts_for) {
3604                                 u->manager->units_requiring_mounts_for = hashmap_new(&string_hash_ops);
3605                                 if (!u->manager->units_requiring_mounts_for)
3619                         r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
/* Ensure the unit has an ExecRuntime: first try to share (and ref) the
 * runtime of a unit it joins namespaces with (JoinsNamespaceOf=), and
 * only if none is found create a fresh one via exec_runtime_make().
 * NOTE(review): the early-return when *rt is already set, and the loop
 * braces, are elided in this excerpt. */
3635 int unit_setup_exec_runtime(Unit *u) {
3641         offset = UNIT_VTABLE(u)->exec_runtime_offset;
3644         /* Check if there already is an ExecRuntime for this unit? */
3645         rt = (ExecRuntime**) ((uint8_t*) u + offset);
3649         /* Try to get it from somebody else */
3650         SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3652                 *rt = unit_get_exec_runtime(other);
3654                         exec_runtime_ref(*rt);
3659         return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
/* Mapping from UnitActiveState enum values to their canonical string
 * names, as exposed on D-Bus and in "systemctl" output; the macro below
 * generates the _to_string/_from_string lookup helpers for it. */
3662 static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
3663         [UNIT_ACTIVE] = "active",
3664         [UNIT_RELOADING] = "reloading",
3665         [UNIT_INACTIVE] = "inactive",
3666         [UNIT_FAILED] = "failed",
3667         [UNIT_ACTIVATING] = "activating",
3668         [UNIT_DEACTIVATING] = "deactivating"
3671 DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState);