1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
26 #include <sys/timerfd.h>
33 #include "sd-messages.h"
38 #include "path-util.h"
39 #include "load-fragment.h"
40 #include "load-dropin.h"
42 #include "unit-name.h"
43 #include "dbus-unit.h"
45 #include "cgroup-util.h"
49 #include "fileio-label.h"
50 #include "bus-errors.h"
/* Dispatch table mapping each concrete unit type to its implementation vtable.
 * Indexed by UnitType; consulted via the UNIT_VTABLE() accessor used below.
 * NOTE(review): this extract elides lines — the closing "};" is not visible here. */
56 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
57 [UNIT_SERVICE] = &service_vtable,
58 [UNIT_SOCKET] = &socket_vtable,
59 [UNIT_BUSNAME] = &busname_vtable,
60 [UNIT_TARGET] = &target_vtable,
61 [UNIT_SNAPSHOT] = &snapshot_vtable,
62 [UNIT_DEVICE] = &device_vtable,
63 [UNIT_MOUNT] = &mount_vtable,
64 [UNIT_AUTOMOUNT] = &automount_vtable,
65 [UNIT_SWAP] = &swap_vtable,
66 [UNIT_TIMER] = &timer_vtable,
67 [UNIT_PATH] = &path_vtable,
68 [UNIT_SLICE] = &slice_vtable,
69 [UNIT_SCOPE] = &scope_vtable
/* Forward declaration; the definition lies outside this extract. Used by
 * merge_dependencies() to warn when a merged alias carried a dependency. */
72 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency);
/* Allocate a new Unit object of at least sizeof(Unit) bytes (subtypes pass a
 * larger `size`) and set safe defaults: type/job/file-state sentinels, default
 * dependencies enabled, and JOB_REPLACE as the OnFailure job mode.
 * NOTE(review): allocation and the return statement are elided in this extract. */
74 Unit *unit_new(Manager *m, size_t size) {
78 assert(size >= sizeof(Unit));
84 u->names = set_new(&string_hash_ops); /* set of all names (aliases) of this unit */
91 u->type = _UNIT_TYPE_INVALID;
92 u->deserialized_job = _JOB_TYPE_INVALID;
93 u->default_dependencies = true;
94 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
95 u->on_failure_job_mode = JOB_REPLACE;
/* Returns true iff `name` is one of this unit's registered names (aliases). */
100 bool unit_has_name(Unit *u, const char *name) {
104 return !!set_get(u->names, (char*) name); /* !! normalizes the pointer to bool */
/* Type-specific initialization, run once the unit's type is known: seed the
 * cgroup context with the manager's accounting defaults, initialize the exec
 * and kill contexts (where the type has them), then call the vtable init hook. */
107 static void unit_init(Unit *u) {
114 assert(u->type >= 0);
116 cc = unit_get_cgroup_context(u); /* NULL for types without a cgroup context — presumably guarded by an elided if */
118 cgroup_context_init(cc);
120 /* Copy in the manager defaults into the cgroup
121 * context, _before_ the rest of the settings have
122 * been initialized */
124 cc->cpu_accounting = u->manager->default_cpu_accounting;
125 cc->blockio_accounting = u->manager->default_blockio_accounting;
126 cc->memory_accounting = u->manager->default_memory_accounting;
129 ec = unit_get_exec_context(u);
131 exec_context_init(ec);
133 kc = unit_get_kill_context(u);
135 kill_context_init(kc);
/* Finally let the concrete unit type run its own init hook, if any. */
137 if (UNIT_VTABLE(u)->init)
138 UNIT_VTABLE(u)->init(u);
/* Register an additional name (alias) for `u`. Resolves templates against the
 * unit's instance, validates the name, enforces type/instance/alias policy,
 * then records the name both in u->names and in the manager's global unit map.
 * NOTE(review): error returns and several branches are elided in this extract. */
141 int unit_add_name(Unit *u, const char *text) {
142 _cleanup_free_ char *s = NULL, *i = NULL;
149 if (unit_name_is_template(text)) {
154 s = unit_name_replace_instance(text, u->instance); /* instantiate the template */
160 if (!unit_name_is_valid(s, TEMPLATE_INVALID))
163 assert_se((t = unit_name_to_type(s)) >= 0);
/* A unit may not carry names of two different types. */
165 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
168 r = unit_name_to_instance(s, &i);
172 if (i && unit_vtable[t]->no_instances)
175 /* Ensure that this unit is either instanced or not instanced,
177 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
/* Some unit types refuse aliases entirely. */
180 if (unit_vtable[t]->no_alias &&
181 !set_isempty(u->names) &&
182 !set_get(u->names, s))
185 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
188 r = set_put(u->names, s);
196 r = hashmap_put(u->manager->units, s, u);
/* Roll back the set insertion if the global registration failed. */
198 set_remove(u->names, s);
/* First name added determines the unit's type and links it into the per-type list. */
202 if (u->type == _UNIT_TYPE_INVALID) {
207 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
216 unit_add_to_dbus_queue(u);
/* Select one of the unit's existing names as its primary id. Templates are
 * first instantiated against u->instance; the chosen name must already be in
 * u->names. NOTE(review): assignment of u->id/u->instance is elided here. */
220 int unit_choose_id(Unit *u, const char *name) {
221 _cleanup_free_ char *t = NULL;
228 if (unit_name_is_template(name)) {
233 t = unit_name_replace_instance(name, u->instance);
240 /* Selects one of the names of this unit as the id */
241 s = set_get(u->names, (char*) name);
245 r = unit_name_to_instance(s, &i);
254 unit_add_to_dbus_queue(u);
/* Replace the unit's human-readable description (empty input presumably clears
 * it — the branch body is elided). Frees the old string before storing the new. */
259 int unit_set_description(Unit *u, const char *description) {
264 if (isempty(description))
267 s = strdup(description);
272 free(u->description);
275 unit_add_to_dbus_queue(u); /* description is exposed on the bus */
/* Decide whether the unit must be kept around: consults the type's no_gc flag,
 * the active state, and the optional per-type check_gc() hook.
 * NOTE(review): the return statements are elided; exact truth value per branch
 * cannot be confirmed from this extract. */
279 bool unit_check_gc(Unit *u) {
282 if (UNIT_VTABLE(u)->no_gc)
294 if (unit_active_state(u) != UNIT_INACTIVE)
300 if (UNIT_VTABLE(u)->check_gc)
301 if (UNIT_VTABLE(u)->check_gc(u))
/* Queue the unit for loading. No-op unless the unit is still a stub and not
 * already queued; the in_load_queue flag prevents double insertion. */
307 void unit_add_to_load_queue(Unit *u) {
309 assert(u->type != _UNIT_TYPE_INVALID);
311 if (u->load_state != UNIT_STUB || u->in_load_queue)
314 LIST_PREPEND(load_queue, u->manager->load_queue, u);
315 u->in_load_queue = true;
/* Queue the unit for cleanup (eventual freeing); idempotent via the flag. */
318 void unit_add_to_cleanup_queue(Unit *u) {
321 if (u->in_cleanup_queue)
324 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
325 u->in_cleanup_queue = true;
/* Queue the unit for garbage-collection consideration. Skipped when already
 * queued for GC or cleanup, or when unit_check_gc() says it must be kept. */
328 void unit_add_to_gc_queue(Unit *u) {
331 if (u->in_gc_queue || u->in_cleanup_queue)
334 if (unit_check_gc(u))
337 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
338 u->in_gc_queue = true;
/* Keep the manager's queue-length counter in sync (decremented in unit_free()). */
340 u->manager->n_in_gc_queue ++;
/* Queue the unit so that change/new signals get sent on the bus. Skips stubs
 * and already-queued units; when nobody is subscribed at all, just pretends
 * the "new" signal was sent and avoids the queue entirely. */
343 void unit_add_to_dbus_queue(Unit *u) {
345 assert(u->type != _UNIT_TYPE_INVALID);
347 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
350 /* Shortcut things if nobody cares */
351 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
352 set_isempty(u->manager->private_buses)) {
353 u->sent_dbus_new_signal = true;
357 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
358 u->in_dbus_queue = true;
/* Free a dependency set of `u` while removing `u` from every peer's reverse
 * dependency sets, then reconsider each peer for GC (it may now be unneeded).
 * NOTE(review): the final set_free() call is elided in this extract. */
361 static void bidi_set_free(Unit *u, Set *s) {
367 /* Frees the set and makes sure we are dropped from the
368 * inverse pointers */
370 SET_FOREACH(other, s, i) {
/* Scrub u out of every dependency direction of the peer, not just the inverse
 * of the set being freed. */
373 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
374 set_remove(other->dependencies[d], u);
376 unit_add_to_gc_queue(other);
/* Delete the on-disk configuration of a transient unit: its fragment file and
 * each drop-in (plus, presumably, the parent drop-in directories — the code
 * after path_get_parent() is elided, so that part is unconfirmed). */
382 static void unit_remove_transient(Unit *u) {
390 if (u->fragment_path)
391 unlink(u->fragment_path);
393 STRV_FOREACH(i, u->dropin_paths) {
394 _cleanup_free_ char *p = NULL;
399 r = path_get_parent(*i, &p);
/* Tear down this unit's RequiresMountsFor registrations: for every path and
 * each of its prefixes, drop the unit from the manager's reverse map
 * units_requiring_mounts_for, removing map entries that become empty; then
 * free the path list itself. */
405 static void unit_free_requires_mounts_for(Unit *u) {
408 STRV_FOREACH(j, u->requires_mounts_for) {
409 char s[strlen(*j) + 1]; /* VLA scratch buffer for the prefix walk */
411 PATH_FOREACH_PREFIX_MORE(s, *j) {
415 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
/* `y` is the map's own key copy; remove by it, not by the stack buffer. */
421 if (set_isempty(x)) {
422 hashmap_remove(u->manager->units_requiring_mounts_for, y);
429 strv_free(u->requires_mounts_for);
430 u->requires_mounts_for = NULL;
/* Per-type shutdown: run the vtable done() hook, then release the exec and
 * cgroup contexts where the unit type has them. */
433 static void unit_done(Unit *u) {
442 if (UNIT_VTABLE(u)->done)
443 UNIT_VTABLE(u)->done(u);
445 ec = unit_get_exec_context(u); /* may be NULL — presumably guarded by an elided if */
447 exec_context_done(ec);
449 cc = unit_get_cgroup_context(u);
451 cgroup_context_done(cc);
/* Destroy a unit: remove its transient config (unless a reload is in flight),
 * announce removal on the bus, unregister every name, sever all bidirectional
 * dependencies, unlink it from every manager queue and map, and free all owned
 * memory. NOTE(review): lines are elided throughout; the final free(u) and the
 * job-freeing logic are not visible in this extract. */
454 void unit_free(Unit *u) {
/* n_reloading > 0 means a daemon reload is in progress; keep transient files
 * then so the unit can be deserialized back. */
461 if (u->manager->n_reloading <= 0)
462 unit_remove_transient(u);
464 bus_unit_send_removed_signal(u);
468 unit_free_requires_mounts_for(u);
/* Drop every name → unit mapping we registered in unit_add_name(). */
470 SET_FOREACH(t, u->names, i)
471 hashmap_remove_value(u->manager->units, t, u);
485 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
486 bidi_set_free(u, u->dependencies[d]);
488 if (u->type != _UNIT_TYPE_INVALID)
489 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
/* Unlink from each work queue we might still be sitting on. */
491 if (u->in_load_queue)
492 LIST_REMOVE(load_queue, u->manager->load_queue, u);
494 if (u->in_dbus_queue)
495 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
497 if (u->in_cleanup_queue)
498 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
500 if (u->in_gc_queue) {
501 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
502 u->manager->n_in_gc_queue--; /* mirrors the increment in unit_add_to_gc_queue() */
505 if (u->in_cgroup_queue)
506 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
508 if (u->cgroup_path) {
509 hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
510 free(u->cgroup_path);
513 set_remove(u->manager->failed_units, u);
514 set_remove(u->manager->startup_units, u);
/* Release owned strings and lists. */
516 free(u->description);
517 strv_free(u->documentation);
518 free(u->fragment_path);
519 free(u->source_path);
520 strv_free(u->dropin_paths);
523 free(u->job_timeout_reboot_arg);
525 set_free_free(u->names); /* frees the set AND the name strings it owns */
527 unit_unwatch_all_pids(u);
529 condition_free_list(u->conditions);
530 condition_free_list(u->asserts);
532 unit_ref_unset(&u->slice);
/* Detach everything still referencing us — presumably a loop over u->refs;
 * only one iteration's statement is visible in this extract. */
535 unit_ref_unset(u->refs);
/* Report the unit's active state. Merged units delegate to the unit they were
 * merged into; otherwise the type's active_state() hook decides. */
540 UnitActiveState unit_active_state(Unit *u) {
543 if (u->load_state == UNIT_MERGED)
544 return unit_active_state(unit_follow_merge(u));
546 /* After a reload it might happen that a unit is not correctly
547 * loaded but still has a process around. That's why we won't
548 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
550 return UNIT_VTABLE(u)->active_state(u);
/* Human-readable name of the unit's type-specific sub-state. */
553 const char* unit_sub_state_to_string(Unit *u) {
556 return UNIT_VTABLE(u)->sub_state_to_string(u);
/* Move all entries of *other into *s (both passed by pointer so the sets can
 * be swapped/replaced — surrounding logic is elided in this extract). */
559 static int complete_move(Set **s, Set **other) {
569 r = set_move(*s, *other);
/* Fold `other`'s names into `u` during a merge: move the name set over, free
 * the remainder, and repoint every name in the manager's global map at `u`. */
580 static int merge_names(Unit *u, Unit *other) {
588 r = complete_move(&u->names, &other->names);
592 set_free_free(other->names);
/* hashmap_replace() == 0 asserts every name was already registered. */
596 SET_FOREACH(t, u->names, i)
597 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
/* Pre-reserve room in u's dependency set `d` for everything that will be moved
 * over from `other`, so the later merge_dependencies() cannot fail on OOM. */
602 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
607 assert(d < _UNIT_DEPENDENCY_MAX);
610 * If u does not have this dependency set allocated, there is no need
611 * to reserve anything. In that case other's set will be transfered
612 * as a whole to u by complete_move().
614 if (!u->dependencies[d])
617 /* merge_dependencies() will skip a u-on-u dependency */
618 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
620 return set_reserve(u->dependencies[d], n_reserve);
/* Transfer dependency set `d` from `other` to `u` during a merge. First fix
 * every peer's back-pointers (other → u) across all dependency directions,
 * warning when a dependency on the merged alias gets dropped; then move the
 * set itself. The caller must have called reserve_dependencies() first so the
 * final move cannot fail. */
623 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
630 assert(d < _UNIT_DEPENDENCY_MAX);
632 /* Fix backwards pointers */
633 SET_FOREACH(back, other->dependencies[d], i) {
636 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
637 /* Do not add dependencies between u and itself */
/* NOTE(review): lines are elided here; presumably this branch only runs when
 * back == u, i.e. the peer is u itself. */
639 if (set_remove(back->dependencies[k], other))
640 maybe_warn_about_dependency(u->id, other_id, k);
642 r = set_remove_and_put(back->dependencies[k], other, u);
/* -EEXIST from set_remove_and_put presumably lands here: u already present,
 * so just drop the stale `other` entry. */
644 set_remove(back->dependencies[k], other);
646 assert(r >= 0 || r == -ENOENT);
651 /* Also do not move dependencies on u to itself */
652 back = set_remove(other->dependencies[d], u);
654 maybe_warn_about_dependency(u->id, other_id, d);
656 /* The move cannot fail. The caller must have performed a reservation. */
657 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
659 set_free(other->dependencies[d]);
660 other->dependencies[d] = NULL;
/* Merge `other` into `u`: both must belong to the same manager, share the same
 * type and instanced-ness, and `other` must be an unloaded (stub/not-found),
 * inactive unit. Names, references and dependencies are transferred, then
 * `other` is marked UNIT_MERGED and queued for cleanup.
 * NOTE(review): several early-return error paths are elided in this extract. */
663 int unit_merge(Unit *u, Unit *other) {
665 const char *other_id = NULL;
670 assert(u->manager == other->manager);
671 assert(u->type != _UNIT_TYPE_INVALID);
673 other = unit_follow_merge(other); /* chase prior merges first */
678 if (u->type != other->type)
681 if (!u->instance != !other->instance)
684 if (other->load_state != UNIT_STUB &&
685 other->load_state != UNIT_NOT_FOUND)
694 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
/* Stack copy of the id — other's own id string may be freed by merge_names(). */
698 other_id = strdupa(other->id);
700 /* Make reservations to ensure merge_dependencies() won't fail */
701 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
702 r = reserve_dependencies(u, other, d);
704 * We don't rollback reservations if we fail. We don't have
705 * a way to undo reservations. A reservation is not a leak.
712 r = merge_names(u, other);
716 /* Redirect all references */
718 unit_ref_set(other->refs, u)
720 /* Merge dependencies */
721 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
722 merge_dependencies(u, other, other_id, d);
724 other->load_state = UNIT_MERGED;
725 other->merged_into = u;
727 /* If there is still some data attached to the other node, we
728 * don't need it anymore, and can free it. */
729 if (other->load_state != UNIT_STUB)
730 if (UNIT_VTABLE(other)->done)
731 UNIT_VTABLE(other)->done(other);
733 unit_add_to_dbus_queue(u);
734 unit_add_to_cleanup_queue(other);
/* Merge-by-name convenience: resolve templates, then either merge with an
 * existing unit of that name or simply add the name as an alias of `u`. */
739 int unit_merge_by_name(Unit *u, const char *name) {
742 _cleanup_free_ char *s = NULL;
747 if (unit_name_is_template(name)) {
751 s = unit_name_replace_instance(name, u->instance);
/* NOTE(review): `name` is presumably redirected to `s` in elided lines. */
758 other = manager_get_unit(u->manager, name);
760 r = unit_add_name(u, name); /* no such unit yet: just alias it */
762 r = unit_merge(u, other); /* unit exists: fold it into u */
/* Chase the merged_into chain until reaching a unit that was not merged away.
 * (The return of the final unit is elided in this extract.) */
767 Unit* unit_follow_merge(Unit *u) {
770 while (u->load_state == UNIT_MERGED)
771 assert_se(u = u->merged_into); /* merged units must always point somewhere */
/* Derive implicit dependencies from an ExecContext: mount requirements for the
 * working/root directories and (for the system instance) /tmp and /var/tmp
 * when PrivateTmp= is on, plus an After= on journald's socket whenever stdout
 * or stderr is routed to syslog/kmsg/journal. */
776 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
782 if (c->working_directory) {
783 r = unit_require_mounts_for(u, c->working_directory);
788 if (c->root_directory) {
789 r = unit_require_mounts_for(u, c->root_directory);
/* The remaining dependencies only make sense for the system instance. */
794 if (u->manager->running_as != SYSTEMD_SYSTEM)
797 if (c->private_tmp) {
798 r = unit_require_mounts_for(u, "/tmp");
802 r = unit_require_mounts_for(u, "/var/tmp");
/* Bail out early when neither output stream targets a logging daemon. */
807 if (c->std_output != EXEC_OUTPUT_KMSG &&
808 c->std_output != EXEC_OUTPUT_SYSLOG &&
809 c->std_output != EXEC_OUTPUT_JOURNAL &&
810 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
811 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
812 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
813 c->std_error != EXEC_OUTPUT_KMSG &&
814 c->std_error != EXEC_OUTPUT_SYSLOG &&
815 c->std_error != EXEC_OUTPUT_JOURNAL &&
816 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
817 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
818 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
821 /* If syslog or kernel logging is requested, make sure our own
822 * logging daemon is run first. */
824 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
/* Best human-readable description of the unit (presumably falls back to the
 * unit id when unset — the fallback path is elided in this extract). */
831 const char *unit_description(Unit *u) {
835 return u->description;
/* Debug dump of the complete unit state to `f`, one "<prefix>\tKey: Value"
 * line per field: identity and timestamps, names, documentation, paths,
 * job-timeout settings, conditions/asserts, all dependency sets, mount
 * requirements, and — for loaded units — policy flags plus the type-specific
 * dump() hook. Jobs are dumped at the end. */
840 void unit_dump(Unit *u, FILE *f, const char *prefix) {
846 timestamp1[FORMAT_TIMESTAMP_MAX],
847 timestamp2[FORMAT_TIMESTAMP_MAX],
848 timestamp3[FORMAT_TIMESTAMP_MAX],
849 timestamp4[FORMAT_TIMESTAMP_MAX],
850 timespan[FORMAT_TIMESPAN_MAX],
852 _cleanup_set_free_ Set *following_set = NULL;
856 assert(u->type >= 0);
858 prefix = strempty(prefix); /* tolerate NULL prefix */
859 prefix2 = strappenda(prefix, "\t"); /* one level deeper, for nested dumps */
863 "%s\tDescription: %s\n"
865 "%s\tUnit Load State: %s\n"
866 "%s\tUnit Active State: %s\n"
867 "%s\tInactive Exit Timestamp: %s\n"
868 "%s\tActive Enter Timestamp: %s\n"
869 "%s\tActive Exit Timestamp: %s\n"
870 "%s\tInactive Enter Timestamp: %s\n"
871 "%s\tGC Check Good: %s\n"
872 "%s\tNeed Daemon Reload: %s\n"
873 "%s\tTransient: %s\n"
876 "%s\tCGroup realized: %s\n"
877 "%s\tCGroup mask: 0x%x\n"
878 "%s\tCGroup members mask: 0x%x\n",
880 prefix, unit_description(u),
881 prefix, strna(u->instance),
882 prefix, unit_load_state_to_string(u->load_state),
883 prefix, unit_active_state_to_string(unit_active_state(u)),
884 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
885 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
886 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
887 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
888 prefix, yes_no(unit_check_gc(u)),
889 prefix, yes_no(unit_need_daemon_reload(u)),
890 prefix, yes_no(u->transient),
891 prefix, strna(unit_slice_name(u)),
892 prefix, strna(u->cgroup_path),
893 prefix, yes_no(u->cgroup_realized),
894 prefix, u->cgroup_realized_mask,
895 prefix, u->cgroup_members_mask);
/* All names (aliases) and documentation URIs, one per line. */
897 SET_FOREACH(t, u->names, i)
898 fprintf(f, "%s\tName: %s\n", prefix, t);
900 STRV_FOREACH(j, u->documentation)
901 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
903 following = unit_following(u);
905 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
907 r = unit_following_set(u, &following_set);
911 SET_FOREACH(other, following_set, i)
912 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
/* Configuration sources. */
915 if (u->fragment_path)
916 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
919 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
921 STRV_FOREACH(j, u->dropin_paths)
922 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
924 if (u->job_timeout > 0)
925 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
927 if (u->job_timeout_action != FAILURE_ACTION_NONE)
928 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
930 if (u->job_timeout_reboot_arg)
931 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
933 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
934 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
936 if (dual_timestamp_is_set(&u->condition_timestamp))
938 "%s\tCondition Timestamp: %s\n"
939 "%s\tCondition Result: %s\n",
940 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
941 prefix, yes_no(u->condition_result));
943 if (dual_timestamp_is_set(&u->assert_timestamp))
945 "%s\tAssert Timestamp: %s\n"
946 "%s\tAssert Result: %s\n",
947 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
948 prefix, yes_no(u->assert_result));
/* Every dependency of every direction, labelled with the dependency name. */
950 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
953 SET_FOREACH(other, u->dependencies[d], i)
954 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
957 if (!strv_isempty(u->requires_mounts_for)) {
959 "%s\tRequiresMountsFor:", prefix);
961 STRV_FOREACH(j, u->requires_mounts_for)
962 fprintf(f, " %s", *j);
/* Policy flags are only meaningful once the unit is fully loaded. */
967 if (u->load_state == UNIT_LOADED) {
970 "%s\tStopWhenUnneeded: %s\n"
971 "%s\tRefuseManualStart: %s\n"
972 "%s\tRefuseManualStop: %s\n"
973 "%s\tDefaultDependencies: %s\n"
974 "%s\tOnFailureJobMode: %s\n"
975 "%s\tIgnoreOnIsolate: %s\n"
976 "%s\tIgnoreOnSnapshot: %s\n",
977 prefix, yes_no(u->stop_when_unneeded),
978 prefix, yes_no(u->refuse_manual_start),
979 prefix, yes_no(u->refuse_manual_stop),
980 prefix, yes_no(u->default_dependencies),
981 prefix, job_mode_to_string(u->on_failure_job_mode),
982 prefix, yes_no(u->ignore_on_isolate),
983 prefix, yes_no(u->ignore_on_snapshot));
985 if (UNIT_VTABLE(u)->dump)
986 UNIT_VTABLE(u)->dump(u, f, prefix2);
988 } else if (u->load_state == UNIT_MERGED)
990 "%s\tMerged into: %s\n",
991 prefix, u->merged_into->id);
992 else if (u->load_state == UNIT_ERROR)
993 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
/* Active and no-op jobs, if any (the guarding ifs are elided in this extract). */
997 job_dump(u->job, f, prefix2);
1000 job_dump(u->nop_job, f, prefix2);
1004 /* Common implementation for multiple backends */
/* Load the unit's fragment file; a unit still in STUB afterwards means the
 * fragment was not found (the error return is elided). Then load drop-in data
 * on the merge target. */
1005 int unit_load_fragment_and_dropin(Unit *u) {
1010 /* Load a .{service,socket,...} file */
1011 r = unit_load_fragment(u);
1015 if (u->load_state == UNIT_STUB)
1018 /* Load drop-in directory data */
1019 r = unit_load_dropin(unit_follow_merge(u));
1026 /* Common implementation for multiple backends */
/* Like unit_load_fragment_and_dropin(), but a missing fragment is fine: a unit
 * still in STUB is simply promoted to LOADED before drop-ins are applied. */
1027 int unit_load_fragment_and_dropin_optional(Unit *u) {
1032 /* Same as unit_load_fragment_and_dropin(), but whether
1033 * something can be loaded or not doesn't matter. */
1035 /* Load a .service file */
1036 r = unit_load_fragment(u);
1040 if (u->load_state == UNIT_STUB)
1041 u->load_state = UNIT_LOADED;
1043 /* Load drop-in directory data */
1044 r = unit_load_dropin(unit_follow_merge(u));
/* Add the implicit target→unit After= ordering for a unit pulled in by a
 * target. Skipped unless both sides are loaded and want default dependencies,
 * and never added when it would create an ordering loop. */
1051 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1055 if (target->type != UNIT_TARGET)
1058 /* Only add the dependency if both units are loaded, so that
1059 * that loop check below is reliable */
1060 if (u->load_state != UNIT_LOADED ||
1061 target->load_state != UNIT_LOADED)
1064 /* If either side wants no automatic dependencies, then let's
1066 if (!u->default_dependencies ||
1067 !target->default_dependencies)
1070 /* Don't create loops */
1071 if (set_get(target->dependencies[UNIT_BEFORE], u))
1074 return unit_add_dependency(target, UNIT_AFTER, u, true);
/* For every target that pulls this unit in (via the reverse dependency types
 * listed in deps[] — mostly elided in this extract), add the default
 * target-after-unit ordering. */
1077 static int unit_add_target_dependencies(Unit *u) {
1079 static const UnitDependency deps[] = {
1081 UNIT_REQUIRED_BY_OVERRIDABLE,
1093 for (k = 0; k < ELEMENTSOF(deps); k++)
1094 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1095 r = unit_add_default_target_dependency(u, target);
/* Units with a cgroup context get After=/Wants= on their slice: the configured
 * one when set, otherwise the root slice (which itself depends on nothing). */
1103 static int unit_add_slice_dependencies(Unit *u) {
1106 if (!unit_get_cgroup_context(u))
1109 if (UNIT_ISSET(u->slice))
1110 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_WANTS, UNIT_DEREF(u->slice), true);
1112 if (streq(u->id, SPECIAL_ROOT_SLICE))
1115 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, SPECIAL_ROOT_SLICE, NULL, true);
/* Translate RequiresMountsFor= into real dependencies: for each listed path
 * and every prefix of it, find the corresponding .mount unit and add After=
 * (plus Requires= when the mount has its own fragment, i.e. is not synthesized
 * purely from /proc/self/mountinfo). */
1118 static int unit_add_mount_dependencies(Unit *u) {
1124 STRV_FOREACH(i, u->requires_mounts_for) {
1125 char prefix[strlen(*i) + 1]; /* VLA scratch buffer for the prefix walk */
1127 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1130 r = manager_get_unit_by_path(u->manager, prefix, ".mount", &m);
1138 if (m->load_state != UNIT_LOADED)
1141 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1145 if (m->fragment_path) {
1146 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
/* Track units with boot-time resource settings: if either StartupCPUShares=
 * or StartupBlockIOWeight= is configured (i.e. not the -1 "unset" sentinel),
 * register the unit in the manager's startup_units set. */
1156 static int unit_add_startup_units(Unit *u) {
1160 c = unit_get_cgroup_context(u);
1164 if (c->startup_cpu_shares == (unsigned long) -1 &&
1165 c->startup_blockio_weight == (unsigned long) -1)
1168 r = set_put(u->manager->startup_units, u);
/* Load a unit: dequeue it, run the type's load() hook, then — on success —
 * wire up the implicit target/slice/mount dependencies, register startup
 * resource tracking, validate the OnFailure=/OnFailureJobMode=isolate
 * combination and refresh cgroup member masks. On failure the unit is marked
 * NOT_FOUND (if still a stub) or ERROR, and requeued for dbus/GC.
 * NOTE(review): the success return and `fail:` label are elided here. */
1175 int unit_load(Unit *u) {
1180 if (u->in_load_queue) {
1181 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1182 u->in_load_queue = false;
1185 if (u->type == _UNIT_TYPE_INVALID)
1188 if (u->load_state != UNIT_STUB)
1191 if (UNIT_VTABLE(u)->load) {
1192 r = UNIT_VTABLE(u)->load(u);
/* A unit still in STUB after load() means nothing was found — presumably
 * followed by an elided jump to the failure path with -ENOENT. */
1197 if (u->load_state == UNIT_STUB) {
1202 if (u->load_state == UNIT_LOADED) {
1204 r = unit_add_target_dependencies(u);
1208 r = unit_add_slice_dependencies(u);
1212 r = unit_add_mount_dependencies(u);
1216 r = unit_add_startup_units(u);
/* isolate mode only makes sense with exactly one OnFailure= dependency. */
1220 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1221 log_unit_error(u->id, "More than one OnFailure= dependencies specified for %s but OnFailureJobMode=isolate set. Refusing.", u->id);
1226 unit_update_cgroup_members_masks(u);
/* Invariant: merged_into is set iff the load state is UNIT_MERGED. */
1229 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1231 unit_add_to_dbus_queue(unit_follow_merge(u));
1232 unit_add_to_gc_queue(u);
/* Failure path: record why loading failed and requeue. */
1237 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1239 unit_add_to_dbus_queue(u);
1240 unit_add_to_gc_queue(u);
1242 log_unit_debug(u->id, "Failed to load configuration for %s: %s",
1243 u->id, strerror(-r));
/* Evaluate a condition list (shared by Condition*= and Assert*= handling).
 * Semantics: all non-trigger conditions must hold, and at least one trigger
 * condition (if any exist) must hold. An evaluation error is logged and
 * treated as failed. Returns the combined verdict. */
1248 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1255 /* If the condition list is empty, then it is true */
1259 /* Otherwise, if all of the non-trigger conditions apply and
1260 * if any of the trigger conditions apply (unless there are
1261 * none) we return true */
1262 LIST_FOREACH(conditions, c, first) {
1265 r = condition_test(c);
1267 log_unit_warning(u->id,
1268 "Couldn't determine result for %s=%s%s%s for %s, assuming failed: %s",
1270 c->trigger ? "|" : "",
1271 c->negate ? "!" : "",
1276 log_unit_debug(u->id,
1277 "%s=%s%s%s %s for %s.",
1279 c->trigger ? "|" : "",
1280 c->negate ? "!" : "",
1282 condition_result_to_string(c->result),
/* Any failing non-trigger condition is fatal — presumably an elided
 * "return false" follows. */
1285 if (!c->trigger && r <= 0)
/* Track whether at least one trigger condition succeeded; -1 = none seen yet. */
1288 if (c->trigger && triggered <= 0)
1292 return triggered != 0;
/* Evaluate all Condition*= settings, recording timestamp and result on the
 * unit for later introspection. */
1295 static bool unit_condition_test(Unit *u) {
1298 dual_timestamp_get(&u->condition_timestamp);
1299 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1301 return u->condition_result;
/* Evaluate all Assert*= settings; same mechanics as unit_condition_test(),
 * but a failure here makes unit_start() fail outright instead of skipping. */
1304 static bool unit_assert_test(Unit *u) {
1307 dual_timestamp_get(&u->assert_timestamp);
1308 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1310 return u->assert_result;
/* Look up the type-specific console status string for a start/stop job, or
 * NULL-equivalent for other job types (the early return value is elided). */
1313 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1314 const UnitStatusMessageFormats *format_table;
1318 assert(t < _JOB_TYPE_MAX);
1320 if (t != JOB_START && t != JOB_STOP)
1323 format_table = &UNIT_VTABLE(u)->status_message_formats;
/* Index 0 = starting, 1 = stopping. */
1327 return format_table->starting_stopping[t == JOB_STOP];
/* Like unit_get_status_message_format(), but never comes back empty for
 * start/stop/reload: falls back to generic "Starting/Stopping/Reloading %s."
 * strings when the unit type defines none. */
1330 _pure_ static const char *unit_get_status_message_format_try_harder(Unit *u, JobType t) {
1335 assert(t < _JOB_TYPE_MAX);
1337 format = unit_get_status_message_format(u, t);
1341 /* Return generic strings */
1343 return "Starting %s.";
1344 else if (t == JOB_STOP)
1345 return "Stopping %s.";
1346 else if (t == JOB_RELOAD)
1347 return "Reloading %s.";
/* Print a console status line for a start/stop job — only for unit types that
 * define a format string (no generic fallback here, unlike the log variant). */
1352 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1357 /* We only print status messages for selected units on
1358 * selected operations. */
1360 format = unit_get_status_message_format(u, t);
/* The format string comes from a table, not a literal — suppress the
 * -Wformat-nonliteral warning around the call. */
1364 DISABLE_WARNING_FORMAT_NONLITERAL;
1365 unit_status_printf(u, "", format);
/* Emit a structured journal entry for a start/stop/reload transition, tagged
 * with the matching SD_MESSAGE_UNIT_* message id. Skipped when logging goes to
 * the console (the console path already prints its own status line). */
1369 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1376 if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
1379 if (log_on_console())
1382 /* We log status messages for all units and all operations. */
1384 format = unit_get_status_message_format_try_harder(u, t);
1388 DISABLE_WARNING_FORMAT_NONLITERAL;
1389 snprintf(buf, sizeof(buf), format, unit_description(u));
/* Pick the catalog message id matching the transition. */
1393 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1394 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1395 SD_MESSAGE_UNIT_RELOADING;
1397 log_unit_struct(u->id,
1399 LOG_MESSAGE_ID(mid),
1400 LOG_MESSAGE("%s", buf),
/* Errors (documented for callers):
1405 * -EBADR: This unit type does not support starting.
1406 * -EALREADY: Unit is already started.
1407 * -EAGAIN: An operation is already in progress. Retry later.
1408 * -ECANCELED: Too many requests for now.
1409 * -EPROTO: Assert failed
 */
1411 int unit_start(Unit *u) {
1412 UnitActiveState state;
1417 if (u->load_state != UNIT_LOADED)
1420 /* If this is already started, then this will succeed. Note
1421 * that this will even succeed if this unit is not startable
1422 * by the user. This is relied on to detect when we need to
1423 * wait for units and when waiting is finished. */
1424 state = unit_active_state(u);
1425 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1428 /* If the conditions failed, don't do anything at all. If we
1429 * already are activating this call might still be useful to
1430 * speed up activation in case there is some hold-off time,
1431 * but we don't want to recheck the condition in that case. */
1432 if (state != UNIT_ACTIVATING &&
1433 !unit_condition_test(u)) {
1434 log_unit_debug(u->id, "Starting of %s requested but condition failed. Not starting unit.", u->id);
1438 /* If the asserts failed, fail the entire job */
1439 if (state != UNIT_ACTIVATING &&
1440 !unit_assert_test(u)) {
1441 log_unit_debug(u->id, "Starting of %s requested but asserts failed.", u->id);
1445 /* Forward to the main object, if we aren't it. */
1446 following = unit_following(u);
1448 log_unit_debug(u->id, "Redirecting start request from %s to %s.", u->id, following->id);
1449 return unit_start(following);
1452 unit_status_log_starting_stopping_reloading(u, JOB_START);
1453 unit_status_print_starting_stopping(u, JOB_START);
1455 /* If it is stopped, but we cannot start it, then fail */
1456 if (!UNIT_VTABLE(u)->start)
1459 /* We don't suppress calls to ->start() here when we are
1460 * already starting, to allow this request to be used as a
1461 * "hurry up" call, for example when the unit is in some "auto
1462 * restart" state where it waits for a holdoff timer to elapse
1463 * before it will start again. */
1465 unit_add_to_dbus_queue(u);
1467 return UNIT_VTABLE(u)->start(u);
/* A unit can be started iff its type provides a start() hook. */
1470 bool unit_can_start(Unit *u) {
1473 return !!UNIT_VTABLE(u)->start;
/* Isolation requires the unit to be startable (and, per the elided second
 * operand, presumably the allow_isolate flag). */
1476 bool unit_can_isolate(Unit *u) {
1479 return unit_can_start(u) &&
/* Errors (documented for callers):
1484 * -EBADR: This unit type does not support stopping.
1485 * -EALREADY: Unit is already stopped.
1486 * -EAGAIN: An operation is already in progress. Retry later.
 */
1488 int unit_stop(Unit *u) {
1489 UnitActiveState state;
1494 state = unit_active_state(u);
1495 if (UNIT_IS_INACTIVE_OR_FAILED(state))
/* Forward to the main object, if we aren't it. */
1498 if ((following = unit_following(u))) {
1499 log_unit_debug(u->id, "Redirecting stop request from %s to %s.",
1500 u->id, following->id);
1501 return unit_stop(following);
1504 unit_status_log_starting_stopping_reloading(u, JOB_STOP);
1505 unit_status_print_starting_stopping(u, JOB_STOP);
1507 if (!UNIT_VTABLE(u)->stop)
1510 unit_add_to_dbus_queue(u);
1512 return UNIT_VTABLE(u)->stop(u);
/* Errors (documented for callers):
1516 * -EBADR: This unit type does not support reloading.
1517 * -ENOEXEC: Unit is not started.
1518 * -EAGAIN: An operation is already in progress. Retry later.
 */
1520 int unit_reload(Unit *u) {
1521 UnitActiveState state;
1526 if (u->load_state != UNIT_LOADED)
1529 if (!unit_can_reload(u))
1532 state = unit_active_state(u);
1533 if (state == UNIT_RELOADING)
/* Only an active unit can be reloaded. */
1536 if (state != UNIT_ACTIVE) {
1537 log_unit_warning(u->id, "Unit %s cannot be reloaded because it is inactive.",
/* Forward to the main object, if we aren't it. */
1542 following = unit_following(u);
1544 log_unit_debug(u->id, "Redirecting reload request from %s to %s.",
1545 u->id, following->id);
1546 return unit_reload(following);
1549 unit_status_log_starting_stopping_reloading(u, JOB_RELOAD);
1551 unit_add_to_dbus_queue(u);
1552 return UNIT_VTABLE(u)->reload(u);
/* A unit is reloadable iff its type has a reload() hook and, when a
 * can_reload() hook exists, that hook also agrees for this instance. */
1555 bool unit_can_reload(Unit *u) {
1558 if (!UNIT_VTABLE(u)->reload)
1561 if (!UNIT_VTABLE(u)->can_reload)
1564 return UNIT_VTABLE(u)->can_reload(u);
/* StopWhenUnneeded= handling: if the unit is active but no unit that requires,
 * wants or binds to it is still active or pending, enqueue a stop job.
 * Each SET_FOREACH presumably returns early on the first active dependent
 * (the return statements are elided in this extract). */
1567 static void unit_check_unneeded(Unit *u) {
1573 /* If this service shall be shut down when unneeded then do
1576 if (!u->stop_when_unneeded)
1579 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1582 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY], i)
1583 if (unit_active_or_pending(other))
1586 SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
1587 if (unit_active_or_pending(other))
1590 SET_FOREACH(other, u->dependencies[UNIT_WANTED_BY], i)
1591 if (unit_active_or_pending(other))
1594 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1595 if (unit_active_or_pending(other))
1598 log_unit_info(u->id, "Unit %s is not needed anymore. Stopping.", u->id);
1600 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1601 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* BindsTo= handling: if this active unit is bound to a unit that has gone
 * inactive/failed, enqueue a stop job for this unit too.
 * NOTE(review): the loop's early-continue/flag logic is elided in this extract. */
1604 static void unit_check_binds_to(Unit *u) {
1614 if (unit_active_state(u) != UNIT_ACTIVE)
1617 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1621 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1630 log_unit_info(u->id, "Unit %s is bound to inactive service. Stopping, too.", u->id);
1632 /* A unit we need to run is gone. Sniff. Let's stop this. */
1633 manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
/* A unit became active outside of a job (e.g. externally): pull in its
 * forward dependencies retroactively. Requires=/BindsTo= peers get mandatory
 * (JOB_REPLACE) start jobs, overridable/Wants= peers get best-effort
 * (JOB_FAIL, non-override) ones — but only for peers not ordered After us,
 * since those would have been started already. Conflicts are stopped. */
1636 static void retroactively_start_dependencies(Unit *u) {
1641 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1643 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1644 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1645 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1646 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1648 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1649 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1650 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1651 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1653 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1654 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1655 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1656 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1658 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1659 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1660 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1661 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
/* Conflicting units are torn down with mandatory stop jobs. */
1663 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1664 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1665 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1667 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1668 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1669 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* A unit went down outside of a job: stop every unit bound to us that is
 * still up (BindsTo= propagates teardown). */
1672 static void retroactively_stop_dependencies(Unit *u) {
1677 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1679 /* Pull down units which are bound to us recursively if enabled */
1680 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1681 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1682 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
/* After this unit went down, ask every still-running unit that this
 * unit depended on (Requires=, Wants=, Requisite=, BindsTo= and the
 * overridable variants) to re-evaluate whether anything still needs
 * it; unit_check_unneeded() presumably stops it if it has
 * StopWhenUnneeded= set and no remaining dependents — TODO confirm,
 * its body is not visible here. */
1685 static void check_unneeded_dependencies(Unit *u) {
1690         assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1692         /* Garbage collect services that might not be needed anymore, if enabled */
1693         SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1694                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1695                         unit_check_unneeded(other);
1696         SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1697                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1698                         unit_check_unneeded(other);
1699         SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1700                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1701                         unit_check_unneeded(other);
1702         SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1703                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1704                         unit_check_unneeded(other);
1705         SET_FOREACH(other, u->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1706                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1707                         unit_check_unneeded(other);
1708         SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1709                 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1710                         unit_check_unneeded(other);
/* Enqueue start jobs for all OnFailure= units, using the unit's
 * configured OnFailureJobMode=. Failure to enqueue one job is logged
 * but does not stop the loop over the remaining units. */
1713 void unit_start_on_failure(Unit *u) {
1719         if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1722         log_unit_info(u->id, "Triggering OnFailure= dependencies of %s.", u->id);
1724         SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1727                 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, true, NULL, NULL);
1729                         log_unit_error_errno(u->id, r, "Failed to enqueue OnFailure= job: %m");
/* Notify every unit that has a Triggers= on us (our TriggeredBy= set)
 * about our state change, via its optional trigger_notify() vtable
 * callback. */
1733 void unit_trigger_notify(Unit *u) {
1739         SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1740                 if (UNIT_VTABLE(other)->trigger_notify)
1741                         UNIT_VTABLE(other)->trigger_notify(other, u);
/* Central state-change hook, invoked by each unit type's
 * implementation whenever its (low-level) state changed from os to ns.
 * Responsibilities, in order: update state-change timestamps, track
 * the failed-units set, tear down the cgroup on inactivity, maintain
 * the console-user count, drive any pending job forward, retroactively
 * start/stop dependencies, fire OnFailure=, write audit/plymouth
 * records, and finally queue D-Bus/GC updates. Skipped pieces while
 * the manager is reloading (m->n_reloading > 0), since deserialization
 * replays states that were already acted upon. */
1744 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1749         assert(os < _UNIT_ACTIVE_STATE_MAX);
1750         assert(ns < _UNIT_ACTIVE_STATE_MAX);
1752         /* Note that this is called for all low-level state changes,
1753          * even if they might map to the same high-level
1754          * UnitActiveState! That means that ns == os is an expected
1755          * behavior here. For example: if a mount point is remounted
1756          * this function will be called too! */
1760         /* Update timestamps for state changes */
1761         if (m->n_reloading <= 0) {
1764                 dual_timestamp_get(&ts);
1766                 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1767                         u->inactive_exit_timestamp = ts;
1768                 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1769                         u->inactive_enter_timestamp = ts;
1771                 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1772                         u->active_enter_timestamp = ts;
1773                 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1774                         u->active_exit_timestamp = ts;
1777         /* Keep track of failed units */
1778         if (ns == UNIT_FAILED)
1779                 set_put(u->manager->failed_units, u);
1781                 set_remove(u->manager->failed_units, u);
1783         /* Make sure the cgroup is always removed when we become inactive */
1784         if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1785                 unit_destroy_cgroup(u);
1787         /* Note that this doesn't apply to RemainAfterExit services exiting
1788          * successfully, since there's no change of state in that case. Which is
1789          * why it is handled in service_set_state() */
1790         if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
/* Units whose ExecContext may write to the console participate in the
 * manager's n_on_console bookkeeping; elided lines presumably adjust
 * the counter — TODO confirm against full source. */
1793                 ec = unit_get_exec_context(u);
1794                 if (ec && exec_context_may_touch_console(ec)) {
1795                         if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1798                                 if (m->n_on_console == 0)
1799                                         /* unset no_console_output flag, since the console is free */
1800                                         m->no_console_output = false;
/* If a job is installed on this unit, see whether the state change
 * completes it, fails it, or lets a previously-EAGAIN'ed job run. */
1809                 if (u->job->state == JOB_WAITING)
1811                         /* So we reached a different state for this
1812                          * job. Let's see if we can run it now if it
1813                          * failed previously due to EAGAIN. */
1814                         job_add_to_run_queue(u->job);
1816                 /* Let's check whether this state change constitutes a
1817                  * finished job, or maybe contradicts a running job and
1818                  * hence needs to invalidate jobs. */
1820                 switch (u->job->type) {
1823                 case JOB_VERIFY_ACTIVE:
1825                         if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1826                                 job_finish_and_invalidate(u->job, JOB_DONE, true);
1827                         else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1830                                 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1831                                         job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1837                 case JOB_RELOAD_OR_START:
1839                         if (u->job->state == JOB_RUNNING) {
1840                                 if (ns == UNIT_ACTIVE)
1841                                         job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1842                                 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1845                                         if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1846                                                 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1854                 case JOB_TRY_RESTART:
1856                         if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1857                                 job_finish_and_invalidate(u->job, JOB_DONE, true);
1858                         else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1860                                 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1866                         assert_not_reached("Job type unknown");
1872         if (m->n_reloading <= 0) {
1874                 /* If this state change happened without being
1875                  * requested by a job, then let's retroactively start
1876                  * or stop dependencies. We skip that step when
1877                  * deserializing, since we don't want to create any
1878                  * additional jobs just because something is already
1882                 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1883                         retroactively_start_dependencies(u);
1884                 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1885                         retroactively_stop_dependencies(u);
1888                 /* stop unneeded units regardless if going down was expected or not */
1889                 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1890                         check_unneeded_dependencies(u);
1892                 if (ns != os && ns == UNIT_FAILED) {
1893                         log_unit_notice(u->id, "Unit %s entered failed state.", u->id);
1894                         unit_start_on_failure(u);
1898         /* Some names are special */
1899         if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1901                 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1902                         /* The bus might have just become available,
1903                          * hence try to connect to it, if we aren't
1907                 if (u->type == UNIT_SERVICE &&
1908                     !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1909                     m->n_reloading <= 0) {
1910                         /* Write audit record if we have just finished starting up */
1911                         manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1915                 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1916                         manager_send_unit_plymouth(m, u);
1920                 /* We don't care about D-Bus here, since we'll get an
1921                  * asynchronous notification for it anyway. */
1923                 if (u->type == UNIT_SERVICE &&
1924                     UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1925                     !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1926                     m->n_reloading <= 0) {
1928                         /* Hmm, if there was no start record written
1929                          * write it now, so that we always have a nice
1932                         manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1934                         if (ns == UNIT_INACTIVE)
1935                                 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1937                         /* Write audit record if we have just finished shutting down */
1938                         manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
1940                         u->in_audit = false;
1944         manager_recheck_journal(m);
1945         unit_trigger_notify(u);
1947         if (u->manager->n_reloading <= 0) {
1948                 /* Maybe we finished startup and are now ready for
1949                  * being stopped because unneeded? */
1950                 unit_check_unneeded(u);
1952                 /* Maybe we finished startup, but something we needed
1953                  * has vanished? Let's die then. (This happens when
1954                  * something BindsTo= to a Type=oneshot unit, as these
1955                  * units go directly from starting to inactive,
1956                  * without ever entering started.) */
1957                 unit_check_binds_to(u);
1960         unit_add_to_dbus_queue(u);
1961         unit_add_to_gc_queue(u);
/* Register interest in a PID: record it in the unit's own pid set and
 * in one of the manager's two pid->unit hashmaps (two maps because a
 * hashmap holds only one value per key, so at most two units may watch
 * the same PID). PIDs are stored as pointers via LONG_TO_PTR. Returns
 * 0/positive on success, negative errno-style code on OOM. */
1964 int unit_watch_pid(Unit *u, pid_t pid) {
1970         /* Watch a specific PID. We only support one or two units
1971          * watching each PID for now, not more. */
1973         r = set_ensure_allocated(&u->pids, NULL);
1977         r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
1981         r = hashmap_put(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
/* First map already taken (presumably r == -EEXIST on the line above
 * — elided here): fall back to the second map. */
1983                 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
1987                 r = hashmap_put(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
1990         q = set_put(u->pids, LONG_TO_PTR(pid));
/* Drop a watched PID: remove this unit's entry from both manager maps
 * (remove_value only removes if the stored value is this unit) and
 * from the unit's own pid set. Safe to call for PIDs never watched. */
1997 void unit_unwatch_pid(Unit *u, pid_t pid) {
2001         hashmap_remove_value(u->manager->watch_pids1, LONG_TO_PTR(pid), u);
2002         hashmap_remove_value(u->manager->watch_pids2, LONG_TO_PTR(pid), u);
2003         set_remove(u->pids, LONG_TO_PTR(pid));
/* Unwatch every PID this unit tracks; each unit_unwatch_pid() call
 * shrinks u->pids until it is empty. */
2006 void unit_unwatch_all_pids(Unit *u) {
2009         while (!set_isempty(u->pids))
2010                 unit_unwatch_pid(u, PTR_TO_LONG(set_first(u->pids)));
/* Recursively add every PID found in the given cgroup path (and its
 * subgroups) to the unit's watched-PID set. Collects the first error
 * in 'ret' but keeps going, so one unreadable subgroup does not stop
 * enumeration of the rest. */
2016 static int unit_watch_pids_in_path(Unit *u, const char *path) {
2017         _cleanup_closedir_ DIR *d = NULL;
2018         _cleanup_fclose_ FILE *f = NULL;
2024         /* Adds all PIDs from a specific cgroup path to the set of PIDs we watch. */
2026         r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2030                 while ((r = cg_read_pid(f, &pid)) > 0) {
2031                         r = unit_watch_pid(u, pid);
2032                         if (r < 0 && ret >= 0)
2035                 if (r < 0 && ret >= 0)
2038         } else if (ret >= 0)
/* Then recurse into child cgroups. */
2041         r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2045                 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2046                         _cleanup_free_ char *p = NULL;
2048                         p = strjoin(path, "/", fn, NULL);
2054                         r = unit_watch_pids_in_path(u, p);
2055                         if (r < 0 && ret >= 0)
2058                 if (r < 0 && ret >= 0)
2061         } else if (ret >= 0)
/* Watch every PID currently in the unit's own cgroup; a unit without
 * a cgroup has nothing to watch (return value for that case is in an
 * elided line). */
2067 int unit_watch_all_pids(Unit *u) {
2070         /* Adds all PIDs from our cgroup to the set of PIDs we watch */
2072         if (!u->cgroup_path)
2075         return unit_watch_pids_in_path(u, u->cgroup_path);
/* Prune the watched-PID set: drop every PID that has already been
 * waited for (no longer unwaited), except the two caller-supplied PIDs
 * (typically the main and control PID, which are reaped elsewhere). */
2078 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2084         /* Cleans dead PIDs from our list */
2086         SET_FOREACH(e, u->pids, i) {
2087                 pid_t pid = PTR_TO_LONG(e);
2089                 if (pid == except1 || pid == except2)
2092                 if (!pid_is_unwaited(pid))
2093                         unit_unwatch_pid(u, pid);
/* Decide whether a job type makes sense for this unit: restart-style
 * jobs require the unit to be startable, reload requires reloadability,
 * and combined reload-or-start requires both. Cases for trivially
 * applicable types (stop, verify-active, ...) are partially elided. */
2097 bool unit_job_is_applicable(Unit *u, JobType j) {
2099         assert(j >= 0 && j < _JOB_TYPE_MAX);
2103         case JOB_VERIFY_ACTIVE:
2110         case JOB_TRY_RESTART:
2111                 return unit_can_start(u);
2114                 return unit_can_reload(u);
2116         case JOB_RELOAD_OR_START:
2117                 return unit_can_reload(u) && unit_can_start(u);
2120                 assert_not_reached("Invalid job type");
/* Emit a warning when a dependency is dropped because, after unit
 * merging, it would point at the unit itself. Only "interesting"
 * dependency types warn; purely structural ones (Before=/After= and
 * similar — grouped in the elided case labels) are silently ignored.
 * The message distinguishes a literal self-reference from one created
 * by merging two names. */
2124 static int maybe_warn_about_dependency(const char *id, const char *other, UnitDependency dependency) {
2127         switch (dependency) {
2129         case UNIT_REQUIRES_OVERRIDABLE:
2131         case UNIT_REQUISITE:
2132         case UNIT_REQUISITE_OVERRIDABLE:
2135         case UNIT_REQUIRED_BY:
2136         case UNIT_REQUIRED_BY_OVERRIDABLE:
2137         case UNIT_WANTED_BY:
2139         case UNIT_CONSISTS_OF:
2140         case UNIT_REFERENCES:
2141         case UNIT_REFERENCED_BY:
2142         case UNIT_PROPAGATES_RELOAD_TO:
2143         case UNIT_RELOAD_PROPAGATED_FROM:
2144         case UNIT_JOINS_NAMESPACE_OF:
2147         case UNIT_CONFLICTS:
2148         case UNIT_CONFLICTED_BY:
2151         case UNIT_ON_FAILURE:
2153         case UNIT_TRIGGERED_BY:
2154                 if (streq_ptr(id, other))
2155                         log_unit_warning(id, "Dependency %s=%s dropped from unit %s",
2156                                          unit_dependency_to_string(dependency), id, other);
2158                         log_unit_warning(id, "Dependency %s=%s dropped from unit %s merged into %s",
2159                                          unit_dependency_to_string(dependency), id,
2163         case _UNIT_DEPENDENCY_MAX:
2164         case _UNIT_DEPENDENCY_INVALID:
2168         assert_not_reached("Invalid dependency type");
/* Record a dependency of type d from u on other, plus the inverse
 * dependency on the other side (per inverse_table; entries mapping to
 * _UNIT_DEPENDENCY_INVALID have no inverse). With add_reference, also
 * records References=/ReferencedBy= so GC keeps the pair alive. Both
 * units are first resolved through merging; a resulting self-dependency
 * is dropped with a warning rather than treated as an error. On
 * failure the already-inserted set entries are rolled back (labels in
 * the elided tail). */
2171 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2173         static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2174                 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2175                 [UNIT_REQUIRES_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2176                 [UNIT_WANTS] = UNIT_WANTED_BY,
2177                 [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
2178                 [UNIT_REQUISITE_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2179                 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2180                 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2181                 [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
2182                 [UNIT_REQUIRED_BY_OVERRIDABLE] = _UNIT_DEPENDENCY_INVALID,
2183                 [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
2184                 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2185                 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2186                 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2187                 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2188                 [UNIT_BEFORE] = UNIT_AFTER,
2189                 [UNIT_AFTER] = UNIT_BEFORE,
2190                 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2191                 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2192                 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2193                 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2194                 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2195                 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2196                 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2197                 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2199         int r, q = 0, v = 0, w = 0;
2200         Unit *orig_u = u, *orig_other = other;
2203         assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2206         u = unit_follow_merge(u);
2207         other = unit_follow_merge(other);
2209         /* We won't allow dependencies on ourselves. We will not
2210          * consider them an error however. */
2212                 maybe_warn_about_dependency(orig_u->id, orig_other->id, d);
/* Pre-allocate all needed sets before inserting anything, so a later
 * OOM cannot leave a half-created dependency. */
2216         r = set_ensure_allocated(&u->dependencies[d], NULL);
2220         if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2221                 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2226         if (add_reference) {
2227                 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2231                 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2236         q = set_put(u->dependencies[d], other);
/* Skip the inverse insert for self-inverse types (JoinsNamespaceOf=)
 * when both directions are the same set. */
2240         if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2241                 v = set_put(other->dependencies[inverse_table[d]], u);
2248         if (add_reference) {
2249                 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2255                 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2260         unit_add_to_dbus_queue(u);
/* Error-path rollback: undo the successful set_put()s above. */
2265         set_remove(u->dependencies[d], other);
2268         set_remove(other->dependencies[inverse_table[d]], u);
2271         set_remove(u->dependencies[UNIT_REFERENCES], other);
/* Convenience wrapper: add two dependency types d and e from u on the
 * same target unit, stopping at the first failure. */
2276 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2281         r = unit_add_dependency(u, d, other, add_reference);
2285         r = unit_add_dependency(u, e, other, add_reference);
/* Resolve a possibly-templated unit name ("foo@.service") to a
 * concrete name: instantiate with u's own instance string if it has
 * one, otherwise with u's prefix. Non-template names pass through
 * unchanged. *p receives any newly allocated string so the caller can
 * free it; the returned pointer is either 'name' or *p. */
2292 static const char *resolve_template(Unit *u, const char *name, const char*path, char **p) {
2296         assert(name || path);
2300                 name = basename(path);
2302         if (!unit_name_is_template(name)) {
2308                 s = unit_name_replace_instance(name, u->instance);
2310                 _cleanup_free_ char *i = NULL;
2312                 i = unit_name_to_prefix(u->id);
2316                 s = unit_name_replace_instance(name, i);
/* Like unit_add_dependency(), but the target is given by name (or
 * fragment path): resolve templates, load the unit, then add the
 * dependency. */
2326 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2329         _cleanup_free_ char *s = NULL;
2332         assert(name || path);
2334         name = resolve_template(u, name, path, &s);
2338         r = manager_load_unit(u->manager, name, path, NULL, &other);
2342         return unit_add_dependency(u, d, other, add_reference);
/* Name-based variant of unit_add_two_dependencies(): resolve and load
 * the target once, then add both dependency types. */
2345 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2346         _cleanup_free_ char *s = NULL;
2351         assert(name || path);
2353         name = resolve_template(u, name, path, &s);
2357         r = manager_load_unit(u->manager, name, path, NULL, &other);
2361         return unit_add_two_dependencies(u, d, e, other, add_reference);
/* Inverse direction: load the named unit and make IT depend (type d)
 * on u, rather than the other way around. */
2364 int unit_add_dependency_by_name_inverse(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2367         _cleanup_free_ char *s = NULL;
2370         assert(name || path);
2372         name = resolve_template(u, name, path, &s);
2376         r = manager_load_unit(u->manager, name, path, NULL, &other);
2380         return unit_add_dependency(other, d, u, add_reference);
/* Inverse-direction variant adding two dependency types from the
 * named unit onto u. */
2383 int unit_add_two_dependencies_by_name_inverse(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2386         _cleanup_free_ char *s = NULL;
2389         assert(name || path);
2391         name = resolve_template(u, name, path, &s);
2395         r = manager_load_unit(u->manager, name, path, NULL, &other);
2399         r = unit_add_two_dependencies(other, d, e, u, add_reference);
/* Override the unit search path via $SYSTEMD_UNIT_PATH. The 0 flag
 * means an already-set value in the environment is NOT overwritten. */
2406 int set_unit_path(const char *p) {
2407         /* This is mostly for debug purposes */
2408         if (setenv("SYSTEMD_UNIT_PATH", p, 0) < 0)
/* Return a newly allocated D-Bus object path for this unit, derived
 * from its primary name. Caller frees. */
2414 char *unit_dbus_path(Unit *u) {
2420         return unit_dbus_path_from_name(u->id);
/* Compute the unit's default cgroup path: the manager's cgroup root,
 * optionally the containing slice's path (unless it is the root
 * slice), then the cgroup-escaped unit name. Returns a newly
 * allocated string (caller frees) or NULL on OOM. */
2423 char *unit_default_cgroup_path(Unit *u) {
2424         _cleanup_free_ char *escaped = NULL, *slice = NULL;
2429         if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2430                 return strdup(u->manager->cgroup_root);
2432         if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
2433                 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
2438         escaped = cg_escape(u->id);
2443                 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
2445                 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
/* Assign the unit its default slice if none is configured yet:
 * instantiated units get a per-template slice (prefix with "-"
 * re-escaped, since "-" separates slice hierarchy levels), otherwise
 * system.slice when running as the system manager and the root slice
 * for a user manager. Loads the slice unit and stores the reference. */
2448 int unit_add_default_slice(Unit *u, CGroupContext *c) {
2449         _cleanup_free_ char *b = NULL;
2450         const char *slice_name;
2457         if (UNIT_ISSET(u->slice))
2461                 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2463                 /* Implicitly place all instantiated units in their
2464                  * own per-template slice */
2466                 prefix = unit_name_to_prefix(u->id);
2470                 /* The prefix is already escaped, but it might include
2471                  * "-" which has a special meaning for slice units,
2472                  * hence escape it here extra. */
2473                 escaped = strreplace(prefix, "-", "\\x2d");
2477                 if (u->manager->running_as == SYSTEMD_SYSTEM)
2478                         b = strjoin("system-", escaped, ".slice", NULL);
2480                         b = strappend(escaped, ".slice");
2487                 u->manager->running_as == SYSTEMD_SYSTEM
2488                 ? SPECIAL_SYSTEM_SLICE
2489                 : SPECIAL_ROOT_SLICE;
2491         r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2495         unit_ref_set(&u->slice, slice);
/* Return the id of the unit's slice, or (per the elided branch)
 * presumably NULL when no slice is set — TODO confirm. */
2499 const char *unit_slice_name(Unit *u) {
2502         if (!UNIT_ISSET(u->slice))
2505         return UNIT_DEREF(u->slice)->id;
/* Load the sibling unit that shares this unit's name but carries a
 * different type suffix (e.g. foo.service -> foo.socket) and return it
 * in *_found. Asserts the derived name is not one of our own names,
 * so the result can never be u itself. */
2508 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2509         _cleanup_free_ char *t = NULL;
2516         t = unit_name_change_suffix(u->id, type);
2520         assert(!unit_has_name(u, t));
2522         r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2523         assert(r < 0 || *_found != u);
/* Register this unit as the (single) watcher of a bus name in the
 * manager's watch_bus map; hashmap_put fails if the name is already
 * watched by another unit. */
2527 int unit_watch_bus_name(Unit *u, const char *name) {
2531         /* Watch a specific name on the bus. We only support one unit
2532          * watching each name for now. */
2534         return hashmap_put(u->manager->watch_bus, name, u);
/* Remove the bus-name watch, but only if it is held by this unit. */
2537 void unit_unwatch_bus_name(Unit *u, const char *name) {
2541         hashmap_remove_value(u->manager->watch_bus, name, u);
/* A unit type supports serialization only if its vtable provides both
 * the serialize and deserialize_item callbacks. */
2544 bool unit_can_serialize(Unit *u) {
2547         return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
/* Write the unit's state as key=value lines to f (passing any fds
 * through fds), for daemon re-execution/reload: type-specific state
 * first, then exec runtime, timestamps, condition/assert results,
 * transient flag, cgroup path, and finally the installed job(s) when
 * serialize_jobs is set. */
2550 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2557         if (unit_can_serialize(u)) {
2560                 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2564                 rt = unit_get_exec_runtime(u);
2566                         r = exec_runtime_serialize(rt, u, f, fds);
2572         dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2573         dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2574         dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2575         dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2576         dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2577         dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
/* Condition/assert results only make sense once checked at least
 * once, hence the timestamp guards. */
2579         if (dual_timestamp_is_set(&u->condition_timestamp))
2580                 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2582         if (dual_timestamp_is_set(&u->assert_timestamp))
2583                 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2585         unit_serialize_item(u, f, "transient", yes_no(u->transient));
2588                 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2590         if (serialize_jobs) {
2592                         fprintf(f, "job\n");
2593                         job_serialize(u->job, f, fds);
2597                         fprintf(f, "job\n");
2598                         job_serialize(u->nop_job, f, fds);
/* printf-style helper for writing one serialization entry; key/format
 * handling around the varargs is elided here. */
2607 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2618         va_start(ap, format);
2619         vfprintf(f, format, ap);
/* Write a single "key=value" serialization line. */
2625 void unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2631         fprintf(f, "%s=%s\n", key, value);
/* Inverse of unit_serialize(): read "key=value" lines from f until the
 * unit's section ends, restoring job state, timestamps, condition/
 * assert results, the transient flag and the cgroup path; anything not
 * recognized here is offered to the exec runtime and then to the unit
 * type's deserialize_item callback. Parse errors for individual values
 * are logged and skipped rather than aborting deserialization. */
2634 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2635         ExecRuntime **rt = NULL;
2643         offset = UNIT_VTABLE(u)->exec_runtime_offset;
2645                 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2648                 char line[LINE_MAX], *l, *v;
2651                 if (!fgets(line, sizeof(line), f)) {
/* Split each line at the first '=' into key l and value v. */
2664                 k = strcspn(l, "=");
2672                 if (streq(l, "job")) {
2674                                 /* new-style serialized job */
2675                                 Job *j = job_new_raw(u);
2679                                 r = job_deserialize(j, f, fds);
2685                                 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2691                                 r = job_install_deserialized(j);
2693                                         hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2698                                 if (j->state == JOB_RUNNING)
2699                                         u->manager->n_running_jobs++;
/* Old-style serialization stored only the job type; keep it in
 * deserialized_job for unit_coldplug() to re-enqueue later. */
2702                                 JobType type = job_type_from_string(v);
2704                                         log_debug("Failed to parse job type value %s", v);
2706                                         u->deserialized_job = type;
2709                 } else if (streq(l, "inactive-exit-timestamp")) {
2710                         dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2712                 } else if (streq(l, "active-enter-timestamp")) {
2713                         dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2715                 } else if (streq(l, "active-exit-timestamp")) {
2716                         dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2718                 } else if (streq(l, "inactive-enter-timestamp")) {
2719                         dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2721                 } else if (streq(l, "condition-timestamp")) {
2722                         dual_timestamp_deserialize(v, &u->condition_timestamp);
2724                 } else if (streq(l, "assert-timestamp")) {
2725                         dual_timestamp_deserialize(v, &u->assert_timestamp);
2727                 } else if (streq(l, "condition-result")) {
2730                         b = parse_boolean(v);
2732                                 log_debug("Failed to parse condition result value %s", v);
2734                                 u->condition_result = b;
2738                 } else if (streq(l, "assert-result")) {
2741                         b = parse_boolean(v);
2743                                 log_debug("Failed to parse assert result value %s", v);
2745                                 u->assert_result = b;
2749                 } else if (streq(l, "transient")) {
2752                         b = parse_boolean(v);
2754                                 log_debug("Failed to parse transient bool %s", v);
2759                 } else if (streq(l, "cgroup")) {
/* Replace any previously recorded cgroup path, keeping the manager's
 * cgroup->unit map consistent. */
2766                         if (u->cgroup_path) {
2769                                 p = hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2770                                 log_info("Removing cgroup_path %s from hashmap (%p)",
2772                                 free(u->cgroup_path);
2776                         assert(hashmap_put(u->manager->cgroup_unit, s, u) == 1);
2781                 if (unit_can_serialize(u)) {
2783                                 r = exec_runtime_deserialize_item(rt, u, l, v, fds);
2790                         r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
/* Tie this unit to the device unit backing the given device node:
 * After= + BindsTo= on the device, and (when 'wants' — used in the
 * elided branch, presumably — TODO confirm) a Wants= from the device
 * back to us so plugging the device pulls us in. */
2797 int unit_add_node_link(Unit *u, const char *what, bool wants) {
2799         _cleanup_free_ char *e = NULL;
2807         /* Adds in links to the device node that this unit is based on */
2809         if (!is_device_path(what))
2812         e = unit_name_from_path(what, ".device");
2816         r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2821         r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_BINDS_TO, device, true);
2826                 r = unit_add_dependency(device, UNIT_WANTS, u, false);
/* After deserialization: let the unit type restore its runtime state,
 * then re-attach either the fully deserialized job or, for old-style
 * serialization, re-enqueue the remembered job type (ignoring
 * requirement deps, since they were satisfied before the reload). */
2834 int unit_coldplug(Unit *u) {
2839         if (UNIT_VTABLE(u)->coldplug)
2840                 if ((r = UNIT_VTABLE(u)->coldplug(u)) < 0)
2844                 r = job_coldplug(u->job);
2847         } else if (u->deserialized_job >= 0) {
2849                 r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL);
2853                 u->deserialized_job = _JOB_TYPE_INVALID;
/* Print a one-line console status message for the unit, substituting
 * its description into the (trusted, caller-supplied) format string;
 * the format-nonliteral warning is suppressed deliberately. */
2859 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
2860         DISABLE_WARNING_FORMAT_NONLITERAL;
2861         manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
2862                               status, unit_status_msg_format, unit_description(u));
/* Heuristically decide whether on-disk configuration changed since the
 * unit was loaded: compare mtimes of the fragment and source files,
 * then compare the current drop-in file list (count and membership)
 * and the drop-ins' mtimes against what was loaded. Any mismatch (or
 * inability to stat a previously seen file) means a daemon-reload is
 * needed; return paths are in elided lines. */
2866 bool unit_need_daemon_reload(Unit *u) {
2867         _cleanup_strv_free_ char **t = NULL;
2870         unsigned loaded_cnt, current_cnt;
2874         if (u->fragment_path) {
2876                 if (stat(u->fragment_path, &st) < 0)
2877                         /* What, cannot access this anymore? */
2880                 if (u->fragment_mtime > 0 &&
2881                     timespec_load(&st.st_mtim) != u->fragment_mtime)
2885         if (u->source_path) {
2887                 if (stat(u->source_path, &st) < 0)
2890                 if (u->source_mtime > 0 &&
2891                     timespec_load(&st.st_mtim) != u->source_mtime)
2895         t = unit_find_dropin_paths(u);
2896         loaded_cnt = strv_length(t);
2897         current_cnt = strv_length(u->dropin_paths);
2899         if (loaded_cnt == current_cnt) {
2900                 if (loaded_cnt == 0)
2903                 if (strv_overlap(u->dropin_paths, t)) {
2904                         STRV_FOREACH(path, u->dropin_paths) {
2906                                 if (stat(*path, &st) < 0)
2909                                 if (u->dropin_mtime > 0 &&
2910                                     timespec_load(&st.st_mtim) > u->dropin_mtime)
/* Clear the unit's failure state via the type's optional
 * reset_failed() callback. */
2921 void unit_reset_failed(Unit *u) {
2924         if (UNIT_VTABLE(u)->reset_failed)
2925                 UNIT_VTABLE(u)->reset_failed(u);
/* Return the unit whose state this unit follows (vtable hook, e.g.
 * for device aliases); the no-hook return value is elided. */
2928 Unit *unit_following(Unit *u) {
2931         if (UNIT_VTABLE(u)->following)
2932                 return UNIT_VTABLE(u)->following(u);
/* True if a stop job is currently installed on the unit. */
2937 bool unit_stop_pending(Unit *u) {
2940         /* This call does check the current state of the unit. It's
2941          * hence useful to be called from state change calls of the
2942          * unit itself, where the state isn't updated yet. This is
2943          * different from unit_inactive_or_pending() which checks both
2944          * the current state and for a queued job. */
2946         return u->job && u->job->type == JOB_STOP;
/* True if the unit is currently inactive/deactivating, or has a stop
 * job pending. */
2949 bool unit_inactive_or_pending(Unit *u) {
2952         /* Returns true if the unit is inactive or going down */
2954         if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
2957         if (unit_stop_pending(u))
/* True if the unit is currently active/activating, or has a job
 * installed that will bring it up (start, reload-or-start, restart). */
2963 bool unit_active_or_pending(Unit *u) {
2966         /* Returns true if the unit is active or going up */
2968         if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
2972             (u->job->type == JOB_START ||
2973              u->job->type == JOB_RELOAD_OR_START ||
2974              u->job->type == JOB_RESTART))
/* Dispatch a kill request (who/signal) to the unit type's kill()
 * callback; the no-callback error return is elided. */
2980 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
2982         assert(w >= 0 && w < _KILL_WHO_MAX);
2984         assert(signo < _NSIG);
2986         if (!UNIT_VTABLE(u)->kill)
2989         return UNIT_VTABLE(u)->kill(u, w, signo, error);
/* Build a set holding the main and control PIDs (when > 0), used as an
 * exclusion list when signalling a whole cgroup. Returns NULL on OOM;
 * caller owns the set. */
2992 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
2996         pid_set = set_new(NULL);
3000         /* Exclude the main/control pids from being killed via the cgroup */
3002                 r = set_put(pid_set, LONG_TO_PTR(main_pid));
3007         if (control_pid > 0) {
3008                 r = set_put(pid_set, LONG_TO_PTR(control_pid));
/* Shared kill implementation used by unit types: validate that the
 * requested target (main/control process) exists, signal it directly,
 * and for KILL_ALL additionally signal every other process in the
 * unit's cgroup (excluding main/control, which were handled above).
 * A pid of < 0 distinguishes "this unit type never has such a process"
 * from "none right now" (pid == 0) in the error messages. */
3020 int unit_kill_common(
3026                 sd_bus_error *error) {
3030         if (who == KILL_MAIN && main_pid <= 0) {
3032                         return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3034                         return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3037         if (who == KILL_CONTROL && control_pid <= 0) {
3038                 if (control_pid < 0)
3039                         return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3041                         return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3044         if (who == KILL_CONTROL || who == KILL_ALL)
3045                 if (control_pid > 0)
3046                         if (kill(control_pid, signo) < 0)
3049         if (who == KILL_MAIN || who == KILL_ALL)
3051                         if (kill(main_pid, signo) < 0)
3054         if (who == KILL_ALL && u->cgroup_path) {
3055                 _cleanup_set_free_ Set *pid_set = NULL;
3058                 /* Exclude the main/control pids from being killed via the cgroup */
3059                 pid_set = unit_pid_set(main_pid, control_pid);
3063                 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, true, false, pid_set);
/* ESRCH/ENOENT/EAGAIN from an emptying cgroup are expected and not
 * treated as failure. */
3064                 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
/* Collect the set of units following this one via the optional vtable
 * hook; default behavior when absent is elided. */
3071 int unit_following_set(Unit *u, Set **s) {
3075         if (UNIT_VTABLE(u)->following_set)
3076                 return UNIT_VTABLE(u)->following_set(u, s);
/* Lazily determine and cache the enable/disable state of the unit's
 * fragment file (system vs. user scope picked from running_as). */
3082 UnitFileState unit_get_unit_file_state(Unit *u) {
3085         if (u->unit_file_state < 0 && u->fragment_path)
3086                 u->unit_file_state = unit_file_get_state(
3087                                 u->manager->running_as == SYSTEMD_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3088                                 NULL, basename(u->fragment_path));
3090         return u->unit_file_state;
/* Point a UnitRef at a unit: release any previous target first, then
 * link the ref into the unit's refs list so it can be invalidated when
 * the unit goes away. */
3093 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3098                 unit_ref_unset(ref);
3101         LIST_PREPEND(refs, u->refs, ref);
/* Detach a UnitRef from its target's refs list (clearing of
 * ref->unit happens in elided lines). */
3105 void unit_ref_unset(UnitRef *ref) {
3111         LIST_REMOVE(refs, ref->unit->refs, ref);
/* Fold manager-level defaults and cross-setting implications into the
 * unit's exec and cgroup contexts after all configuration was parsed:
 * copy default rlimits, default the user-manager working directory to
 * $HOME, force NoNewPrivileges for user managers using seccomp-based
 * settings (unprivileged users may not install filters otherwise),
 * and let PrivateDevices= imply dropping CAP_MKNOD and a closed
 * device policy. */
3115 int unit_patch_contexts(Unit *u) {
3123         /* Patch in the manager defaults into the exec and cgroup
3124          * contexts, _after_ the rest of the settings have been
3127         ec = unit_get_exec_context(u);
3129                 /* This only copies in the ones that need memory */
3130                 for (i = 0; i < _RLIMIT_MAX; i++)
3131                         if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3132                                 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3137                 if (u->manager->running_as == SYSTEMD_USER &&
3138                     !ec->working_directory) {
3140                         r = get_home_dir(&ec->working_directory);
3145                 if (u->manager->running_as == SYSTEMD_USER &&
3146                     (ec->syscall_whitelist ||
3147                      !set_isempty(ec->syscall_filter) ||
3148                      !set_isempty(ec->syscall_archs) ||
3149                      ec->address_families_whitelist ||
3150                      !set_isempty(ec->address_families)))
3151                         ec->no_new_privileges = true;
3153                 if (ec->private_devices)
3154                         ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3157         cc = unit_get_cgroup_context(u);
3161                     ec->private_devices &&
3162                     cc->device_policy == CGROUP_AUTO)
3163                         cc->device_policy = CGROUP_CLOSED;
/* Return the unit type's embedded ExecContext, located via the byte
 * offset published in the vtable (the offset==0 "not supported" check
 * is elided). */
3169 ExecContext *unit_get_exec_context(Unit *u) {
3176         offset = UNIT_VTABLE(u)->exec_context_offset;
3180         return (ExecContext*) ((uint8_t*) u + offset);
/* Return the unit type's embedded KillContext via its vtable offset. */
3183 KillContext *unit_get_kill_context(Unit *u) {
3190         offset = UNIT_VTABLE(u)->kill_context_offset;
3194         return (KillContext*) ((uint8_t*) u + offset);
/* Return the unit type's embedded CGroupContext via its vtable offset. */
3197 CGroupContext *unit_get_cgroup_context(Unit *u) {
3203         offset = UNIT_VTABLE(u)->cgroup_context_offset;
3207         return (CGroupContext*) ((uint8_t*) u + offset);
/* Return the unit type's ExecRuntime pointer; note the extra
 * dereference — the vtable offset locates an ExecRuntime* field, not
 * the struct itself. */
3210 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3216         offset = UNIT_VTABLE(u)->exec_runtime_offset;
3220         return *(ExecRuntime**) ((uint8_t*) u + offset);
/* Pick the directory for drop-in snippets: persistent config dir vs.
 * runtime dir, for user or system manager. Transient units always go
 * to the runtime location even in UNIT_PERSISTENT mode. *dir is newly
 * allocated; caller frees. */
3223 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3224         if (u->manager->running_as == SYSTEMD_USER) {
3227                 if (mode == UNIT_PERSISTENT && !transient)
3228                         r = user_config_home(dir);
3230                         r = user_runtime_dir(dir);
3237         if (mode == UNIT_PERSISTENT && !transient)
3238                 *dir = strdup("/etc/systemd/system");
3240                 *dir = strdup("/run/systemd/system");
/* Compute the path (and temp path) of a 50-prefixed drop-in file for
 * this unit in the directory appropriate for 'mode'. */
3247 static int unit_drop_in_file(Unit *u,
3248                              UnitSetPropertiesMode mode, const char *name, char **p, char **q) {
3249         _cleanup_free_ char *dir = NULL;
3254         r = unit_drop_in_dir(u, mode, u->transient, &dir);
3258         return drop_in_file(dir, u->id, 50, name, p, q);
/* Write a drop-in snippet 'name' with the given contents for this
 * unit; only the persistent/runtime modes actually write. */
3261 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3263         _cleanup_free_ char *dir = NULL;
3268         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3271         r = unit_drop_in_dir(u, mode, u->transient, &dir);
3275         return write_drop_in(dir, u->id, 50, name, data);
/* printf-style wrapper around unit_write_drop_in(): format the
 * contents first, then write them. */
3278 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3279         _cleanup_free_ char *p = NULL;
3287         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3290         va_start(ap, format);
3291         r = vasprintf(&p, format, ap);
3297         return unit_write_drop_in(u, mode, name, p);
/* Like unit_write_drop_in(), but prefix the data with the unit type's
 * private section header (e.g. "[Service]\n"); refuses for types
 * without a private section. */
3300 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3301         _cleanup_free_ char *ndata = NULL;
3307         if (!UNIT_VTABLE(u)->private_section)
3310         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3313         ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3317         return unit_write_drop_in(u, mode, name, ndata);
/* printf-style wrapper around unit_write_drop_in_private(). */
3320 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3321         _cleanup_free_ char *p = NULL;
3329         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3332         va_start(ap, format);
3333         r = vasprintf(&p, format, ap);
3339         return unit_write_drop_in_private(u, mode, name, p);
/* Delete the named drop-in file for this unit; a file that does not
 * exist is treated as success (ENOENT mapped to 0). */
3342 int unit_remove_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name) {
3343         _cleanup_free_ char *p = NULL, *q = NULL;
3348         if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3351         r = unit_drop_in_file(u, mode, name, &p, &q);
3356                 r = errno == ENOENT ? 0 : -errno;
/* Convert the unit into a transient one: reset it to stub state, mark
 * transient, and replace the fragment path with a runtime-directory
 * stub file ("/run/systemd/system/<id>" for the system manager, the
 * user runtime dir otherwise) containing a placeholder, so the unit
 * survives lookups without on-disk configuration. */
3364 int unit_make_transient(Unit *u) {
3369         u->load_state = UNIT_STUB;
3371         u->transient = true;
3373         free(u->fragment_path);
3374         u->fragment_path = NULL;
3376         if (u->manager->running_as == SYSTEMD_USER) {
3377                 _cleanup_free_ char *c = NULL;
3379                 r = user_runtime_dir(&c);
3385                 u->fragment_path = strjoin(c, "/", u->id, NULL);
3386                 if (!u->fragment_path)
3391                 u->fragment_path = strappend("/run/systemd/system/", u->id);
3392                 if (!u->fragment_path)
3395                 mkdir_p("/run/systemd/system", 0755);
3398         return write_string_file_atomic_label(u->fragment_path, "# Transient stub");
/* Kills the processes belonging to unit u according to its KillContext
 * c and the requested KillOperation k: first the main and control PIDs
 * directly, then — for KILL_CONTROL_GROUP mode (or KILL_MIXED on a
 * final kill) — everything remaining in the unit's cgroup. Optionally
 * follows up with SIGHUP when c->send_sighup is set. Returns whether
 * the caller should wait for processes to exit.
 * NOTE(review): the parameter list (c, k, main_pid, control_pid) and
 * several statements fall in lines omitted from this excerpt; comments
 * cover only what is visible. */
3401 int unit_kill_context(
3407 bool main_pid_alien) {
3409 int sig, wait_for_exit = false, r;
/* KillMode=none: nothing to do. */
3414 if (c->kill_mode == KILL_NONE)
3424 case KILL_TERMINATE:
3425 sig = c->kill_signal;
3428 assert_not_reached("KillOperation unknown");
/* SIGCONT is sent too so stopped processes can handle the signal. */
3432 r = kill_and_sigcont(main_pid, sig);
/* ESRCH just means the process is already gone — not worth a log. */
3434 if (r < 0 && r != -ESRCH) {
3435 _cleanup_free_ char *comm = NULL;
3436 get_process_comm(main_pid, &comm);
3438 log_unit_warning_errno(u->id, r, "Failed to kill main process " PID_FMT " (%s): %m", main_pid, strna(comm));
/* Alien main PIDs (not our children) deliver no SIGCHLD, so we
 * cannot wait for them. */
3440 if (!main_pid_alien)
3441 wait_for_exit = true;
3443 if (c->send_sighup && k != KILL_KILL)
3444 kill(main_pid, SIGHUP);
/* Same treatment for the control process, which is always ours. */
3448 if (control_pid > 0) {
3449 r = kill_and_sigcont(control_pid, sig);
3451 if (r < 0 && r != -ESRCH) {
3452 _cleanup_free_ char *comm = NULL;
3453 get_process_comm(control_pid, &comm);
3455 log_unit_warning_errno(u->id, r, "Failed to kill control process " PID_FMT " (%s): %m", control_pid, strna(comm));
3457 wait_for_exit = true;
3459 if (c->send_sighup && k != KILL_KILL)
3460 kill(control_pid, SIGHUP);
/* Sweep the rest of the cgroup, but only in cgroup kill mode, or in
 * mixed mode once we have escalated to the final kill. */
3464 if ((c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL)) && u->cgroup_path) {
3465 _cleanup_set_free_ Set *pid_set = NULL;
3467 /* Exclude the main/control pids from being killed via the cgroup */
3468 pid_set = unit_pid_set(main_pid, control_pid);
3472 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, true, false, pid_set);
3474 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3475 log_unit_warning_errno(u->id, r, "Failed to kill control group: %m");
3478 /* FIXME: For now, we will not wait for the
3479 * cgroup members to die, simply because
3480 * cgroup notification is unreliable. It
3481 * doesn't work at all in containers, and
3482 * outside of containers it can be confused
3483 * easily by leaving directories in the
3486 /* wait_for_exit = true; */
3488 if (c->send_sighup && k != KILL_KILL) {
/* Rebuild the exclusion set; presumably the earlier one was
 * consumed/freed in omitted lines — TODO confirm. */
3491 pid_set = unit_pid_set(main_pid, control_pid);
3495 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3500 return wait_for_exit;
/* Registers "path" (which must be absolute and safe) as a mount
 * requirement of unit u, and indexes every prefix of the path in the
 * manager-wide units_requiring_mounts_for hashmap so that newly
 * appearing mount units can find dependent units quickly. */
3503 int unit_require_mounts_for(Unit *u, const char *path) {
/* VLA sized to hold any prefix of path, including the NUL. */
3504 char prefix[strlen(path) + 1], *p;
3510 /* Registers a unit for requiring a certain path and all its
3511 * prefixes. We keep a simple array of these paths in the
3512 * unit, since its usually short. However, we build a prefix
3513 * table for all possible prefixes so that new appearing mount
3514 * units can easily determine which units to make themselves a
3517 if (!path_is_absolute(path))
/* Normalize away duplicate slashes before comparing/storing. */
3524 path_kill_slashes(p);
3526 if (!path_is_safe(p)) {
/* Already registered — nothing to do (return in omitted lines). */
3531 if (strv_contains(u->requires_mounts_for, p)) {
/* strv_consume() takes ownership of p on success. */
3536 r = strv_consume(&u->requires_mounts_for, p);
/* Index p and every parent directory of it. */
3540 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3543 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
/* Lazily allocate the manager-wide prefix table. */
3547 if (!u->manager->units_requiring_mounts_for) {
3548 u->manager->units_requiring_mounts_for = hashmap_new(&string_hash_ops);
3549 if (!u->manager->units_requiring_mounts_for)
3563 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
/* Ensures unit u has an ExecRuntime: reuses (and references) the
 * runtime of a unit it shares a namespace with via JoinsNamespaceOf=,
 * otherwise allocates a fresh one with exec_runtime_make(). */
3579 int unit_setup_exec_runtime(Unit *u) {
/* Per-type offset of the ExecRuntime* field inside the unit struct. */
3585 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3588 /* Check if there already is an ExecRuntime for this unit? */
3589 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3593 /* Try to get it from somebody else */
3594 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3596 *rt = unit_get_exec_runtime(other);
/* Share the runtime: bump its reference count. */
3598 exec_runtime_ref(*rt);
/* No shareable runtime found — create our own. */
3603 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
/* Maps UnitActiveState enum values to their canonical string names,
 * as shown by e.g. "systemctl status". */
3606 static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
3607 [UNIT_ACTIVE] = "active",
3608 [UNIT_RELOADING] = "reloading",
3609 [UNIT_INACTIVE] = "inactive",
3610 [UNIT_FAILED] = "failed",
3611 [UNIT_ACTIVATING] = "activating",
3612 [UNIT_DEACTIVATING] = "deactivating"
/* Generates unit_active_state_to_string()/_from_string() helpers. */
3615 DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState);