1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
27 #include <sys/signalfd.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
36 #include <sys/types.h>
44 #include <systemd/sd-daemon.h>
53 #include "ratelimit.h"
55 #include "mount-setup.h"
56 #include "unit-name.h"
57 #include "dbus-unit.h"
60 #include "path-lookup.h"
62 #include "bus-errors.h"
63 #include "exit-status.h"
66 #include "cgroup-util.h"
68 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
69 #define GC_QUEUE_ENTRIES_MAX 16
71 /* As soon as 10s passed since a unit was added to our GC queue, make sure to run a gc sweep */
72 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
74 /* Where clients shall send notification messages to */
75 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
76 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
78 static int manager_setup_notify(Manager *m) {
        /* Creates the AF_UNIX/SOCK_DGRAM socket on which clients send
         * us sd_notify() status messages, binds it (filesystem path
         * for the system instance, abstract namespace for user
         * instances), enables SO_PASSCRED, registers the fd with the
         * manager's epoll loop and records the address string in
         * m->notify_socket. Error-path returns are elided in this
         * view. */
81 struct sockaddr_un un;
83 struct epoll_event ev;
89 m->notify_watch.type = WATCH_NOTIFY;
90 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
91 log_error("Failed to allocate notification socket: %m");
96 sa.sa.sa_family = AF_UNIX;
        /* User instance: abstract-namespace socket ("@" prefix) with a
         * random suffix so concurrent user managers do not collide. */
99 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
101 unlink(NOTIFY_SOCKET_SYSTEM);
102 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
        /* A leading '@' in our string form denotes an abstract-namespace
         * socket; on the wire that is encoded as a leading NUL byte. */
105 if (sa.un.sun_path[0] == '@')
106 sa.un.sun_path[0] = 0;
        /* Address length counts only the bytes actually used, so the
         * strlen() is taken from sun_path+1 past the possible NUL. */
109 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
113 log_error("bind() failed: %m");
117 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
118 log_error("SO_PASSCRED failed: %m");
124 ev.data.ptr = &m->notify_watch;
126 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
        /* Restore the '@' so the stored notify_socket string stays
         * human-readable and re-parseable. */
129 if (sa.un.sun_path[0] == 0)
130 sa.un.sun_path[0] = '@';
132 if (!(m->notify_socket = strdup(sa.un.sun_path)))
135 log_debug("Using notification socket %s", m->notify_socket);
140 static int enable_special_signals(Manager *m) {
        /* Hooks up the kernel's special console events for the system
         * instance: ctrl-alt-del delivers SIGINT to PID 1 instead of
         * rebooting, and a console "kbrequest" (alt-arrowup) delivers
         * SIGWINCH. All failures are logged but treated as non-fatal. */
145 /* Enable that we get SIGINT on control-alt-del. In containers
146 * this will fail with EPERM, so ignore that. */
147 if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM)
148 log_warning("Failed to enable ctrl-alt-del handling: %m");
150 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
152 /* Support systems without virtual console */
154 log_warning("Failed to open /dev/tty0: %m");
156 /* Enable that we get SIGWINCH on kbrequest */
        /* Use %m like every other log call in this file, instead of
         * the redundant strerror(errno) round-trip. */
157 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
158 log_warning("Failed to enable kbrequest handling: %m");
160 close_nointr_nofail(fd);
166 static int manager_setup_signals(Manager *m) {
        /* Routes all signals the manager handles itself through a
         * signalfd registered with the epoll loop: SIGCHLD gets a
         * no-op-but-informative disposition, the full set below is
         * blocked and converted into signalfd events, and the system
         * instance additionally enables the special console signals. */
168 struct epoll_event ev;
173 /* We are not interested in SIGSTOP and friends. */
175 sa.sa_handler = SIG_DFL;
176 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
177 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
179 assert_se(sigemptyset(&mask) == 0);
181 sigset_add_many(&mask,
182 SIGCHLD, /* Child died */
183 SIGTERM, /* Reexecute daemon */
184 SIGHUP, /* Reload configuration */
185 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
186 SIGUSR2, /* systemd: dump status */
187 SIGINT, /* Kernel sends us this on control-alt-del */
188 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
189 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
190 SIGRTMIN+0, /* systemd: start default.target */
191 SIGRTMIN+1, /* systemd: isolate rescue.target */
192 SIGRTMIN+2, /* systemd: isolate emergency.target */
193 SIGRTMIN+3, /* systemd: start halt.target */
194 SIGRTMIN+4, /* systemd: start poweroff.target */
195 SIGRTMIN+5, /* systemd: start reboot.target */
196 SIGRTMIN+6, /* systemd: start kexec.target */
197 SIGRTMIN+13, /* systemd: Immediate halt */
198 SIGRTMIN+14, /* systemd: Immediate poweroff */
199 SIGRTMIN+15, /* systemd: Immediate reboot */
200 SIGRTMIN+16, /* systemd: Immediate kexec */
201 SIGRTMIN+20, /* systemd: enable status messages */
202 SIGRTMIN+21, /* systemd: disable status messages */
203 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
204 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
205 SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
206 SIGRTMIN+27, /* systemd: set log target to console */
207 SIGRTMIN+28, /* systemd: set log target to kmsg */
208 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
        /* Block the whole set; from here on these signals are consumed
         * exclusively via the signalfd below. */
210 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
212 m->signal_watch.type = WATCH_SIGNAL;
213 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
218 ev.data.ptr = &m->signal_watch;
220 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
223 if (m->running_as == MANAGER_SYSTEM)
224 return enable_special_signals(m);
229 static void manager_strip_environment(Manager *m) {
        /* Scrubs variables from the environment block we inherited
         * that belong to well-known boot-time interfaces, so that they
         * are not passed on to the services we spawn. */
232 /* Remove variables from the inherited set that are part of
233 * the container interface:
234 * http://www.freedesktop.org/wiki/Software/systemd/ContainerInterface */
235 strv_remove_prefix(m->environment, "container=");
236 strv_remove_prefix(m->environment, "container_");
238 /* Remove variables from the inherited set that are part of
239 * the initrd interface:
240 * http://www.freedesktop.org/wiki/Software/systemd/InitrdInterface */
241 strv_remove_prefix(m->environment, "RD_");
244 int manager_new(ManagerRunningAs running_as, Manager **_m) {
        /* Allocates and initializes a Manager instance: copies and
         * strips the environment, creates the core hashmaps and the
         * epoll fd, sets up lookup paths, signal handling, the cgroup
         * hierarchy, the notification socket and the bus connections.
         * On success the new manager is presumably stored in *_m —
         * the tail of the function is elided in this view. */
249 assert(running_as >= 0);
250 assert(running_as < _MANAGER_RUNNING_AS_MAX);
252 if (!(m = new0(Manager, 1)))
255 dual_timestamp_get(&m->startup_timestamp);
257 m->running_as = running_as;
258 m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1;
259 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
260 m->pin_cgroupfs_fd = -1;
        /* All fds start out as -1 so cleanup paths can safely test
         * ">= 0" before closing. */
266 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
267 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
269 m->environment = strv_copy(environ);
273 manager_strip_environment(m);
275 if (running_as == MANAGER_SYSTEM) {
276 m->default_controllers = strv_new("cpu", NULL);
277 if (!m->default_controllers)
281 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
284 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
287 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
290 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
293 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
296 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
299 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
302 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
305 if ((r = manager_setup_signals(m)) < 0)
308 if ((r = manager_setup_cgroup(m)) < 0)
311 if ((r = manager_setup_notify(m)) < 0)
314 /* Try to connect to the busses, if possible. */
315 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
319 if ((m->audit_fd = audit_open()) < 0 &&
320 /* If the kernel lacks netlink or audit support,
321 * don't worry about it. */
322 errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT)
323 log_error("Failed to connect to audit log: %m");
        /* Remember whether /usr was non-empty before it was (possibly)
         * mounted, i.e. whether we run off a split /usr setup. */
326 m->taint_usr = dir_is_empty("/usr") > 0;
336 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
        /* Releases every unit currently queued for cleanup; returns
         * the number processed (loop body elided in this view). */
342 while ((u = m->cleanup_queue)) {
343 assert(u->in_cleanup_queue);
        /* Offsets added to m->gc_marker to tag each unit's state during
         * the current GC sweep (see unit_gc_sweep() below); bumping the
         * marker per run invalidates all tags from previous runs. */
353 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
354 GC_OFFSET_UNSURE, /* No clue */
355 GC_OFFSET_GOOD, /* We still need this unit */
356 GC_OFFSET_BAD, /* We don't need this unit anymore */
360 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
        /* Recursive mark phase of the unit GC: classifies u as GOOD
         * (still needed), BAD (collectible) or UNSURE by walking the
         * units that reference it. gc_marker identifies the current
         * sweep so markers left over from earlier sweeps are ignored. */
367 if (u->gc_marker == gc_marker + GC_OFFSET_GOOD ||
368 u->gc_marker == gc_marker + GC_OFFSET_BAD ||
369 u->gc_marker == gc_marker + GC_OFFSET_IN_PATH)
        /* Already queued for cleanup — nothing left to decide. */
372 if (u->in_cleanup_queue)
375 if (unit_check_gc(u))
        /* Mark as on the current traversal path so reference cycles
         * terminate instead of recursing forever. */
378 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
382 SET_FOREACH(other, u->dependencies[UNIT_REFERENCED_BY], i) {
383 unit_gc_sweep(other, gc_marker);
        /* A single GOOD referrer keeps us alive; a non-BAD verdict
         * means we cannot conclude anything yet. */
385 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
388 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
395 /* We were unable to find anything out about this entry, so
396 * let's investigate it later */
397 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
398 unit_add_to_gc_queue(u);
402 /* We definitely know that this one is not useful anymore, so
403 * let's mark it for deletion */
404 u->gc_marker = gc_marker + GC_OFFSET_BAD;
405 unit_add_to_cleanup_queue(u);
409 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
412 static unsigned manager_dispatch_gc_queue(Manager *m) {
        /* Runs a GC sweep over all queued units, but only once the
         * queue is large enough (GC_QUEUE_ENTRIES_MAX) or old enough
         * (GC_QUEUE_USEC_MAX) — otherwise bail out early to batch
         * work. Units judged BAD or still UNSURE after the sweep are
         * moved to the cleanup queue. */
419 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
420 (m->gc_queue_timestamp <= 0 ||
421 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
424 log_debug("Running GC...");
        /* Advance the marker by a full offset range per sweep; the
         * check below handles unsigned wrap-around of the counter. */
426 m->gc_marker += _GC_OFFSET_MAX;
427 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
430 gc_marker = m->gc_marker;
432 while ((u = m->gc_queue)) {
433 assert(u->in_gc_queue);
435 unit_gc_sweep(u, gc_marker);
437 LIST_REMOVE(Unit, gc_queue, m->gc_queue, u);
438 u->in_gc_queue = false;
        /* UNSURE entries that survived a whole sweep are treated as
         * collectible too. */
442 if (u->gc_marker == gc_marker + GC_OFFSET_BAD ||
443 u->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
444 log_debug("Collecting %s", u->id);
445 u->gc_marker = gc_marker + GC_OFFSET_BAD;
446 unit_add_to_cleanup_queue(u);
450 m->n_in_gc_queue = 0;
451 m->gc_queue_timestamp = 0;
456 static void manager_clear_jobs_and_units(Manager *m) {
        /* Tears down all transaction jobs and all units, drains the
         * cleanup queue, and then asserts that every work queue and
         * lookup table is empty — used on manager shutdown. */
462 while ((j = hashmap_first(m->transaction_jobs)))
465 while ((u = hashmap_first(m->units)))
468 manager_dispatch_cleanup_queue(m);
470 assert(!m->load_queue);
471 assert(!m->run_queue);
472 assert(!m->dbus_unit_queue);
473 assert(!m->dbus_job_queue);
474 assert(!m->cleanup_queue);
475 assert(!m->gc_queue);
477 assert(hashmap_isempty(m->transaction_jobs));
478 assert(hashmap_isempty(m->jobs));
479 assert(hashmap_isempty(m->units));
482 void manager_free(Manager *m) {
        /* Full destructor: clears jobs/units, gives every unit type a
         * chance to shut down, tears down the cgroup hierarchy (kept
         * on reexec), undoes generators, and releases every hashmap,
         * fd, string and set the manager owns. */
487 manager_clear_jobs_and_units(m);
489 for (c = 0; c < _UNIT_TYPE_MAX; c++)
490 if (unit_vtable[c]->shutdown)
491 unit_vtable[c]->shutdown(m);
493 /* If we reexecute ourselves, we keep the root cgroup
495 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
497 manager_undo_generators(m);
501 hashmap_free(m->units);
502 hashmap_free(m->jobs);
503 hashmap_free(m->transaction_jobs);
504 hashmap_free(m->watch_pids);
505 hashmap_free(m->watch_bus);
        /* fds were initialized to -1 in manager_new(), so ">= 0" is a
         * reliable "was opened" test here. */
507 if (m->epoll_fd >= 0)
508 close_nointr_nofail(m->epoll_fd);
509 if (m->signal_watch.fd >= 0)
510 close_nointr_nofail(m->signal_watch.fd);
511 if (m->notify_watch.fd >= 0)
512 close_nointr_nofail(m->notify_watch.fd);
515 if (m->audit_fd >= 0)
516 audit_close(m->audit_fd);
519 free(m->notify_socket);
521 lookup_paths_free(&m->lookup_paths);
522 strv_free(m->environment);
524 strv_free(m->default_controllers);
526 hashmap_free(m->cgroup_bondings);
527 set_free_free(m->unit_path_cache);
532 int manager_enumerate(Manager *m) {
        /* Asks every unit type backend to enumerate the units it knows
         * about from disk/kernel, then processes the resulting load
         * queue. Individual enumeration failures are recorded in q
         * (aggregation elided in this view). */
538 /* Let's ask every type to load all units from disk/kernel
539 * that it might know */
540 for (c = 0; c < _UNIT_TYPE_MAX; c++)
541 if (unit_vtable[c]->enumerate)
542 if ((q = unit_vtable[c]->enumerate(m)) < 0)
545 manager_dispatch_load_queue(m);
549 int manager_coldplug(Manager *m) {
        /* Establishes the initial runtime state of every enumerated
         * unit, e.g. after startup or deserialization. */
557 /* Then, let's set up their initial state. */
558 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
564 if ((q = unit_coldplug(u)) < 0)
571 static void manager_build_unit_path_cache(Manager *m) {
        /* Rebuilds the set of unit file paths known to exist on disk,
         * so later lookups can skip stat() calls for files that are
         * definitely absent. On any failure the cache is simply
         * dropped (see bottom), which is safe — it is an optimization
         * only. */
578 set_free_free(m->unit_path_cache);
580 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
581 log_error("Failed to allocate unit path cache.");
585 /* This simply builds a list of files we know exist, so that
586 * we don't always have to go to disk */
588 STRV_FOREACH(i, m->lookup_paths.unit_path) {
591 if (!(d = opendir(*i))) {
592 log_error("Failed to open directory: %m");
596 while ((de = readdir(d))) {
599 if (ignore_file(de->d_name))
        /* Avoid a double slash when the search path is "/" itself. */
602 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
608 if ((r = set_put(m->unit_path_cache, p)) < 0) {
        /* Failure path: log and fall back to operating without the
         * cache rather than failing startup. */
621 log_error("Failed to build unit path cache: %s", strerror(-r));
623 set_free_free(m->unit_path_cache);
624 m->unit_path_cache = NULL;
630 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
        /* Brings the manager to a running state: runs generators,
         * builds the unit path cache, enumerates units, optionally
         * deserializes previous state (on reexec), and finally
         * coldplugs everything. */
635 manager_run_generators(m);
637 manager_build_unit_path_cache(m);
639 /* If we will deserialize make sure that during enumeration
640 * this is already known, so we increase the counter here
645 /* First, enumerate what we can from all config files */
646 r = manager_enumerate(m);
648 /* Second, deserialize if there is something to deserialize */
650 if ((q = manager_deserialize(m, serialization, fds)) < 0)
653 /* Third, fire things up! */
654 if ((q = manager_coldplug(m)) < 0)
658 assert(m->n_reloading > 0);
665 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
669 /* Deletes one job from the transaction */
        /* Unlinks j from the transaction chains; with
         * delete_dependencies set, jobs that only existed because of j
         * are removed recursively as well. */
671 manager_transaction_unlink_job(m, j, delete_dependencies);
677 static void transaction_delete_unit(Manager *m, Unit *u) {
680 /* Deletes all jobs associated with a certain unit from the
        /* Repeatedly looks up and deletes (with dependencies) until no
         * transaction job for u remains. */
683 while ((j = hashmap_get(m->transaction_jobs, u)))
684 transaction_delete_job(m, j, true);
687 static void transaction_clean_dependencies(Manager *m) {
693 /* Drops all dependencies of all installed jobs */
        /* After a transaction is applied (or aborted) the JobDependency
         * links are no longer needed; free them all and verify the
         * anchor list is gone too. */
695 HASHMAP_FOREACH(j, m->jobs, i) {
696 while (j->subject_list)
697 job_dependency_free(j->subject_list);
698 while (j->object_list)
699 job_dependency_free(j->object_list);
702 assert(!m->transaction_anchor);
705 static void transaction_abort(Manager *m) {
        /* Discards the entire in-progress transaction: deletes every
         * queued job (with dependencies) and frees all dependency
         * links. */
710 while ((j = hashmap_first(m->transaction_jobs)))
712 transaction_delete_job(m, j, true);
716 assert(hashmap_isempty(m->transaction_jobs));
718 transaction_clean_dependencies(m);
721 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
726 /* A recursive sweep through the graph that marks all units
727 * that matter to the anchor job, i.e. are directly or
728 * indirectly a dependency of the anchor job via paths that
729 * are fully marked as mattering. */
        /* j == NULL means start from the anchor's own subject list. */
734 l = m->transaction_anchor;
736 LIST_FOREACH(subject, l, l) {
738 /* This link does not matter */
742 /* This unit has already been marked */
743 if (l->object->generation == generation)
746 l->object->matters_to_anchor = true;
747 l->object->generation = generation;
749 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
753 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
754 JobDependency *l, *last;
758 assert(j->unit == other->unit);
759 assert(!j->installed);
761 /* Merges 'other' into 'j' (j takes type t and inherits other's
         * flags and dependency links) and then deletes 'other'. */
764 j->state = JOB_WAITING;
765 j->override = j->override || other->override;
767 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
769 /* Patch us in as new owner of the JobDependency objects */
771 LIST_FOREACH(subject, l, other->subject_list) {
772 assert(l->subject == other);
777 /* Merge both lists */
779 last->subject_next = j->subject_list;
781 j->subject_list->subject_prev = last;
782 j->subject_list = other->subject_list;
785 /* Patch us in as new owner of the JobDependency objects */
787 LIST_FOREACH(object, l, other->object_list) {
788 assert(l->object == other);
793 /* Merge both lists */
795 last->object_next = j->object_list;
797 j->object_list->object_prev = last;
798 j->object_list = other->object_list;
801 /* Kill the other job */
        /* The lists now belong to j; clear them first so deleting
         * 'other' does not free them. */
802 other->subject_list = NULL;
803 other->object_list = NULL;
804 transaction_delete_job(m, other, true);
806 static bool job_is_conflicted_by(Job *j) {
811 /* Returns true if this job is pulled in by at least one
812 * ConflictedBy dependency. */
814 LIST_FOREACH(object, l, j->object_list)
821 static int delete_one_unmergeable_job(Manager *m, Job *j) {
826 /* Tries to delete one item in the linked list
827 * j->transaction_next->transaction_next->... that conflicts
828 * with another one, in an attempt to make an inconsistent
829 * transaction work. */
831 /* We rely here on the fact that if a merged with b does not
832 * merge with c, then neither a nor b merges with c */
833 LIST_FOREACH(transaction, j, j)
834 LIST_FOREACH(transaction, k, j->transaction_next) {
837 /* Is this one mergeable? Then skip it */
838 if (job_type_is_mergeable(j->type, k->type))
841 /* Ok, we found two that conflict, let's see if we can
842 * drop one of them */
843 if (!j->matters_to_anchor && !k->matters_to_anchor) {
845 /* Both jobs don't matter, so let's
846 * find the one that is smarter to
847 * remove. Let's think positive and
848 * rather remove stops than starts --
849 * except if something is being
850 * stopped because it is conflicted by
851 * another unit in which case we
852 * rather remove the start. */
854 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
855 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
857 if (j->type == JOB_STOP) {
859 if (job_is_conflicted_by(j))
864 } else if (k->type == JOB_STOP) {
866 if (job_is_conflicted_by(k))
        /* Otherwise prefer dropping whichever job does not matter to
         * the anchor. */
873 } else if (!j->matters_to_anchor)
875 else if (!k->matters_to_anchor)
880 /* Ok, we can drop one, so let's do so. */
881 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
882 transaction_delete_job(m, d, true);
889 static int transaction_merge_jobs(Manager *m, DBusError *e) {
        /* Collapses each unit's chain of prospective jobs into a
         * single job of the merged type. First verifies all jobs per
         * unit are mergeable (trying to drop one unmergeable job if
         * not), then performs the actual merges. */
896 /* First step, check whether any of the jobs for one specific
897 * task conflict. If so, try to drop one of them. */
898 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
903 LIST_FOREACH(transaction, k, j->transaction_next) {
904 if (job_type_merge(&t, k->type) >= 0)
907 /* OK, we could not merge all jobs for this
908 * action. Let's see if we can get rid of one
911 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
912 /* Ok, we managed to drop one, now
913 * let's ask our callers to call us
914 * again after garbage collecting */
917 /* We couldn't merge anything. Failure */
918 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
919 job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
924 /* Second step, merge the jobs. */
925 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
929 /* Merge all transactions */
930 LIST_FOREACH(transaction, k, j->transaction_next)
931 assert_se(job_type_merge(&t, k->type) == 0);
933 /* If an active job is mergeable, merge it too */
935 job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
937 while ((k = j->transaction_next)) {
        /* Keep the installed head job if there is one; otherwise fold
         * everything into j. */
939 transaction_merge_and_delete_job(m, k, j, t);
942 transaction_merge_and_delete_job(m, j, k, t);
945 if (j->unit->job && !j->installed)
946 transaction_merge_and_delete_job(m, j, j->unit->job, t);
948 assert(!j->transaction_next);
949 assert(!j->transaction_prev);
955 static void transaction_drop_redundant(Manager *m) {
        /* Removes whole per-unit job chains from the transaction when
         * every job in the chain is a no-op (already installed, or
         * redundant given the unit's active state) and nothing in it
         * conflicts with an existing job. */
960 /* Goes through the transaction and removes all jobs that are
969 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
970 bool changes_something = false;
973 LIST_FOREACH(transaction, k, j) {
975 if (!job_is_anchor(k) &&
976 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
977 (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
980 changes_something = true;
984 if (changes_something)
987 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
988 transaction_delete_job(m, j, false);
996 static bool unit_matters_to_anchor(Unit *u, Job *j) {
        /* j is the head of a unit's transaction job chain; returns
         * true if any job in that chain matters to the anchor. */
998 assert(!j->transaction_prev);
1000 /* Checks whether at least one of the jobs for this unit
1001 * matters to the anchor. */
1003 LIST_FOREACH(transaction, j, j)
1004 if (j->matters_to_anchor)
1010 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
1017 assert(!j->transaction_prev);
1019 /* Does a recursive sweep through the ordering graph, looking
1020 * for a cycle. If we find a cycle we try to break it. */
1022 /* Have we seen this before? */
1023 if (j->generation == generation) {
1026 /* If the marker is NULL we have been here already and
1027 * decided the job was loop-free from here. Hence
1028 * shortcut things and return right-away. */
1032 /* So, the marker is not NULL and we already have been
1033 * here. We have a cycle. Let's try to break it. We go
1034 * backwards in our path and try to find a suitable
1035 * job to remove. We use the marker to find our way
1036 * back, since, clever as we are, we stored our way back
1038 log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
        /* Walk the marker chain back along the cycle path; "k->marker
         * != k" distinguishes intermediate nodes from the start node,
         * which points at itself. */
1041 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1043 log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
1047 !unit_matters_to_anchor(k->unit, k)) {
1048 /* Ok, we can drop this one, so let's
1053 /* Check if this in fact was the beginning of
1061 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
1062 transaction_delete_unit(m, delete->unit);
1066 log_error("Unable to break cycle");
1068 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1072 /* Make the marker point to where we come from, so that we can
1073 * find our way backwards if we want to break a cycle. We use
1074 * a special marker for the beginning: we point to
1076 j->marker = from ? from : j;
1077 j->generation = generation;
1079 /* We assume that the dependencies are bidirectional, and
1080 * hence can ignore UNIT_AFTER */
1081 SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
1084 /* Is there a job for this unit? */
1085 if (!(o = hashmap_get(m->transaction_jobs, u)))
1087 /* Ok, there is no job for this in the
1088 * transaction, but maybe there is already one
1093 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1097 /* Ok, let's backtrack, and remember that this entry is not on
1098 * our path anymore. */
1104 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
        /* Entry point for cycle detection: sweeps every job chain with
         * a fresh generation counter; each sweep may delete jobs to
         * break a cycle. */
1113 /* Check if the ordering graph is cyclic. If it is, try to fix
1114 * that up by dropping one of the jobs. */
1116 g = (*generation)++;
1118 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1119 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1125 static void transaction_collect_garbage(Manager *m) {
        /* Repeatedly drops transaction jobs that no other job depends
         * on (empty object_list), until a pass removes nothing. */
1130 /* Drop jobs that are not required by any other job */
1138 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1139 if (j->object_list) {
1140 /* log_debug("Keeping job %s/%s because of %s/%s", */
1141 /* j->unit->id, job_type_to_string(j->type), */
1142 /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
1143 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1147 /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
1148 transaction_delete_job(m, j, true);
1156 static int transaction_is_destructive(Manager *m, DBusError *e) {
        /* Refuses the transaction if any of its jobs would replace an
         * already-installed job of a type it does not supersede. */
1162 /* Checks whether applying this transaction means that
1163 * existing jobs would be replaced */
1165 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1168 assert(!j->transaction_prev);
1169 assert(!j->transaction_next);
1172 j->unit->job != j &&
1173 !job_type_is_superset(j->type, j->unit->job->type)) {
1175 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1183 static void transaction_minimize_impact(Manager *m) {
        /* For JOB_FAIL mode: drops optional (non-anchor-mattering)
         * jobs that would stop a running service or conflict with an
         * already-installed job, keeping the transaction as
         * non-disruptive as possible. */
1187 /* Drops all unnecessary jobs that reverse already active jobs
1188 * or that stop a running service. */
1196 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1197 LIST_FOREACH(transaction, j, j) {
1198 bool stops_running_service, changes_existing_job;
1200 /* If it matters, we shouldn't drop it */
1201 if (j->matters_to_anchor)
1204 /* Would this stop a running service?
1205 * Would this change an existing job?
1206 * If so, let's drop this entry */
1208 stops_running_service =
1209 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1211 changes_existing_job =
1213 job_type_is_conflicting(j->type, j->unit->job->type);
1215 if (!stops_running_service && !changes_existing_job)
1218 if (stops_running_service)
1219 log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
1221 if (changes_existing_job)
1222 log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
1224 /* Ok, let's get rid of this */
1225 log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
1227 transaction_delete_job(m, j, true);
1239 static int transaction_apply(Manager *m, JobMode mode) {
        /* Installs the transaction: in isolate mode first cancels all
         * installed jobs not part of the new transaction, then moves
         * every transaction job into m->jobs, marks it installed and
         * queues it for running and D-Bus announcement. On failure the
         * partially-installed jobs are rolled back (bottom loop). */
1244 /* Moves the transaction jobs to the set of active jobs */
1246 if (mode == JOB_ISOLATE) {
1248 /* When isolating first kill all installed jobs which
1249 * aren't part of the new transaction */
1251 HASHMAP_FOREACH(j, m->jobs, i) {
1252 assert(j->installed);
1254 if (hashmap_get(m->transaction_jobs, j->unit))
1257 /* 'j' itself is safe to remove, but if other jobs
1258 are invalidated recursively, our iterator may become
1259 invalid and we need to start over. */
1260 if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
1265 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1267 assert(!j->transaction_prev);
1268 assert(!j->transaction_next);
1273 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1277 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1279 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
        /* Replace any previously installed job on this unit. */
1284 job_free(j->unit->job);
1287 j->installed = true;
1288 m->n_installed_jobs ++;
1290 /* We're fully installed. Now let's free data we don't
1293 assert(!j->transaction_next);
1294 assert(!j->transaction_prev);
1296 job_add_to_run_queue(j);
1297 job_add_to_dbus_queue(j);
1300 log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
1303 /* As last step, kill all remaining job dependencies. */
1304 transaction_clean_dependencies(m);
        /* Rollback path: drop the ids we already registered. */
1310 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1314 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1320 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
        /* Drives the whole multi-step pipeline that turns the
         * prospective transaction_jobs set into installed jobs:
         * marking, impact minimization, redundancy dropping, GC,
         * cycle breaking, merging, destructiveness check and finally
         * application. Aborts the transaction on any failure. */
1322 unsigned generation = 1;
1326 /* This applies the changes recorded in transaction_jobs to
1327 * the actual list of jobs, if possible. */
1329 /* First step: figure out which jobs matter */
1330 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1332 /* Second step: Try not to stop any running services if
1333 * we don't have to. Don't try to reverse running
1334 * jobs if we don't have to. */
1335 if (mode == JOB_FAIL)
1336 transaction_minimize_impact(m);
1338 /* Third step: Drop redundant jobs */
1339 transaction_drop_redundant(m);
1342 /* Fourth step: Let's remove unneeded jobs that might
1344 if (mode != JOB_ISOLATE)
1345 transaction_collect_garbage(m);
1347 /* Fifth step: verify order makes sense and correct
1348 * cycles if necessary and possible */
1349 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1353 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1357 /* Let's see if the resulting transaction ordering
1358 * graph is still cyclic... */
1362 /* Sixth step: let's drop unmergeable entries if
1363 * necessary and possible, merge entries we can
1365 if ((r = transaction_merge_jobs(m, e)) >= 0)
1369 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1373 /* Seventh step: an entry got dropped, let's garbage
1374 * collect its dependencies. */
1375 if (mode != JOB_ISOLATE)
1376 transaction_collect_garbage(m);
1378 /* Let's see if the resulting transaction still has
1379 * unmergeable entries ... */
1382 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1383 transaction_drop_redundant(m);
1385 /* Ninth step: check whether we can actually apply this */
1386 if (mode == JOB_FAIL)
1387 if ((r = transaction_is_destructive(m, e)) < 0) {
1388 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1392 /* Tenth step: apply changes */
1393 if ((r = transaction_apply(m, mode)) < 0) {
1394 log_warning("Failed to apply transaction: %s", strerror(-r));
1398 assert(hashmap_isempty(m->transaction_jobs));
1399 assert(!m->transaction_anchor);
1404 transaction_abort(m);
1408 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
        /* Returns the transaction job for (unit, type), reusing an
         * existing one when present; otherwise creates it (or adopts
         * the unit's installed job of matching type) and links it into
         * the per-unit chain in m->transaction_jobs. *is_new reports
         * whether a job was created. */
1414 /* Looks for an existing prospective job and returns that. If
1415 * it doesn't exist it is created and added to the prospective
1418 f = hashmap_get(m->transaction_jobs, unit);
1420 LIST_FOREACH(transaction, j, f) {
1421 assert(j->unit == unit);
1423 if (j->type == type) {
1430 if (unit->job && unit->job->type == type)
1432 else if (!(j = job_new(m, type, unit)))
1437 j->matters_to_anchor = false;
1438 j->override = override;
1440 LIST_PREPEND(Job, transaction, f, j);
1442 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1450 /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
1455 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
        /* Detaches j from its unit's transaction chain (fixing up the
         * hashmap entry if j was the chain head), frees all of j's
         * dependency links, and — when delete_dependencies is set —
         * recursively deletes jobs that only existed because j
         * required them. */
1459 if (j->transaction_prev)
1460 j->transaction_prev->transaction_next = j->transaction_next;
1461 else if (j->transaction_next)
1462 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1464 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1466 if (j->transaction_next)
1467 j->transaction_next->transaction_prev = j->transaction_prev;
1469 j->transaction_prev = j->transaction_next = NULL;
1471 while (j->subject_list)
1472 job_dependency_free(j->subject_list);
1474 while (j->object_list) {
        /* Remember the dependent job before freeing the link, so we
         * can cascade the deletion when it "mattered". */
1475 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1477 job_dependency_free(j->object_list);
1479 if (other && delete_dependencies) {
1480 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1481 other->unit->id, job_type_to_string(other->type),
1482 j->unit->id, job_type_to_string(j->type));
1483 transaction_delete_job(m, other, delete_dependencies);
/* Add a job of the given type for 'unit' to the current transaction,
 * then recursively pull in jobs for all units it depends on.  'by' is
 * the requesting job (NULL for the anchor job), and the various bool
 * flags control whether the new job is mandatory ("matters"),
 * overriding, conflicting, or exempt from ordering/requirement
 * checks.  Errors are reported through the DBusError 'e'. */
 1488 static int transaction_add_job_and_dependencies(
 1496 bool ignore_requirements,
 1507 assert(type < _JOB_TYPE_MAX);
 1510 /* log_debug("Pulling in %s/%s from %s/%s", */
 1511 /* unit->id, job_type_to_string(type), */
 1512 /* by ? by->unit->id : "NA", */
 1513 /* by ? job_type_to_string(by->type) : "NA"); */
/* Refuse units in any load state other than loaded/error/masked;
 * anything else means the unit is not usable yet. */
 1515 if (unit->load_state != UNIT_LOADED &&
 1516 unit->load_state != UNIT_ERROR &&
 1517 unit->load_state != UNIT_MASKED) {
 1518 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
/* Failed-to-load and masked units may still be stopped, but no
 * other job type applies to them. */
 1522 if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
 1523 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
 1524 "Unit %s failed to load: %s. "
 1525 "See system logs and 'systemctl status %s' for details.",
 1527 strerror(-unit->load_error),
 1532 if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
 1533 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
 1537 if (!unit_job_is_applicable(unit, type)) {
 1538 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
 1542 /* First add the job. */
 1543 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
 1546 ret->ignore_order = ret->ignore_order || ignore_order;
 1548 /* Then, add a link to the job. */
 1549 if (!job_dependency_new(by, ret, matters, conflicts))
/* Only recurse for freshly added jobs, and only when requirement
 * dependencies are not explicitly ignored. */
 1552 if (is_new && !ignore_requirements) {
 1555 /* If we are following some other unit, make sure we
 1556 * add all dependencies of everybody following. */
 1557 if (unit_following_set(ret->unit, &following) > 0) {
 1558 SET_FOREACH(dep, following, i)
 1559 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
 1560 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
 1566 set_free(following);
 1569 /* Finally, recursively add in all dependencies. */
 1570 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
 1571 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i)
 1572 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
 1580 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i)
 1581 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
/* Overridable requirements "matter" only when not overridden,
 * hence the !override in the matters argument. */
 1590 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
 1591 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
 1592 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Wants= is best-effort: failures are logged and ignored. */
 1598 SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i)
 1599 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
 1600 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Requisite= units must already be active; we only verify, we do
 * not start them. */
 1606 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i)
 1607 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
 1616 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
 1617 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
 1618 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Conflicting units get stop jobs queued alongside our start. */
 1624 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i)
 1625 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
 1634 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i)
 1635 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
 1636 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Stopping propagates to everything that requires or is bound to
 * this unit. */
 1644 if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
 1646 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i)
 1647 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
 1656 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i)
 1657 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
/* Reload propagates along PropagateReloadTo=, best-effort. */
 1667 if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
 1669 SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
 1670 r = transaction_add_job_and_dependencies(m, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
 1673 log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
 1681 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
/* For an isolate request: walk all loaded units and queue a stop job
 * for every unit not already part of the transaction, unless it is an
 * alias, marked IgnoreOnIsolate=, or already inactive with no job.
 * Failures to add individual stop jobs are logged and ignored. */
 1693 static int transaction_add_isolate_jobs(Manager *m) {
 1701 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
 1703 /* ignore aliases */
 1707 if (u->ignore_on_isolate)
 1710 /* No need to stop inactive units */
 1711 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
 1714 /* Is there already something listed for this? */
 1715 if (hashmap_get(m->transaction_jobs, u))
 1718 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
 1719 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
/* Public entry point for enqueuing a job: validates the mode, builds
 * the transaction (anchor job plus dependencies, plus isolate stop
 * jobs when mode is JOB_ISOLATE), then activates it.  On any failure
 * the whole transaction is aborted.  On success *_ret (if non-NULL)
 * receives the installed anchor job. */
 1725 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
 1730 assert(type < _JOB_TYPE_MAX);
 1732 assert(mode < _JOB_MODE_MAX);
/* Isolate only makes sense as a start request for an isolatable
 * unit; reject everything else up front. */
 1734 if (mode == JOB_ISOLATE && type != JOB_START) {
 1735 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
 1739 if (mode == JOB_ISOLATE && !unit->allow_isolate) {
 1740 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
 1744 log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
 1746 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
 1747 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
 1748 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
 1749 transaction_abort(m);
 1753 if (mode == JOB_ISOLATE)
 1754 if ((r = transaction_add_isolate_jobs(m)) < 0) {
 1755 transaction_abort(m);
 1759 if ((r = transaction_activate(m, mode, e)) < 0)
 1762 log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
/* Convenience wrapper around manager_add_job(): resolves 'name' to a
 * Unit via manager_load_unit() first, forwarding any load error. */
 1770 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
 1775 assert(type < _JOB_TYPE_MAX);
 1777 assert(mode < _JOB_MODE_MAX);
 1779 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
 1782 return manager_add_job(m, type, unit, mode, override, e, _ret);
/* Look up an installed job by its numeric id; NULL if not found. */
 1785 Job *manager_get_job(Manager *m, uint32_t id) {
 1788 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
/* Look up a unit by name in the units hashmap; NULL if not loaded. */
 1791 Unit *manager_get_unit(Manager *m, const char *name) {
 1795 return hashmap_get(m->units, name);
/* Drain the load queue: load the data of every queued unit.  The
 * dispatching_load_queue flag guards against recursive invocation
 * (loading one unit may queue others).  Returns the number of units
 * dispatched (counting not visible in this excerpt). */
 1798 unsigned manager_dispatch_load_queue(Manager *m) {
 1804 /* Make sure we are not run recursively */
 1805 if (m->dispatching_load_queue)
 1808 m->dispatching_load_queue = true;
 1810 /* Dispatches the load queue. Takes a unit from the queue and
 1811 * tries to load its data until the queue is empty */
 1813 while ((u = m->load_queue)) {
 1814 assert(u->in_load_queue);
 1820 m->dispatching_load_queue = false;
/* Prepare a unit for loading without touching the disk: validate
 * name/path, reuse an existing Unit if one is already known, or
 * allocate a fresh one and put it on the load/dbus/gc queues.  Either
 * 'name' or 'path' must be given; when only a path is supplied the
 * unit name is derived from its file name. */
 1824 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
 1830 assert(name || path);
 1832 /* This will prepare the unit for loading, but not actually
 1833 * load anything from disk. */
 1835 if (path && !is_path(path)) {
 1836 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
 1841 name = file_name_from_path(path);
 1843 t = unit_name_to_type(name);
 1845 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid_no_type(name, false)) {
 1846 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
/* If we already have this unit, return it instead of creating a
 * duplicate. */
 1850 ret = manager_get_unit(m, name);
 1856 ret = unit_new(m, unit_vtable[t]->object_size);
 1861 ret->fragment_path = strdup(path);
 1862 if (!ret->fragment_path) {
 1868 if ((r = unit_add_name(ret, name)) < 0) {
 1873 unit_add_to_load_queue(ret);
 1874 unit_add_to_dbus_queue(ret);
 1875 unit_add_to_gc_queue(ret);
/* Load a unit's configuration: prepare it, drain the load queue, and
 * return the merge-followed unit in *_ret.  Does not start anything. */
 1883 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
 1888 /* This will load the service information files, but not actually
 1889 * start any services or anything. */
 1891 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
 1894 manager_dispatch_load_queue(m);
/* The unit may have been merged into another during loading;
 * hand the caller the authoritative one. */
 1897 *_ret = unit_follow_merge(*_ret);
/* Dump every installed job to 'f', each line prefixed with 'prefix'. */
 1902 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
 1909 HASHMAP_FOREACH(j, s->jobs, i)
 1910 job_dump(j, f, prefix);
/* Dump every known unit to 'f', each line prefixed with 'prefix'. */
 1913 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
 1921 HASHMAP_FOREACH_KEY(u, t, s->units, i)
 1923 unit_dump(u, f, prefix);
/* Cancel everything: abort any pending transaction and finish every
 * installed job with JOB_CANCELED. */
 1926 void manager_clear_jobs(Manager *m) {
 1931 transaction_abort(m);
 1933 while ((j = hashmap_first(m->jobs)))
 1934 job_finish_and_invalidate(j, JOB_CANCELED);
/* Drain the run queue, executing each queued job.  Guarded against
 * recursion via dispatching_run_queue, since running a job can queue
 * further jobs. */
 1937 unsigned manager_dispatch_run_queue(Manager *m) {
 1941 if (m->dispatching_run_queue)
 1944 m->dispatching_run_queue = true;
 1946 while ((j = m->run_queue)) {
 1947 assert(j->installed);
 1948 assert(j->in_run_queue);
 1950 job_run_and_invalidate(j);
 1954 m->dispatching_run_queue = false;
/* Emit pending D-Bus change signals for units first, then for jobs,
 * until both queues are empty.  Recursion-guarded like the other
 * queue dispatchers. */
 1958 unsigned manager_dispatch_dbus_queue(Manager *m) {
 1965 if (m->dispatching_dbus_queue)
 1968 m->dispatching_dbus_queue = true;
 1970 while ((u = m->dbus_unit_queue)) {
 1971 assert(u->in_dbus_queue);
 1973 bus_unit_send_change_signal(u);
 1977 while ((j = m->dbus_job_queue)) {
 1978 assert(j->in_dbus_queue);
 1980 bus_job_send_change_signal(j);
 1984 m->dispatching_dbus_queue = false;
/* Read one datagram from the sd_notify() socket.  The message must
 * carry SCM_CREDENTIALS so we can attribute it to a sender PID; the
 * PID is mapped to a unit (via watch_pids, falling back to cgroup
 * lookup) and the newline/CR-separated tags are handed to the unit
 * type's notify_message() hook. */
 1988 static int manager_process_notify_fd(Manager *m) {
 1995 struct msghdr msghdr;
 1997 struct ucred *ucred;
 1999 struct cmsghdr cmsghdr;
 2000 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
/* Leave room for the NUL terminator we rely on when splitting. */
 2006 iovec.iov_base = buf;
 2007 iovec.iov_len = sizeof(buf)-1;
 2011 msghdr.msg_iov = &iovec;
 2012 msghdr.msg_iovlen = 1;
 2013 msghdr.msg_control = &control;
 2014 msghdr.msg_controllen = sizeof(control);
 2016 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
 2020 if (errno == EAGAIN || errno == EINTR)
/* Without valid credentials we cannot attribute the message to a
 * unit, so drop it. */
 2026 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
 2027 control.cmsghdr.cmsg_level != SOL_SOCKET ||
 2028 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
 2029 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
 2030 log_warning("Received notify message without credentials. Ignoring.");
 2034 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
 2036 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
 2037 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
 2038 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
 2042 assert((size_t) n < sizeof(buf));
 2044 if (!(tags = strv_split(buf, "\n\r")))
 2047 log_debug("Got notification message for unit %s", u->id);
 2049 if (UNIT_VTABLE(u)->notify_message)
 2050 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
/* Handle SIGCHLD: peek at exited children with WNOWAIT (so /proc/$PID
 * stays around for cgroup lookup), flush pending notify messages,
 * resolve the child's unit, then actually reap the zombie and pass
 * the exit information to the unit type's sigchld_event() hook. */
 2058 static int manager_dispatch_sigchld(Manager *m) {
 2068 /* First we call waitid() for a PID and do not reap the
 2069 * zombie. That way we can still access /proc/$PID for
 2070 * it while it is a zombie. */
 2071 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
 2073 if (errno == ECHILD)
 2085 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
 2088 get_process_comm(si.si_pid, &name);
 2089 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
 2093 /* Let's flush any message the dying child might still
 2094 * have queued for us. This ensures that the process
 2095 * still exists in /proc so that we can figure out
 2096 * which cgroup and hence unit it belongs to. */
 2097 if ((r = manager_process_notify_fd(m)) < 0)
 2100 /* And now figure out the unit this belongs to */
 2101 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
 2102 u = cgroup_unit_by_pid(m, si.si_pid);
 2104 /* And now, we actually reap the zombie. */
 2105 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
 2112 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
 2115 log_debug("Child %lu died (code=%s, status=%i/%s)",
 2116 (long unsigned) si.si_pid,
 2117 sigchld_code_to_string(si.si_code),
 2119 strna(si.si_code == CLD_EXITED
 2120 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
 2121 : signal_to_string(si.si_status)));
 2126 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->id);
 2128 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
 2129 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
/* Enqueue a start job for the named special unit in the given mode;
 * failures are logged (with the D-Bus error text) but the error is
 * otherwise just returned to the caller. */
 2135 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
 2139 dbus_error_init(&error);
 2141 log_debug("Activating special unit %s", name);
 2143 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
 2144 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
 2146 dbus_error_free(&error);
/* Drain the signalfd and act on each received signal: sysvinit
 * compatibility signals (SIGINT/SIGWINCH/SIGPWR/...), state dumping,
 * reload/reexec requests, and the SIGRTMIN+n control signals that map
 * to special targets, exit codes, status toggles, log level and log
 * target changes.  SIGCHLD is collected into a flag and dispatched
 * once at the end via manager_dispatch_sigchld(). */
 2151 static int manager_process_signal_fd(Manager *m) {
 2153 struct signalfd_siginfo sfsi;
 2154 bool sigchld = false;
/* signalfd delivers fixed-size records; a short read other than
 * EINTR/EAGAIN is an error. */
 2159 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
 2164 if (errno == EINTR || errno == EAGAIN)
 2170 if (sfsi.ssi_pid > 0) {
 2173 get_process_comm(sfsi.ssi_pid, &p);
 2175 log_debug("Received SIG%s from PID %lu (%s).",
 2176 signal_to_string(sfsi.ssi_signo),
 2177 (unsigned long) sfsi.ssi_pid, strna(p));
 2180 log_debug("Received SIG%s.", signal_to_string(sfsi.ssi_signo));
 2182 switch (sfsi.ssi_signo) {
 2189 if (m->running_as == MANAGER_SYSTEM) {
 2190 /* This is for compatibility with the
 2191 * original sysvinit */
 2192 m->exit_code = MANAGER_REEXECUTE;
 2199 if (m->running_as == MANAGER_SYSTEM) {
 2200 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
 2204 /* Run the exit target if there is one, if not, just exit. */
 2205 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
 2206 m->exit_code = MANAGER_EXIT;
 2213 if (m->running_as == MANAGER_SYSTEM)
 2214 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
 2216 /* This is a nop on non-init */
 2220 if (m->running_as == MANAGER_SYSTEM)
 2221 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
 2223 /* This is a nop on non-init */
/* Reconnect to (or first start) the D-Bus service, depending on
 * whether it is currently active. */
 2229 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
 2231 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
 2232 log_info("Trying to reconnect to bus...");
 2236 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
 2237 log_info("Loading D-Bus service...");
 2238 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
/* Dump full unit and job state into an in-memory stream and log
 * it. */
 2249 if (!(f = open_memstream(&dump, &size))) {
 2250 log_warning("Failed to allocate memory stream.");
 2254 manager_dump_units(m, f, "\t");
 2255 manager_dump_jobs(m, f, "\t");
 2260 log_warning("Failed to write status stream");
 2265 log_dump(LOG_INFO, dump);
 2272 m->exit_code = MANAGER_RELOAD;
 2277 /* Starting SIGRTMIN+0 */
 2278 static const char * const target_table[] = {
 2279 [0] = SPECIAL_DEFAULT_TARGET,
 2280 [1] = SPECIAL_RESCUE_TARGET,
 2281 [2] = SPECIAL_EMERGENCY_TARGET,
 2282 [3] = SPECIAL_HALT_TARGET,
 2283 [4] = SPECIAL_POWEROFF_TARGET,
 2284 [5] = SPECIAL_REBOOT_TARGET,
 2285 [6] = SPECIAL_KEXEC_TARGET
 2288 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
 2289 static const ManagerExitCode code_table[] = {
 2291 [1] = MANAGER_POWEROFF,
 2292 [2] = MANAGER_REBOOT,
 2296 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
 2297 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
 2298 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
/* rescue and emergency targets (idx 1 and 2) are entered via
 * isolate, everything else via replace. */
 2299 manager_start_target(m, target_table[idx],
 2300 (idx == 1 || idx == 2) ? JOB_ISOLATE : JOB_REPLACE);
 2304 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
 2305 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
 2306 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
 2310 switch (sfsi.ssi_signo - SIGRTMIN) {
 2313 log_debug("Enabling showing of status.");
 2314 manager_set_show_status(m, true);
 2318 log_debug("Disabling showing of status.");
 2319 manager_set_show_status(m, false);
 2323 log_set_max_level(LOG_DEBUG);
 2324 log_notice("Setting log level to debug.");
 2328 log_set_max_level(LOG_INFO);
 2329 log_notice("Setting log level to info.");
 2333 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
 2334 log_notice("Setting log target to journal-or-kmsg.");
 2338 log_set_target(LOG_TARGET_CONSOLE);
 2339 log_notice("Setting log target to console.");
 2343 log_set_target(LOG_TARGET_KMSG);
 2344 log_notice("Setting log target to kmsg.");
 2348 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
 2349 log_notice("Setting log target to syslog-or-kmsg.");
 2353 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
/* Reap children only once, after all queued signals were read. */
 2360 return manager_dispatch_sigchld(m);
/* Dispatch a single epoll event according to the Watch structure
 * stashed in ev->data.ptr: signals, notify socket, unit fds, unit/job
 * timers, mount/swap table changes, udev, and D-Bus watches/timeouts. */
 2365 static int process_event(Manager *m, struct epoll_event *ev) {
 2372 assert_se(w = ev->data.ptr);
/* The watch may have been invalidated while the event was queued. */
 2374 if (w->type == WATCH_INVALID)
 2381 /* An incoming signal? */
 2382 if (ev->events != EPOLLIN)
 2385 if ((r = manager_process_signal_fd(m)) < 0)
 2392 /* An incoming daemon notification event? */
 2393 if (ev->events != EPOLLIN)
 2396 if ((r = manager_process_notify_fd(m)) < 0)
 2403 /* Some fd event, to be dispatched to the units */
 2404 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
 2407 case WATCH_UNIT_TIMER:
 2408 case WATCH_JOB_TIMER: {
 2412 /* Some timer event, to be dispatched to the units */
/* timerfd read yields a uint64_t expiration count; a short read
 * other than EINTR/EAGAIN is treated as an error. */
 2413 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
 2415 if (k < 0 && (errno == EINTR || errno == EAGAIN))
 2418 return k < 0 ? -errno : -EIO;
 2421 if (w->type == WATCH_UNIT_TIMER)
 2422 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
 2424 job_timer_event(w->data.job, v, w);
 2429 /* Some mount table change, intended for the mount subsystem */
 2430 mount_fd_event(m, ev->events);
 2434 /* Some swap table change, intended for the swap subsystem */
 2435 swap_fd_event(m, ev->events);
 2439 /* Some notification from udev, intended for the device subsystem */
 2440 device_fd_event(m, ev->events);
 2443 case WATCH_DBUS_WATCH:
 2444 bus_watch_event(m, w, ev->events);
 2447 case WATCH_DBUS_TIMEOUT:
 2448 bus_timeout_event(m, w, ev->events);
 2452 log_error("event type=%i", w->type);
 2453 assert_not_reached("Unknown epoll event type.");
/* The manager's main event loop: repeatedly drain all work queues
 * (load, run, bus, cleanup, gc, dbus, swap reload), then block in
 * epoll_wait() until the next event.  A rate limit throttles runaway
 * looping; when a runtime watchdog is configured (system instance),
 * the wait time is capped at half the watchdog interval so the
 * watchdog can be pinged in time.  Returns the final exit code. */
 2459 int manager_loop(Manager *m) {
/* Allow at most 50000 iterations per second before throttling. */
 2462 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
 2465 m->exit_code = MANAGER_RUNNING;
 2467 /* Release the path cache */
 2468 set_free_free(m->unit_path_cache);
 2469 m->unit_path_cache = NULL;
 2471 manager_check_finished(m);
 2473 /* There might still be some zombies hanging around from
 2474 * before we were exec()'ed. Let's reap them */
 2475 r = manager_dispatch_sigchld(m);
 2479 while (m->exit_code == MANAGER_RUNNING) {
 2480 struct epoll_event event;
 2484 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM)
 2487 if (!ratelimit_test(&rl)) {
 2488 /* Yay, something is going seriously wrong, pause a little */
 2489 log_warning("Looping too fast. Throttling execution a little.")
 2494 if (manager_dispatch_load_queue(m) > 0)
 2497 if (manager_dispatch_run_queue(m) > 0)
 2500 if (bus_dispatch(m) > 0)
 2503 if (manager_dispatch_cleanup_queue(m) > 0)
 2506 if (manager_dispatch_gc_queue(m) > 0)
 2509 if (manager_dispatch_dbus_queue(m) > 0)
 2512 if (swap_dispatch_reload(m) > 0)
 2515 /* Sleep for half the watchdog time */
 2516 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) {
 2517 wait_msec = (int) (m->runtime_watchdog / 2 / USEC_PER_MSEC);
 2523 n = epoll_wait(m->epoll_fd, &event, 1, wait_msec);
 2535 r = process_event(m, &event);
 2540 return m->exit_code;
/* Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/unit/<escaped-name> to a Unit.  The 31
 * skipped characters are exactly the length of that prefix. */
 2543 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
 2551 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
 2554 if (!(n = bus_path_unescape(s+31)))
 2557 u = manager_get_unit(m, n);
/* Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/job/<id> to a Job.  The 30 skipped
 * characters are exactly the length of that prefix. */
 2568 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
 2577 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
 2580 if ((r = safe_atou(s + 30, &id)) < 0)
 2583 if (!(j = manager_get_job(m, id)))
/* Emit an audit record for a service state change.  Suppressed while
 * reloading/deserializing, outside the system instance, for non-service
 * units, or when the audit fd is not open.  If the kernel refuses with
 * EPERM (e.g. in a container) the audit fd is closed so we never retry. */
 2591 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
 2596 if (m->audit_fd < 0)
 2599 /* Don't generate audit events if the service was already
 2600 * started and we're just deserializing */
 2601 if (m->n_reloading > 0)
 2604 if (m->running_as != MANAGER_SYSTEM)
 2607 if (u->type != UNIT_SERVICE)
 2610 if (!(p = unit_name_to_prefix_and_instance(u->id))) {
 2611 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
 2615 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
 2616 if (errno == EPERM) {
 2617 /* We aren't allowed to send audit messages?
 2618 * Then let's not retry again. */
 2619 audit_close(m->audit_fd);
 2622 log_warning("Failed to send audit message: %m");
/* Tell Plymouth about a unit state change over its abstract UNIX
 * socket.  Best-effort and non-blocking: if plymouthd is not running
 * or the socket would block, the message is silently dropped.
 * Suppressed while reloading, outside the system instance, and for
 * unit types other than service/mount/swap. */
 2630 void manager_send_unit_plymouth(Manager *m, Unit *u) {
 2632 union sockaddr_union sa;
 2634 char *message = NULL;
 2636 /* Don't generate plymouth events if the service was already
 2637 * started and we're just deserializing */
 2638 if (m->n_reloading > 0)
 2641 if (m->running_as != MANAGER_SYSTEM)
 2644 if (u->type != UNIT_SERVICE &&
 2645 u->type != UNIT_MOUNT &&
 2646 u->type != UNIT_SWAP)
 2649 /* We set SOCK_NONBLOCK here so that we rather drop the
 2650 * message than wait for plymouth */
 2651 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
 2652 log_error("socket() failed: %m");
/* Abstract socket: sun_path[0] stays NUL, name starts at +1. */
 2657 sa.sa.sa_family = AF_UNIX;
 2658 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
 2659 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
/* Connection-type errors just mean plymouth is gone; only log
 * anything unexpected. */
 2661 if (errno != EPIPE &&
 2664 errno != ECONNREFUSED &&
 2665 errno != ECONNRESET &&
 2666 errno != ECONNABORTED)
 2667 log_error("connect() failed: %m");
/* %n records the length so we can send the trailing NUL too. */
 2672 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
 2673 log_error("Out of memory");
 2678 if (write(fd, message, n + 1) != n + 1) {
 2680 if (errno != EPIPE &&
 2683 errno != ECONNREFUSED &&
 2684 errno != ECONNRESET &&
 2685 errno != ECONNABORTED)
 2686 log_error("Failed to write Plymouth message: %m");
 2693 close_nointr_nofail(fd);
/* Forward a D-Bus NameOwnerChanged notification to the unit that
 * watches the given bus name, if any. */
 2698 void manager_dispatch_bus_name_owner_changed(
 2701 const char* old_owner,
 2702 const char *new_owner) {
 2709 if (!(u = hashmap_get(m->watch_bus, name)))
 2712 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
/* Forward the result of a GetConnectionUnixProcessID query to the
 * unit that watches the given bus name, if any. */
 2715 void manager_dispatch_bus_query_pid_done(
 2726 if (!(u = hashmap_get(m->watch_bus, name)))
 2729 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
/* Create an unlinked temporary file (mode 0600 via umask) used to
 * serialize manager state across reload/reexec; /run for the system
 * instance, /tmp for user instances.  *_f receives a FILE* opened for
 * reading and writing. */
 2732 int manager_open_serialization(Manager *m, FILE **_f) {
 2740 if (m->running_as == MANAGER_SYSTEM)
 2741 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
 2743 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
/* Keep the dump private: restrict to owner before mkostemp(). */
 2748 saved_umask = umask(0077);
 2749 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
 2759 log_debug("Serializing state to %s", path);
 2762 if (!(f = fdopen(fd, "w+")))
/* Write the manager's state to 'f' for later deserialization after
 * reload/reexec: global key=value header (job id counter, taint flag,
 * timestamps), then one section per serializable unit.  File
 * descriptors that must survive are stashed in 'fds'. */
 2770 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
 2782 fprintf(f, "current-job-id=%i\n", m->current_job_id);
 2783 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
 2785 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
 2786 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
 2787 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
 2791 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
 2795 if (!unit_can_serialize(u))
 2802 if ((r = unit_serialize(u, f, fds)) < 0) {
 2808 assert(m->n_reloading > 0);
/* Also preserve the bus connection fds across the reexec. */
 2814 r = bus_fdset_add_all(m, fds);
/* Inverse of manager_serialize(): parse the key=value header (unknown
 * keys are logged and skipped for forward compatibility), then load
 * each named unit and let it deserialize its own section. */
 2821 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
 2827 log_debug("Deserializing state...");
 2832 char line[LINE_MAX], *l;
 2834 if (!fgets(line, sizeof(line), f)) {
 2849 if (startswith(l, "current-job-id=")) {
 2852 if (safe_atou32(l+15, &id) < 0)
 2853 log_debug("Failed to parse current job id value %s", l+15);
/* Never decrease the counter, ids must stay unique. */
 2855 m->current_job_id = MAX(m->current_job_id, id);
 2856 } else if (startswith(l, "taint-usr=")) {
 2859 if ((b = parse_boolean(l+10)) < 0)
 2860 log_debug("Failed to parse taint /usr flag %s", l+10);
 2862 m->taint_usr = m->taint_usr || b;
 2863 } else if (startswith(l, "initrd-timestamp="))
 2864 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
 2865 else if (startswith(l, "startup-timestamp="))
 2866 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
 2867 else if (startswith(l, "finish-timestamp="))
 2868 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
 2870 log_debug("Unknown serialization item '%s'", l);
/* Second phase: per-unit sections, introduced by the unit name. */
 2875 char name[UNIT_NAME_MAX+2];
 2878 if (!fgets(name, sizeof(name), f)) {
 2889 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
 2892 if ((r = unit_deserialize(u, f, fds)) < 0)
 2902 assert(m->n_reloading > 0);
/* Full configuration reload: serialize current state to a temp file,
 * tear down all jobs and units, rebuild lookup paths and rerun
 * generators, re-enumerate units from config, deserialize the saved
 * state back in, and finally coldplug everything. */
 2908 int manager_reload(Manager *m) {
 2915 if ((r = manager_open_serialization(m, &f)) < 0)
 2920 if (!(fds = fdset_new())) {
 2926 if ((r = manager_serialize(m, f, fds)) < 0) {
/* Rewind so deserialization below reads from the start. */
 2931 if (fseeko(f, 0, SEEK_SET) < 0) {
 2937 /* From here on there is no way back. */
 2938 manager_clear_jobs_and_units(m);
 2939 manager_undo_generators(m);
 2941 /* Find new unit paths */
 2942 lookup_paths_free(&m->lookup_paths);
 2943 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
 2946 manager_run_generators(m);
 2948 manager_build_unit_path_cache(m);
 2950 /* First, enumerate what we can from all config files */
 2951 if ((q = manager_enumerate(m)) < 0)
 2954 /* Second, deserialize our stored data */
 2955 if ((q = manager_deserialize(m, f, fds)) < 0)
 2961 /* Third, fire things up! */
 2962 if ((q = manager_coldplug(m)) < 0)
 2965 assert(m->n_reloading > 0);
/* True while the initial default-target job is still around (booting)
 * or a job exists for shutdown.target (shutting down). */
 2978 bool manager_is_booting_or_shutting_down(Manager *m) {
 2983 /* Is the initial job still around? */
 2984 if (manager_get_job(m, m->default_unit_job_id))
 2987 /* Is there a job for the shutdown target? */
 2988 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
/* Clear the "failed" state of every known unit. */
 2995 void manager_reset_failed(Manager *m) {
 3001 HASHMAP_FOREACH(u, m->units, i)
 3002 unit_reset_failed(u);
/* True if the named unit is inactive or on its way down; return value
 * for an unknown unit not visible in this excerpt. */
 3005 bool manager_unit_pending_inactive(Manager *m, const char *name) {
 3011 /* Returns true if the unit is inactive or going down */
 3012 if (!(u = manager_get_unit(m, name)))
 3015 return unit_pending_inactive(u);
/* Called whenever the job queue may have emptied: once no jobs remain
 * and the finish timestamp is not yet set, record boot completion,
 * compute kernel/initrd/userspace/total durations (kernel/initrd only
 * for the system instance outside a container), log them, broadcast
 * them on the bus and report readiness via sd_notify(). */
 3018 void manager_check_finished(Manager *m) {
 3019 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
 3020 usec_t kernel_usec, initrd_usec, userspace_usec, total_usec;
/* Only ever record/announce this once per boot. */
 3024 if (dual_timestamp_is_set(&m->finish_timestamp))
 3027 if (hashmap_size(m->jobs) > 0)
 3030 dual_timestamp_get(&m->finish_timestamp);
 3032 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
 3034 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
 3035 total_usec = m->finish_timestamp.monotonic;
 3037 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
 3039 kernel_usec = m->initrd_timestamp.monotonic;
 3040 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
 3042 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
 3043 format_timespan(kernel, sizeof(kernel), kernel_usec),
 3044 format_timespan(initrd, sizeof(initrd), initrd_usec),
 3045 format_timespan(userspace, sizeof(userspace), userspace_usec),
 3046 format_timespan(sum, sizeof(sum), total_usec));
 3048 kernel_usec = m->startup_timestamp.monotonic;
 3051 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
 3052 format_timespan(kernel, sizeof(kernel), kernel_usec),
 3053 format_timespan(userspace, sizeof(userspace), userspace_usec),
 3054 format_timespan(sum, sizeof(sum), total_usec));
/* User instance or container: only the total span is meaningful. */
 3057 userspace_usec = initrd_usec = kernel_usec = 0;
 3058 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
 3060 log_debug("Startup finished in %s.",
 3061 format_timespan(sum, sizeof(sum), total_usec));
 3064 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
 3067 "READY=1\nSTATUS=Startup finished in %s.",
 3068 format_timespan(sum, sizeof(sum), total_usec));
/* Run all generator binaries from the instance's generator directory,
 * pointing them at a (possibly temporary) output directory for
 * generated units, and splice that directory into the unit search
 * path.  If the generators produced nothing the directory is removed
 * again and the search path left untouched. */
 3071 void manager_run_generators(Manager *m) {
 3073 const char *generator_path;
 3074 const char *argv[3];
 3079 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
 3080 if (!(d = opendir(generator_path))) {
/* No generator directory is perfectly fine. */
 3082 if (errno == ENOENT)
 3085 log_error("Failed to enumerate generator directory: %m")
 3089 if (!m->generator_unit_path) {
 3091 char user_path[] = "/tmp/systemd-generator-XXXXXX";
/* PID 1 uses a fixed directory in /run; everything else gets a
 * private mkdtemp() directory under /tmp. */
 3093 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
 3094 p = "/run/systemd/generator";
 3096 if (mkdir_p(p, 0755) < 0) {
 3097 log_error("Failed to create generator directory: %m");
 3102 if (!(p = mkdtemp(user_path))) {
 3103 log_error("Failed to create generator directory: %m");
 3108 if (!(m->generator_unit_path = strdup(p))) {
 3109 log_error("Failed to allocate generator unit path.");
 3114 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
 3115 argv[1] = m->generator_unit_path;
 3119 execute_directory(generator_path, d, (char**) argv);
 3122 if (rmdir(m->generator_unit_path) >= 0) {
 3123 /* Uh? we were able to remove this dir? I guess that
 3124 * means the directory was empty, hence let's shortcut
 3127 free(m->generator_unit_path);
 3128 m->generator_unit_path = NULL;
 3132 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
 3135 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
 3136 log_error("Failed to add generator directory to unit search path: %m");
 3140 strv_free(m->lookup_paths.unit_path);
 3141 m->lookup_paths.unit_path = l;
 3143 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
/* Remove the generator directory from the search path and delete it
 * (with its contents) from disk; no-op if no generators ran. */
 3151 void manager_undo_generators(Manager *m) {
 3154 if (!m->generator_unit_path)
 3157 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
 3158 rm_rf(m->generator_unit_path, false, true, false);
 3160 free(m->generator_unit_path);
 3161 m->generator_unit_path = NULL;
/* Replace the manager's default cgroup controller list with a copy of
 * 'controllers', then normalize it with cg_shorten_controllers(). */
 3164 int manager_set_default_controllers(Manager *m, char **controllers) {
 3169 l = strv_copy(controllers);
 3173 strv_free(m->default_controllers);
 3174 m->default_controllers = l;
 3176 cg_shorten_controllers(m->default_controllers);
/* Decide whether PID 1 may log to the journal: close the journal log
 * stream while journald's socket or service is not fully running,
 * (re)open it once both are up.  User instances never touch this. */
 3181 void manager_recheck_journal(Manager *m) {
 3186 if (m->running_as != MANAGER_SYSTEM)
 3189 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
 3190 if (u && SOCKET(u)->state != SOCKET_RUNNING) {
 3191 log_close_journal();
 3195 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
 3196 if (u && SERVICE(u)->state != SERVICE_RUNNING) {
 3197 log_close_journal();
 3201 /* Hmm, OK, so the socket is fully up and the service is up
 3202 * too, then let's make use of the thing. */
/* Toggle boot status output (system instance only); the flag is also
 * persisted as the presence of /run/systemd/show-status so it survives
 * a reexec. */
 3206 void manager_set_show_status(Manager *m, bool b) {
 3209 if (m->running_as != MANAGER_SYSTEM)
 3215 touch("/run/systemd/show-status");
 3217 unlink("/run/systemd/show-status");
/* Whether boot status messages should be shown right now; always on
 * while Plymouth runs so the user sees something after pressing Esc.
 * Never shown for user instances. */
 3220 bool manager_get_show_status(Manager *m) {
 3223 if (m->running_as != MANAGER_SYSTEM)
 3229 /* If Plymouth is running make sure we show the status, so
 3230 * that there's something nice to see when people press Esc */
 3232 return plymouth_running();
/* String table mapping ManagerRunningAs values to their names, plus
 * the generated to-string/from-string lookup helpers. */
 3235 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
 3236 [MANAGER_SYSTEM] = "system",
 3237 [MANAGER_USER] = "user"
 3240 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);