1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
27 #include <sys/signalfd.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
36 #include <sys/types.h>
44 #include <systemd/sd-daemon.h>
53 #include "ratelimit.h"
55 #include "mount-setup.h"
56 #include "unit-name.h"
57 #include "dbus-unit.h"
60 #include "path-lookup.h"
62 #include "bus-errors.h"
63 #include "exit-status.h"
66 #include "cgroup-util.h"
68 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
69 #define GC_QUEUE_ENTRIES_MAX 16
71 /* As soon as 10s passed since a unit was added to our GC queue, make sure to run a gc sweep */
72 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
74 /* Where clients shall send notification messages to */
75 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
76 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
78 static int manager_setup_notify(Manager *m) {
81 struct sockaddr_un un;
83 struct epoll_event ev;
89 m->notify_watch.type = WATCH_NOTIFY;
90 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
91 log_error("Failed to allocate notification socket: %m");
96 sa.sa.sa_family = AF_UNIX;
99 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
101 unlink(NOTIFY_SOCKET_SYSTEM);
102 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
105 if (sa.un.sun_path[0] == '@')
106 sa.un.sun_path[0] = 0;
109 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
113 log_error("bind() failed: %m");
117 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
118 log_error("SO_PASSCRED failed: %m");
124 ev.data.ptr = &m->notify_watch;
126 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
129 if (sa.un.sun_path[0] == 0)
130 sa.un.sun_path[0] = '@';
132 if (!(m->notify_socket = strdup(sa.un.sun_path)))
135 log_debug("Using notification socket %s", m->notify_socket);
140 static int enable_special_signals(Manager *m) {
145 /* Enable that we get SIGINT on control-alt-del. In containers
146 * this will fail with EPERM, so ignore that. */
147 if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM)
148 log_warning("Failed to enable ctrl-alt-del handling: %m");
150 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
152 /* Support systems without virtual console */
154 log_warning("Failed to open /dev/tty0: %m");
156 /* Enable that we get SIGWINCH on kbrequest */
157 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
158 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
160 close_nointr_nofail(fd);
166 static int manager_setup_signals(Manager *m) {
168 struct epoll_event ev;
173 /* We are not interested in SIGSTOP and friends. */
175 sa.sa_handler = SIG_DFL;
176 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
177 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
179 assert_se(sigemptyset(&mask) == 0);
181 sigset_add_many(&mask,
182 SIGCHLD, /* Child died */
183 SIGTERM, /* Reexecute daemon */
184 SIGHUP, /* Reload configuration */
185 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
186 SIGUSR2, /* systemd: dump status */
187 SIGINT, /* Kernel sends us this on control-alt-del */
188 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
189 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
190 SIGRTMIN+0, /* systemd: start default.target */
191 SIGRTMIN+1, /* systemd: isolate rescue.target */
192 SIGRTMIN+2, /* systemd: isolate emergency.target */
193 SIGRTMIN+3, /* systemd: start halt.target */
194 SIGRTMIN+4, /* systemd: start poweroff.target */
195 SIGRTMIN+5, /* systemd: start reboot.target */
196 SIGRTMIN+6, /* systemd: start kexec.target */
197 SIGRTMIN+13, /* systemd: Immediate halt */
198 SIGRTMIN+14, /* systemd: Immediate poweroff */
199 SIGRTMIN+15, /* systemd: Immediate reboot */
200 SIGRTMIN+16, /* systemd: Immediate kexec */
201 SIGRTMIN+20, /* systemd: enable status messages */
202 SIGRTMIN+21, /* systemd: disable status messages */
203 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
204 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
205 SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
206 SIGRTMIN+27, /* systemd: set log target to console */
207 SIGRTMIN+28, /* systemd: set log target to kmsg */
208 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
210 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
212 m->signal_watch.type = WATCH_SIGNAL;
213 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
218 ev.data.ptr = &m->signal_watch;
220 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
223 if (m->running_as == MANAGER_SYSTEM)
224 return enable_special_signals(m);
229 static void manager_strip_environment(Manager *m) {
232 /* Remove variables from the inherited set that are part of
233 * the container interface:
234 * http://www.freedesktop.org/wiki/Software/systemd/ContainerInterface */
235 strv_remove_prefix(m->environment, "container=");
236 strv_remove_prefix(m->environment, "container_");
238 /* Remove variables from the inherited set that are part of
239 * the initrd interface:
240 * http://www.freedesktop.org/wiki/Software/systemd/InitrdInterface */
241 strv_remove_prefix(m->environment, "RD_");
244 int manager_new(ManagerRunningAs running_as, Manager **_m) {
249 assert(running_as >= 0);
250 assert(running_as < _MANAGER_RUNNING_AS_MAX);
252 if (!(m = new0(Manager, 1)))
255 dual_timestamp_get(&m->startup_timestamp);
257 m->running_as = running_as;
258 m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1;
259 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
260 m->pin_cgroupfs_fd = -1;
266 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
267 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
269 m->environment = strv_copy(environ);
273 manager_strip_environment(m);
275 if (running_as == MANAGER_SYSTEM) {
276 m->default_controllers = strv_new("cpu", NULL);
277 if (!m->default_controllers)
281 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
284 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
287 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
290 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
293 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
296 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
299 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
302 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
305 if ((r = manager_setup_signals(m)) < 0)
308 if ((r = manager_setup_cgroup(m)) < 0)
311 if ((r = manager_setup_notify(m)) < 0)
314 /* Try to connect to the busses, if possible. */
315 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
319 if ((m->audit_fd = audit_open()) < 0 &&
320 /* If the kernel lacks netlink or audit support,
321 * don't worry about it. */
322 errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT)
323 log_error("Failed to connect to audit log: %m");
326 m->taint_usr = dir_is_empty("/usr") > 0;
336 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
342 while ((u = m->cleanup_queue)) {
343 assert(u->in_cleanup_queue);
353 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
354 GC_OFFSET_UNSURE, /* No clue */
355 GC_OFFSET_GOOD, /* We still need this unit */
356 GC_OFFSET_BAD, /* We don't need this unit anymore */
360 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
367 if (u->gc_marker == gc_marker + GC_OFFSET_GOOD ||
368 u->gc_marker == gc_marker + GC_OFFSET_BAD ||
369 u->gc_marker == gc_marker + GC_OFFSET_IN_PATH)
372 if (u->in_cleanup_queue)
375 if (unit_check_gc(u))
378 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
382 SET_FOREACH(other, u->dependencies[UNIT_REFERENCED_BY], i) {
383 unit_gc_sweep(other, gc_marker);
385 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
388 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
395 /* We were unable to find anything out about this entry, so
396 * let's investigate it later */
397 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
398 unit_add_to_gc_queue(u);
402 /* We definitely know that this one is not useful anymore, so
403 * let's mark it for deletion */
404 u->gc_marker = gc_marker + GC_OFFSET_BAD;
405 unit_add_to_cleanup_queue(u);
409 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
412 static unsigned manager_dispatch_gc_queue(Manager *m) {
419 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
420 (m->gc_queue_timestamp <= 0 ||
421 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
424 log_debug("Running GC...");
426 m->gc_marker += _GC_OFFSET_MAX;
427 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
430 gc_marker = m->gc_marker;
432 while ((u = m->gc_queue)) {
433 assert(u->in_gc_queue);
435 unit_gc_sweep(u, gc_marker);
437 LIST_REMOVE(Unit, gc_queue, m->gc_queue, u);
438 u->in_gc_queue = false;
442 if (u->gc_marker == gc_marker + GC_OFFSET_BAD ||
443 u->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
444 log_debug("Collecting %s", u->id);
445 u->gc_marker = gc_marker + GC_OFFSET_BAD;
446 unit_add_to_cleanup_queue(u);
450 m->n_in_gc_queue = 0;
451 m->gc_queue_timestamp = 0;
456 static void manager_clear_jobs_and_units(Manager *m) {
462 while ((j = hashmap_first(m->transaction_jobs)))
465 while ((u = hashmap_first(m->units)))
468 manager_dispatch_cleanup_queue(m);
470 assert(!m->load_queue);
471 assert(!m->run_queue);
472 assert(!m->dbus_unit_queue);
473 assert(!m->dbus_job_queue);
474 assert(!m->cleanup_queue);
475 assert(!m->gc_queue);
477 assert(hashmap_isempty(m->transaction_jobs));
478 assert(hashmap_isempty(m->jobs));
479 assert(hashmap_isempty(m->units));
482 void manager_free(Manager *m) {
487 manager_clear_jobs_and_units(m);
489 for (c = 0; c < _UNIT_TYPE_MAX; c++)
490 if (unit_vtable[c]->shutdown)
491 unit_vtable[c]->shutdown(m);
493 /* If we reexecute ourselves, we keep the root cgroup
495 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
497 manager_undo_generators(m);
501 hashmap_free(m->units);
502 hashmap_free(m->jobs);
503 hashmap_free(m->transaction_jobs);
504 hashmap_free(m->watch_pids);
505 hashmap_free(m->watch_bus);
507 if (m->epoll_fd >= 0)
508 close_nointr_nofail(m->epoll_fd);
509 if (m->signal_watch.fd >= 0)
510 close_nointr_nofail(m->signal_watch.fd);
511 if (m->notify_watch.fd >= 0)
512 close_nointr_nofail(m->notify_watch.fd);
515 if (m->audit_fd >= 0)
516 audit_close(m->audit_fd);
519 free(m->notify_socket);
521 lookup_paths_free(&m->lookup_paths);
522 strv_free(m->environment);
524 strv_free(m->default_controllers);
526 hashmap_free(m->cgroup_bondings);
527 set_free_free(m->unit_path_cache);
532 int manager_enumerate(Manager *m) {
538 /* Let's ask every type to load all units from disk/kernel
539 * that it might know */
540 for (c = 0; c < _UNIT_TYPE_MAX; c++)
541 if (unit_vtable[c]->enumerate)
542 if ((q = unit_vtable[c]->enumerate(m)) < 0)
545 manager_dispatch_load_queue(m);
549 int manager_coldplug(Manager *m) {
557 /* Then, let's set up their initial state. */
558 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
564 if ((q = unit_coldplug(u)) < 0)
571 static void manager_build_unit_path_cache(Manager *m) {
578 set_free_free(m->unit_path_cache);
580 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
581 log_error("Failed to allocate unit path cache.");
585 /* This simply builds a list of files we know exist, so that
586 * we don't always have to go to disk */
588 STRV_FOREACH(i, m->lookup_paths.unit_path) {
591 if (!(d = opendir(*i))) {
592 log_error("Failed to open directory: %m");
596 while ((de = readdir(d))) {
599 if (ignore_file(de->d_name))
602 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
608 if ((r = set_put(m->unit_path_cache, p)) < 0) {
621 log_error("Failed to build unit path cache: %s", strerror(-r));
623 set_free_free(m->unit_path_cache);
624 m->unit_path_cache = NULL;
630 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
635 manager_run_generators(m);
637 manager_build_unit_path_cache(m);
639 /* If we will deserialize make sure that during enumeration
640 * this is already known, so we increase the counter here
645 /* First, enumerate what we can from all config files */
646 r = manager_enumerate(m);
648 /* Second, deserialize if there is something to deserialize */
650 if ((q = manager_deserialize(m, serialization, fds)) < 0)
653 /* Third, fire things up! */
654 if ((q = manager_coldplug(m)) < 0)
658 assert(m->n_reloading > 0);
665 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
669 /* Deletes one job from the transaction */
671 manager_transaction_unlink_job(m, j, delete_dependencies);
677 static void transaction_delete_unit(Manager *m, Unit *u) {
680 /* Deletes all jobs associated with a certain unit from the
683 while ((j = hashmap_get(m->transaction_jobs, u)))
684 transaction_delete_job(m, j, true);
687 static void transaction_clean_dependencies(Manager *m) {
693 /* Drops all dependencies of all installed jobs */
695 HASHMAP_FOREACH(j, m->jobs, i) {
696 while (j->subject_list)
697 job_dependency_free(j->subject_list);
698 while (j->object_list)
699 job_dependency_free(j->object_list);
702 assert(!m->transaction_anchor);
705 static void transaction_abort(Manager *m) {
710 while ((j = hashmap_first(m->transaction_jobs)))
712 transaction_delete_job(m, j, true);
716 assert(hashmap_isempty(m->transaction_jobs));
718 transaction_clean_dependencies(m);
721 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
726 /* A recursive sweep through the graph that marks all units
727 * that matter to the anchor job, i.e. are directly or
728 * indirectly a dependency of the anchor job via paths that
729 * are fully marked as mattering. */
734 l = m->transaction_anchor;
736 LIST_FOREACH(subject, l, l) {
738 /* This link does not matter */
742 /* This unit has already been marked */
743 if (l->object->generation == generation)
746 l->object->matters_to_anchor = true;
747 l->object->generation = generation;
749 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
753 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
754 JobDependency *l, *last;
758 assert(j->unit == other->unit);
759 assert(!j->installed);
761 /* Merges 'other' into 'j' and then deletes 'other'. */
764 j->state = JOB_WAITING;
765 j->override = j->override || other->override;
767 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
769 /* Patch us in as new owner of the JobDependency objects */
771 LIST_FOREACH(subject, l, other->subject_list) {
772 assert(l->subject == other);
777 /* Merge both lists */
779 last->subject_next = j->subject_list;
781 j->subject_list->subject_prev = last;
782 j->subject_list = other->subject_list;
785 /* Patch us in as new owner of the JobDependency objects */
787 LIST_FOREACH(object, l, other->object_list) {
788 assert(l->object == other);
793 /* Merge both lists */
795 last->object_next = j->object_list;
797 j->object_list->object_prev = last;
798 j->object_list = other->object_list;
801 /* Kill the other job */
802 other->subject_list = NULL;
803 other->object_list = NULL;
804 transaction_delete_job(m, other, true);
807 static bool job_is_conflicted_by(Job *j) {
812 /* Returns true if this job is pulled in by a least one
813 * ConflictedBy dependency. */
815 LIST_FOREACH(object, l, j->object_list)
822 static int delete_one_unmergeable_job(Manager *m, Job *j) {
827 /* Tries to delete one item in the linked list
828 * j->transaction_next->transaction_next->... that conflicts
829 * with another one, in an attempt to make an inconsistent
830 * transaction work. */
832 /* We rely here on the fact that if a merged with b does not
833 * merge with c, either a or b merge with c neither */
834 LIST_FOREACH(transaction, j, j)
835 LIST_FOREACH(transaction, k, j->transaction_next) {
838 /* Is this one mergeable? Then skip it */
839 if (job_type_is_mergeable(j->type, k->type))
842 /* Ok, we found two that conflict, let's see if we can
843 * drop one of them */
844 if (!j->matters_to_anchor && !k->matters_to_anchor) {
846 /* Both jobs don't matter, so let's
847 * find the one that is smarter to
848 * remove. Let's think positive and
849 * rather remove stops then starts --
850 * except if something is being
851 * stopped because it is conflicted by
852 * another unit in which case we
853 * rather remove the start. */
855 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
856 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
858 if (j->type == JOB_STOP) {
860 if (job_is_conflicted_by(j))
865 } else if (k->type == JOB_STOP) {
867 if (job_is_conflicted_by(k))
874 } else if (!j->matters_to_anchor)
876 else if (!k->matters_to_anchor)
881 /* Ok, we can drop one, so let's do so. */
882 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
883 transaction_delete_job(m, d, true);
890 static int transaction_merge_jobs(Manager *m, DBusError *e) {
897 /* First step, check whether any of the jobs for one specific
898 * task conflict. If so, try to drop one of them. */
899 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
904 LIST_FOREACH(transaction, k, j->transaction_next) {
905 if (job_type_merge(&t, k->type) >= 0)
908 /* OK, we could not merge all jobs for this
909 * action. Let's see if we can get rid of one
912 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
913 /* Ok, we managed to drop one, now
914 * let's ask our callers to call us
915 * again after garbage collecting */
918 /* We couldn't merge anything. Failure */
919 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
920 job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
925 /* Second step, merge the jobs. */
926 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
930 /* Merge all transactions */
931 LIST_FOREACH(transaction, k, j->transaction_next)
932 assert_se(job_type_merge(&t, k->type) == 0);
934 /* If an active job is mergeable, merge it too */
936 job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
938 while ((k = j->transaction_next)) {
940 transaction_merge_and_delete_job(m, k, j, t);
943 transaction_merge_and_delete_job(m, j, k, t);
946 if (j->unit->job && !j->installed)
947 transaction_merge_and_delete_job(m, j, j->unit->job, t);
949 assert(!j->transaction_next);
950 assert(!j->transaction_prev);
956 static void transaction_drop_redundant(Manager *m) {
961 /* Goes through the transaction and removes all jobs that are
970 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
971 bool changes_something = false;
974 LIST_FOREACH(transaction, k, j) {
976 if (!job_is_anchor(k) &&
977 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
978 (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
981 changes_something = true;
985 if (changes_something)
988 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
989 transaction_delete_job(m, j, false);
997 static bool unit_matters_to_anchor(Unit *u, Job *j) {
999 assert(!j->transaction_prev);
1001 /* Checks whether at least one of the jobs for this unit
1002 * matters to the anchor. */
1004 LIST_FOREACH(transaction, j, j)
1005 if (j->matters_to_anchor)
1011 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
1018 assert(!j->transaction_prev);
1020 /* Does a recursive sweep through the ordering graph, looking
1021 * for a cycle. If we find cycle we try to break it. */
1023 /* Have we seen this before? */
1024 if (j->generation == generation) {
1027 /* If the marker is NULL we have been here already and
1028 * decided the job was loop-free from here. Hence
1029 * shortcut things and return right-away. */
1033 /* So, the marker is not NULL and we already have been
1034 * here. We have a cycle. Let's try to break it. We go
1035 * backwards in our path and try to find a suitable
1036 * job to remove. We use the marker to find our way
1037 * back, since smart how we are we stored our way back
1039 log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
1042 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1044 log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
1048 !unit_matters_to_anchor(k->unit, k)) {
1049 /* Ok, we can drop this one, so let's
1054 /* Check if this in fact was the beginning of
1062 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
1063 transaction_delete_unit(m, delete->unit);
1067 log_error("Unable to break cycle");
1069 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1073 /* Make the marker point to where we come from, so that we can
1074 * find our way backwards if we want to break a cycle. We use
1075 * a special marker for the beginning: we point to
1077 j->marker = from ? from : j;
1078 j->generation = generation;
1080 /* We assume that the the dependencies are bidirectional, and
1081 * hence can ignore UNIT_AFTER */
1082 SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
1085 /* Is there a job for this unit? */
1086 if (!(o = hashmap_get(m->transaction_jobs, u)))
1088 /* Ok, there is no job for this in the
1089 * transaction, but maybe there is already one
1094 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1098 /* Ok, let's backtrack, and remember that this entry is not on
1099 * our path anymore. */
1105 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1114 /* Check if the ordering graph is cyclic. If it is, try to fix
1115 * that up by dropping one of the jobs. */
1117 g = (*generation)++;
1119 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1120 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1126 static void transaction_collect_garbage(Manager *m) {
1131 /* Drop jobs that are not required by any other job */
1139 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1140 if (j->object_list) {
1141 /* log_debug("Keeping job %s/%s because of %s/%s", */
1142 /* j->unit->id, job_type_to_string(j->type), */
1143 /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
1144 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1148 /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
1149 transaction_delete_job(m, j, true);
1157 static int transaction_is_destructive(Manager *m, DBusError *e) {
1163 /* Checks whether applying this transaction means that
1164 * existing jobs would be replaced */
1166 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1169 assert(!j->transaction_prev);
1170 assert(!j->transaction_next);
1173 j->unit->job != j &&
1174 !job_type_is_superset(j->type, j->unit->job->type)) {
1176 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1184 static void transaction_minimize_impact(Manager *m) {
1188 /* Drops all unnecessary jobs that reverse already active jobs
1189 * or that stop a running service. */
1197 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1198 LIST_FOREACH(transaction, j, j) {
1199 bool stops_running_service, changes_existing_job;
1201 /* If it matters, we shouldn't drop it */
1202 if (j->matters_to_anchor)
1205 /* Would this stop a running service?
1206 * Would this change an existing job?
1207 * If so, let's drop this entry */
1209 stops_running_service =
1210 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1212 changes_existing_job =
1214 job_type_is_conflicting(j->type, j->unit->job->type);
1216 if (!stops_running_service && !changes_existing_job)
1219 if (stops_running_service)
1220 log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
1222 if (changes_existing_job)
1223 log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
1225 /* Ok, let's get rid of this */
1226 log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
1228 transaction_delete_job(m, j, true);
1240 static int transaction_apply(Manager *m, JobMode mode) {
1245 /* Moves the transaction jobs to the set of active jobs */
1247 if (mode == JOB_ISOLATE) {
1249 /* When isolating first kill all installed jobs which
1250 * aren't part of the new transaction */
1252 HASHMAP_FOREACH(j, m->jobs, i) {
1253 assert(j->installed);
1255 if (hashmap_get(m->transaction_jobs, j->unit))
1258 /* 'j' itself is safe to remove, but if other jobs
1259 are invalidated recursively, our iterator may become
1260 invalid and we need to start over. */
1261 if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
1266 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1268 assert(!j->transaction_prev);
1269 assert(!j->transaction_next);
1274 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1278 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1280 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
1285 job_free(j->unit->job);
1288 j->installed = true;
1289 m->n_installed_jobs ++;
1291 /* We're fully installed. Now let's free data we don't
1294 assert(!j->transaction_next);
1295 assert(!j->transaction_prev);
1297 job_add_to_run_queue(j);
1298 job_add_to_dbus_queue(j);
1301 log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
1304 /* As last step, kill all remaining job dependencies. */
1305 transaction_clean_dependencies(m);
1311 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1315 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1321 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1323 unsigned generation = 1;
1327 /* This applies the changes recorded in transaction_jobs to
1328 * the actual list of jobs, if possible. */
1330 /* First step: figure out which jobs matter */
1331 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1333 /* Second step: Try not to stop any running services if
1334 * we don't have to. Don't try to reverse running
1335 * jobs if we don't have to. */
1336 if (mode == JOB_FAIL)
1337 transaction_minimize_impact(m);
1339 /* Third step: Drop redundant jobs */
1340 transaction_drop_redundant(m);
1343 /* Fourth step: Let's remove unneeded jobs that might
1345 if (mode != JOB_ISOLATE)
1346 transaction_collect_garbage(m);
1348 /* Fifth step: verify order makes sense and correct
1349 * cycles if necessary and possible */
1350 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1354 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1358 /* Let's see if the resulting transaction ordering
1359 * graph is still cyclic... */
1363 /* Sixth step: let's drop unmergeable entries if
1364 * necessary and possible, merge entries we can
1366 if ((r = transaction_merge_jobs(m, e)) >= 0)
1370 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1374 /* Seventh step: an entry got dropped, let's garbage
1375 * collect its dependencies. */
1376 if (mode != JOB_ISOLATE)
1377 transaction_collect_garbage(m);
1379 /* Let's see if the resulting transaction still has
1380 * unmergeable entries ... */
1383 /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
1384 transaction_drop_redundant(m);
1386 /* Ninth step: check whether we can actually apply this */
1387 if (mode == JOB_FAIL)
1388 if ((r = transaction_is_destructive(m, e)) < 0) {
1389 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1393 /* Tenth step: apply changes */
1394 if ((r = transaction_apply(m, mode)) < 0) {
1395 log_warning("Failed to apply transaction: %s", strerror(-r));
1399 assert(hashmap_isempty(m->transaction_jobs));
1400 assert(!m->transaction_anchor);
1405 transaction_abort(m);
1409 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1415 /* Looks for an existing prospective job and returns that. If
1416 * it doesn't exist it is created and added to the prospective
1419 f = hashmap_get(m->transaction_jobs, unit);
1421 LIST_FOREACH(transaction, j, f) {
1422 assert(j->unit == unit);
1424 if (j->type == type) {
1431 if (unit->job && unit->job->type == type)
1433 else if (!(j = job_new(m, type, unit)))
1438 j->matters_to_anchor = false;
1439 j->override = override;
1441 LIST_PREPEND(Job, transaction, f, j);
1443 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1451 /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
1456 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1460 if (j->transaction_prev)
1461 j->transaction_prev->transaction_next = j->transaction_next;
1462 else if (j->transaction_next)
1463 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1465 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1467 if (j->transaction_next)
1468 j->transaction_next->transaction_prev = j->transaction_prev;
1470 j->transaction_prev = j->transaction_next = NULL;
1472 while (j->subject_list)
1473 job_dependency_free(j->subject_list);
1475 while (j->object_list) {
1476 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1478 job_dependency_free(j->object_list);
1480 if (other && delete_dependencies) {
1481 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1482 other->unit->id, job_type_to_string(other->type),
1483 j->unit->id, job_type_to_string(j->type));
1484 transaction_delete_job(m, other, delete_dependencies);
/* Add a job of the given type for 'unit' to the transaction, plus jobs for
 * everything the unit pulls in (Requires, Wants, Conflicts, ... depending on
 * the job type). 'by' is the requesting job (or presumably NULL for the
 * anchor job — confirm against callers); 'e' receives a D-Bus error on
 * failure. Returns < 0 on error. Dependency jobs that fail to add are
 * logged and skipped when the dependency is non-fatal. */
1489 static int transaction_add_job_and_dependencies(
1497 bool ignore_requirements,
1508 assert(type < _JOB_TYPE_MAX);
1511 /* log_debug("Pulling in %s/%s from %s/%s", */
1512 /* unit->id, job_type_to_string(type), */
1513 /* by ? by->unit->id : "NA", */
1514 /* by ? job_type_to_string(by->type) : "NA"); */
/* Refuse units that are in a load state we cannot operate on at all */
1516 if (unit->load_state != UNIT_LOADED &&
1517 unit->load_state != UNIT_ERROR &&
1518 unit->load_state != UNIT_MASKED) {
1519 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
/* Stopping a broken/masked unit is still allowed; anything else is not */
1523 if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
1524 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1525 "Unit %s failed to load: %s. "
1526 "See system logs and 'systemctl status %s' for details.",
1528 strerror(-unit->load_error),
1533 if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
1534 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
1538 if (!unit_job_is_applicable(unit, type)) {
1539 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
1543 /* First add the job. */
1544 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1547 ret->ignore_order = ret->ignore_order || ignore_order;
1549 /* Then, add a link to the job. */
1550 if (!job_dependency_new(by, ret, matters, conflicts))
/* Only recurse into dependencies the first time this job is added */
1553 if (is_new && !ignore_requirements) {
1556 /* If we are following some other unit, make sure we
1557 * add all dependencies of everybody following. */
1558 if (unit_following_set(ret->unit, &following) > 0) {
1559 SET_FOREACH(dep, following, i)
1560 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1561 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1567 set_free(following);
1570 /* Finally, recursively add in all dependencies. */
1571 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1572 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i)
1573 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1581 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i)
1582 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
/* Overridable requirements only "matter" when override is off */
1591 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1592 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1593 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Wants= never matters: failures are logged and ignored */
1599 SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i)
1600 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1601 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Requisite= only verifies the dependency is active; it does not start it */
1607 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i)
1608 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1617 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1618 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1619 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Conflicts= pulls in stop jobs for the conflicting units */
1625 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i)
1626 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1635 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i)
1636 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1637 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Stopping propagates to units that require or are bound to us */
1645 if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1647 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i)
1648 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1657 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i)
1658 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
/* Reload propagates along PropagateReloadTo= */
1668 if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
1670 SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
1671 r = transaction_add_job_and_dependencies(m, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
1674 log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1682 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
/* For an isolate request: queue a stop job for every known unit that is not
 * part of the transaction yet, skipping aliases, units marked
 * IgnoreOnIsolate=, and units that are already inactive with no job
 * pending. Failures are logged and ignored. */
1694 static int transaction_add_isolate_jobs(Manager *m) {
1702 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1704 /* ignore aliases */
1708 if (u->ignore_on_isolate)
1711 /* No need to stop inactive jobs */
1712 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
1715 /* Is there already something listed for this? */
1716 if (hashmap_get(m->transaction_jobs, u))
1719 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1720 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
/* Public entry point for enqueuing a job: builds a transaction for the
 * requested job and all its dependencies, optionally augments it with
 * isolate stop jobs, then activates it according to 'mode'. On failure the
 * transaction is aborted and a D-Bus error is set in 'e'. The resulting
 * anchor job is presumably returned via _ret — the tail of this function is
 * not visible here, confirm in full source. */
1726 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1731 assert(type < _JOB_TYPE_MAX);
1733 assert(mode < _JOB_MODE_MAX);
/* Isolate only makes sense as a start request on a unit that allows it */
1735 if (mode == JOB_ISOLATE && type != JOB_START) {
1736 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1740 if (mode == JOB_ISOLATE && !unit->allow_isolate) {
1741 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1745 log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
1747 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1748 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1749 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1750 transaction_abort(m);
1754 if (mode == JOB_ISOLATE)
1755 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1756 transaction_abort(m);
1760 if ((r = transaction_activate(m, mode, e)) < 0)
1763 log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
/* Convenience wrapper around manager_add_job(): resolves (and if necessary
 * loads) the unit by name first, then delegates. Returns the result of
 * manager_load_unit() on lookup failure. */
1771 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1776 assert(type < _JOB_TYPE_MAX);
1778 assert(mode < _JOB_MODE_MAX);
1780 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1783 return manager_add_job(m, type, unit, mode, override, e, _ret);
/* Look up an installed job by its numeric id; returns NULL if not found. */
1786 Job *manager_get_job(Manager *m, uint32_t id) {
1789 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
/* Look up a unit by name; returns NULL if no unit with that name exists. */
1792 Unit *manager_get_unit(Manager *m, const char *name) {
1796 return hashmap_get(m->units, name);
/* Drain the unit load queue, loading each queued unit's configuration.
 * Guarded against re-entrancy via m->dispatching_load_queue. Presumably
 * returns the number of units dispatched — the counting lines are not
 * visible here. */
1799 unsigned manager_dispatch_load_queue(Manager *m) {
1805 /* Make sure we are not run recursively */
1806 if (m->dispatching_load_queue)
1809 m->dispatching_load_queue = true;
1811 /* Dispatches the load queue. Takes a unit from the queue and
1812 * tries to load its data until the queue is empty */
1814 while ((u = m->load_queue)) {
1815 assert(u->in_load_queue);
1821 m->dispatching_load_queue = false;
/* Prepare a unit for loading without reading anything from disk: validate
 * the name/path, reuse an existing unit if one is registered, otherwise
 * allocate a fresh Unit, record its fragment path, and enqueue it on the
 * load/dbus/gc queues. Either 'name' or 'path' must be given; when only a
 * path is given the unit name is derived from its file name. */
1825 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1831 assert(name || path);
1833 /* This will prepare the unit for loading, but not actually
1834 * load anything from disk. */
1836 if (path && !is_path(path)) {
1837 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1842 name = file_name_from_path(path);
1844 t = unit_name_to_type(name);
1846 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid_no_type(name, false)) {
1847 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
/* Reuse an already-registered unit if present */
1851 ret = manager_get_unit(m, name);
1857 ret = unit_new(m, unit_vtable[t]->object_size);
1862 ret->fragment_path = strdup(path);
1863 if (!ret->fragment_path) {
1869 if ((r = unit_add_name(ret, name)) < 0) {
1874 unit_add_to_load_queue(ret);
1875 unit_add_to_dbus_queue(ret);
1876 unit_add_to_gc_queue(ret);
/* Load a unit's configuration files (but do not start anything): prepare
 * the unit, run the load queue, and follow any merge so *_ret points at the
 * canonical unit object. */
1884 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1889 /* This will load the service information files, but not actually
1890 * start any services or anything. */
1892 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1895 manager_dispatch_load_queue(m);
1898 *_ret = unit_follow_merge(*_ret);
/* Dump a human-readable description of every installed job to 'f',
 * prefixing each line with 'prefix'. */
1903 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1910 HASHMAP_FOREACH(j, s->jobs, i)
1911 job_dump(j, f, prefix);
/* Dump a human-readable description of every known unit to 'f', prefixing
 * each line with 'prefix'. */
1914 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1922 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1924 unit_dump(u, f, prefix);
/* Abort any pending transaction and cancel every installed job. */
1927 void manager_clear_jobs(Manager *m) {
1932 transaction_abort(m);
1934 while ((j = hashmap_first(m->jobs)))
1935 job_finish_and_invalidate(j, JOB_CANCELED);
/* Drain the run queue, executing each queued job. Guarded against
 * re-entrancy via m->dispatching_run_queue. Presumably returns the number
 * of jobs dispatched — the counting lines are not visible here. */
1938 unsigned manager_dispatch_run_queue(Manager *m) {
1942 if (m->dispatching_run_queue)
1945 m->dispatching_run_queue = true;
1947 while ((j = m->run_queue)) {
1948 assert(j->installed);
1949 assert(j->in_run_queue);
1951 job_run_and_invalidate(j);
1955 m->dispatching_run_queue = false;
/* Drain the D-Bus change-signal queues: emit change signals for every unit
 * and job that has pending D-Bus notifications. Guarded against
 * re-entrancy via m->dispatching_dbus_queue. */
1959 unsigned manager_dispatch_dbus_queue(Manager *m) {
1966 if (m->dispatching_dbus_queue)
1969 m->dispatching_dbus_queue = true;
1971 while ((u = m->dbus_unit_queue)) {
1972 assert(u->in_dbus_queue);
1974 bus_unit_send_change_signal(u);
1978 while ((j = m->dbus_job_queue)) {
1979 assert(j->in_dbus_queue);
1981 bus_job_send_change_signal(j);
1985 m->dispatching_dbus_queue = false;
/* Read one sd_notify(3)-style datagram from the notification socket,
 * identify the sending unit via the SCM_CREDENTIALS PID (first by watched
 * PID, then by cgroup membership), split the payload into tags and hand
 * them to the unit's notify_message() handler. Messages without
 * credentials, or from PIDs we cannot map to a unit, are ignored with a
 * warning. */
1989 static int manager_process_notify_fd(Manager *m) {
1996 struct msghdr msghdr;
1998 struct ucred *ucred;
2000 struct cmsghdr cmsghdr;
2001 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
2007 iovec.iov_base = buf;
/* Leave room for the NUL terminator added before parsing */
2008 iovec.iov_len = sizeof(buf)-1;
2012 msghdr.msg_iov = &iovec;
2013 msghdr.msg_iovlen = 1;
2014 msghdr.msg_control = &control;
2015 msghdr.msg_controllen = sizeof(control);
2017 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
2021 if (errno == EAGAIN || errno == EINTR)
/* Require valid SCM_CREDENTIALS ancillary data so the sender PID is trusted */
2027 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
2028 control.cmsghdr.cmsg_level != SOL_SOCKET ||
2029 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
2030 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
2031 log_warning("Received notify message without credentials. Ignoring.");
2035 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
2037 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
2038 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
2039 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
2043 assert((size_t) n < sizeof(buf));
2045 if (!(tags = strv_split(buf, "\n\r")))
2048 log_debug("Got notification message for unit %s", u->id);
2050 if (UNIT_VTABLE(u)->notify_message)
2051 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
/* Reap dead children. Uses a two-step waitid(): first with WNOWAIT so the
 * zombie's /proc entry is still available for unit lookup (by watched PID
 * or by cgroup), then a second waitid() to actually reap it. Any pending
 * notify messages from the dying child are flushed first, and the owning
 * unit's sigchld_event() handler is invoked. */
2059 static int manager_dispatch_sigchld(Manager *m) {
2069 /* First we call waitd() for a PID and do not reap the
2070 * zombie. That way we can still access /proc/$PID for
2071 * it while it is a zombie. */
2072 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2074 if (errno == ECHILD)
2086 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2089 get_process_comm(si.si_pid, &name);
2090 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2094 /* Let's flush any message the dying child might still
2095 * have queued for us. This ensures that the process
2096 * still exists in /proc so that we can figure out
2097 * which cgroup and hence unit it belongs to. */
2098 if ((r = manager_process_notify_fd(m)) < 0)
2101 /* And now figure out the unit this belongs to */
2102 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2103 u = cgroup_unit_by_pid(m, si.si_pid);
2105 /* And now, we actually reap the zombie. */
2106 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
/* Only real exits/kills are of interest (not stop/continue events) */
2113 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2116 log_debug("Child %lu died (code=%s, status=%i/%s)",
2117 (long unsigned) si.si_pid,
2118 sigchld_code_to_string(si.si_code),
2120 strna(si.si_code == CLD_EXITED
2121 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2122 : signal_to_string(si.si_status)));
2127 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->id);
2129 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2130 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
/* Enqueue a start job for the named special unit with the given job mode.
 * Errors are logged (with the D-Bus error text) but the error code is still
 * presumably returned to the caller — the return line is not visible here. */
2136 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2140 dbus_error_init(&error);
2142 log_debug("Activating special unit %s", name);
2144 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2145 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2147 dbus_error_free(&error);
/* Read and dispatch signals delivered via the signalfd: sysvinit-style
 * compatibility signals (SIGTERM=reexec, SIGINT=ctrl-alt-del, SIGWINCH,
 * SIGPWR), exit/reload requests, state dumping, plus SIGRTMIN+n realtime
 * signals that map to special targets, exit codes, status toggles, log
 * level and log target changes. Ends by reaping children when a SIGCHLD
 * was seen. */
2152 static int manager_process_signal_fd(Manager *m) {
2154 struct signalfd_siginfo sfsi;
2155 bool sigchld = false;
2160 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2165 if (errno == EINTR || errno == EAGAIN)
2171 if (sfsi.ssi_pid > 0) {
2174 get_process_comm(sfsi.ssi_pid, &p);
2176 log_debug("Received SIG%s from PID %lu (%s).",
2177 signal_to_string(sfsi.ssi_signo),
2178 (unsigned long) sfsi.ssi_pid, strna(p));
2181 log_debug("Received SIG%s.", signal_to_string(sfsi.ssi_signo));
2183 switch (sfsi.ssi_signo) {
2190 if (m->running_as == MANAGER_SYSTEM) {
2191 /* This is for compatibility with the
2192 * original sysvinit */
2193 m->exit_code = MANAGER_REEXECUTE;
2200 if (m->running_as == MANAGER_SYSTEM) {
2201 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2205 /* Run the exit target if there is one, if not, just exit. */
2206 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2207 m->exit_code = MANAGER_EXIT;
2214 if (m->running_as == MANAGER_SYSTEM)
2215 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2217 /* This is a nop on non-init */
2221 if (m->running_as == MANAGER_SYSTEM)
2222 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2224 /* This is a nop on non-init */
/* Reconnect to (or start) the D-Bus service depending on its state */
2230 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2232 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2233 log_info("Trying to reconnect to bus...");
2237 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2238 log_info("Loading D-Bus service...");
2239 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
/* Dump complete unit/job state into an in-memory stream and log it */
2250 if (!(f = open_memstream(&dump, &size))) {
2251 log_warning("Failed to allocate memory stream.");
2255 manager_dump_units(m, f, "\t");
2256 manager_dump_jobs(m, f, "\t");
2261 log_warning("Failed to write status stream");
2266 log_dump(LOG_INFO, dump);
2273 m->exit_code = MANAGER_RELOAD;
2278 /* Starting SIGRTMIN+0 */
2279 static const char * const target_table[] = {
2280 [0] = SPECIAL_DEFAULT_TARGET,
2281 [1] = SPECIAL_RESCUE_TARGET,
2282 [2] = SPECIAL_EMERGENCY_TARGET,
2283 [3] = SPECIAL_HALT_TARGET,
2284 [4] = SPECIAL_POWEROFF_TARGET,
2285 [5] = SPECIAL_REBOOT_TARGET,
2286 [6] = SPECIAL_KEXEC_TARGET
2289 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2290 static const ManagerExitCode code_table[] = {
2292 [1] = MANAGER_POWEROFF,
2293 [2] = MANAGER_REBOOT,
2297 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2298 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2299 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
/* rescue and emergency targets are isolated; the rest just replace */
2300 manager_start_target(m, target_table[idx],
2301 (idx == 1 || idx == 2) ? JOB_ISOLATE : JOB_REPLACE);
2305 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2306 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2307 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2311 switch (sfsi.ssi_signo - SIGRTMIN) {
2314 log_debug("Enabling showing of status.");
2315 manager_set_show_status(m, true);
2319 log_debug("Disabling showing of status.");
2320 manager_set_show_status(m, false);
2324 log_set_max_level(LOG_DEBUG);
2325 log_notice("Setting log level to debug.");
2329 log_set_max_level(LOG_INFO);
2330 log_notice("Setting log level to info.");
2334 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
2335 log_notice("Setting log target to journal-or-kmsg.");
2339 log_set_target(LOG_TARGET_CONSOLE);
2340 log_notice("Setting log target to console.");
2344 log_set_target(LOG_TARGET_KMSG);
2345 log_notice("Setting log target to kmsg.");
2349 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2350 log_notice("Setting log target to syslog-or-kmsg.");
2354 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
2361 return manager_dispatch_sigchld(m);
/* Dispatch a single epoll event to its handler based on the Watch type
 * stored in ev->data.ptr: the signal fd, the notify fd, unit fds, unit/job
 * timerfds, mount/swap/udev monitors, or D-Bus watches/timeouts. */
2366 static int process_event(Manager *m, struct epoll_event *ev) {
2373 assert_se(w = ev->data.ptr);
2375 if (w->type == WATCH_INVALID)
2382 /* An incoming signal? */
2383 if (ev->events != EPOLLIN)
2386 if ((r = manager_process_signal_fd(m)) < 0)
2393 /* An incoming daemon notification event? */
2394 if (ev->events != EPOLLIN)
2397 if ((r = manager_process_notify_fd(m)) < 0)
2404 /* Some fd event, to be dispatched to the units */
2405 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2408 case WATCH_UNIT_TIMER:
2409 case WATCH_JOB_TIMER: {
2413 /* Some timer event, to be dispatched to the units */
/* Drain the timerfd expiration counter before dispatching */
2414 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2416 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2419 return k < 0 ? -errno : -EIO;
2422 if (w->type == WATCH_UNIT_TIMER)
2423 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2425 job_timer_event(w->data.job, v, w);
2430 /* Some mount table change, intended for the mount subsystem */
2431 mount_fd_event(m, ev->events);
2435 /* Some swap table change, intended for the swap subsystem */
2436 swap_fd_event(m, ev->events);
2440 /* Some notification from udev, intended for the device subsystem */
2441 device_fd_event(m, ev->events);
2444 case WATCH_DBUS_WATCH:
2445 bus_watch_event(m, w, ev->events);
2448 case WATCH_DBUS_TIMEOUT:
2449 bus_timeout_event(m, w, ev->events);
2453 log_error("event type=%i", w->type);
2454 assert_not_reached("Unknown epoll event type.");
/* Main event loop: repeatedly drain all work queues (load, run, bus,
 * cleanup, gc, dbus, swap reload) and, once idle, block in epoll_wait()
 * for the next event. Rate-limited to catch busy loops, and periodically
 * pets the runtime watchdog when running as the system manager. Returns
 * the manager exit code that terminated the loop. */
2460 int manager_loop(Manager *m) {
/* Throttle: at most 50000 loop iterations per second */
2463 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2466 m->exit_code = MANAGER_RUNNING;
2468 /* Release the path cache */
2469 set_free_free(m->unit_path_cache);
2470 m->unit_path_cache = NULL;
2472 manager_check_finished(m);
2474 /* There might still be some zombies hanging around from
2475 * before we were exec()'ed. Leat's reap them */
2476 r = manager_dispatch_sigchld(m);
2480 while (m->exit_code == MANAGER_RUNNING) {
2481 struct epoll_event event;
2485 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM)
2488 if (!ratelimit_test(&rl)) {
2489 /* Yay, something is going seriously wrong, pause a little */
2490 log_warning("Looping too fast. Throttling execution a little.");
/* Each dispatcher returning > 0 means more work may be pending:
 * restart the queue-draining pass before sleeping */
2495 if (manager_dispatch_load_queue(m) > 0)
2498 if (manager_dispatch_run_queue(m) > 0)
2501 if (bus_dispatch(m) > 0)
2504 if (manager_dispatch_cleanup_queue(m) > 0)
2507 if (manager_dispatch_gc_queue(m) > 0)
2510 if (manager_dispatch_dbus_queue(m) > 0)
2513 if (swap_dispatch_reload(m) > 0)
2516 /* Sleep for half the watchdog time */
2517 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) {
2518 wait_msec = (int) (m->runtime_watchdog / 2 / USEC_PER_MSEC);
2524 n = epoll_wait(m->epoll_fd, &event, 1, wait_msec);
2536 r = process_event(m, &event);
2541 return m->exit_code;
/* Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/unit/<escaped-name> to its Unit. The 31-byte
 * offset skips exactly the path prefix checked above. */
2544 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2552 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2555 if (!(n = bus_path_unescape(s+31)))
2558 u = manager_get_unit(m, n);
/* Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/job/<id> to its Job. The 30-byte offset skips
 * exactly the path prefix checked above. */
2569 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2578 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2581 if ((r = safe_atou(s + 30, &id)) < 0)
2584 if (!(j = manager_get_job(m, id)))
/* Emit an audit record of the given type for a service unit's start/stop
 * result. Skipped when auditing is unavailable, during deserialization,
 * when not the system manager, or for non-service units. If the kernel
 * refuses with EPERM the audit fd is closed so we never retry. */
2592 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2597 if (m->audit_fd < 0)
2600 /* Don't generate audit events if the service was already
2601 * started and we're just deserializing */
2602 if (m->n_reloading > 0)
2605 if (m->running_as != MANAGER_SYSTEM)
2608 if (u->type != UNIT_SERVICE)
2611 if (!(p = unit_name_to_prefix_and_instance(u->id))) {
2612 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2616 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2617 if (errno == EPERM) {
2618 /* We aren't allowed to send audit messages?
2619 * Then let's not retry again. */
2620 audit_close(m->audit_fd);
2623 log_warning("Failed to send audit message: %m");
/* Notify Plymouth (the boot splash) that a service/mount/swap unit changed
 * state, over its abstract-namespace AF_UNIX socket. Best effort: the
 * socket is non-blocking so the message is dropped rather than waited for,
 * and connection-type errors are silently ignored. Skipped during
 * deserialization and when not the system manager. */
2631 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2633 union sockaddr_union sa;
2635 char *message = NULL;
2637 /* Don't generate plymouth events if the service was already
2638 * started and we're just deserializing */
2639 if (m->n_reloading > 0)
2642 if (m->running_as != MANAGER_SYSTEM)
2645 if (u->type != UNIT_SERVICE &&
2646 u->type != UNIT_MOUNT &&
2647 u->type != UNIT_SWAP)
2650 /* We set SOCK_NONBLOCK here so that we rather drop the
2651 * message then wait for plymouth */
2652 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2653 log_error("socket() failed: %m");
2658 sa.sa.sa_family = AF_UNIX;
/* sun_path[0] left as NUL: abstract socket namespace */
2659 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2660 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2662 if (errno != EPIPE &&
2665 errno != ECONNREFUSED &&
2666 errno != ECONNRESET &&
2667 errno != ECONNABORTED)
2668 log_error("connect() failed: %m");
/* %n records the message length so the write below can include it */
2673 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
2674 log_error("Out of memory");
2679 if (write(fd, message, n + 1) != n + 1) {
2681 if (errno != EPIPE &&
2684 errno != ECONNREFUSED &&
2685 errno != ECONNRESET &&
2686 errno != ECONNABORTED)
2687 log_error("Failed to write Plymouth message: %m");
2694 close_nointr_nofail(fd);
/* Forward a D-Bus NameOwnerChanged event to the unit that registered a
 * watch on that bus name, if any. */
2699 void manager_dispatch_bus_name_owner_changed(
2702 const char* old_owner,
2703 const char *new_owner) {
2710 if (!(u = hashmap_get(m->watch_bus, name)))
2713 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
/* Forward the result of a GetConnectionUnixProcessID query to the unit
 * watching that bus name, if any. */
2716 void manager_dispatch_bus_query_pid_done(
2727 if (!(u = hashmap_get(m->watch_bus, name)))
2730 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
/* Create a private temporary file (in /run for the system manager, /tmp
 * otherwise) to serialize manager state into, and return it as a FILE*
 * via _f. Created with mode 0600 via the temporary umask. */
2733 int manager_open_serialization(Manager *m, FILE **_f) {
2741 if (m->running_as == MANAGER_SYSTEM)
2742 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2744 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
/* Restrict the dump to root-only while it exists */
2749 saved_umask = umask(0077);
2750 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2760 log_debug("Serializing state to %s", path);
2763 if (!(f = fdopen(fd, "w+")))
/* Serialize manager-level state (job id counter, /usr taint flag,
 * timestamps) followed by the state of every serializable unit into 'f',
 * collecting any file descriptors to keep open across reexec in 'fds'. */
2771 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2783 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2784 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2786 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2787 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2788 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2792 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2796 if (!unit_can_serialize(u))
2803 if ((r = unit_serialize(u, f, fds)) < 0) {
2809 assert(m->n_reloading > 0);
/* Also preserve the D-Bus connection fds across reexec */
2815 r = bus_fdset_add_all(m, fds);
/* Inverse of manager_serialize(): read manager-level key=value lines until
 * the section ends, then read per-unit sections (unit name line followed
 * by the unit's own serialized state). Unknown keys are logged and
 * skipped so newer serializations stay loadable. */
2822 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2828 log_debug("Deserializing state...");
2833 char line[LINE_MAX], *l;
2835 if (!fgets(line, sizeof(line), f)) {
2850 if (startswith(l, "current-job-id=")) {
2853 if (safe_atou32(l+15, &id) < 0)
2854 log_debug("Failed to parse current job id value %s", l+15);
/* Never move the job id counter backwards */
2856 m->current_job_id = MAX(m->current_job_id, id);
2857 } else if (startswith(l, "taint-usr=")) {
2860 if ((b = parse_boolean(l+10)) < 0)
2861 log_debug("Failed to parse taint /usr flag %s", l+10);
/* Taint is sticky: once set it stays set */
2863 m->taint_usr = m->taint_usr || b;
2864 } else if (startswith(l, "initrd-timestamp="))
2865 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2866 else if (startswith(l, "startup-timestamp="))
2867 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2868 else if (startswith(l, "finish-timestamp="))
2869 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2871 log_debug("Unknown serialization item '%s'", l);
2876 char name[UNIT_NAME_MAX+2];
2879 if (!fgets(name, sizeof(name), f)) {
2890 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2893 if ((r = unit_deserialize(u, f, fds)) < 0)
2903 assert(m->n_reloading > 0);
/* Reload the manager configuration in place: serialize current state to a
 * temp file, tear down all jobs/units and generators, rebuild the unit
 * search paths, rerun generators, re-enumerate units, deserialize the
 * saved state back in, and coldplug everything. Past the serialization
 * step failures cannot be rolled back. */
2909 int manager_reload(Manager *m) {
2916 if ((r = manager_open_serialization(m, &f)) < 0)
2921 if (!(fds = fdset_new())) {
2927 if ((r = manager_serialize(m, f, fds)) < 0) {
/* Rewind so deserialization below reads from the start */
2932 if (fseeko(f, 0, SEEK_SET) < 0) {
2938 /* From here on there is no way back. */
2939 manager_clear_jobs_and_units(m);
2940 manager_undo_generators(m);
2942 /* Find new unit paths */
2943 lookup_paths_free(&m->lookup_paths);
2944 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2947 manager_run_generators(m);
2949 manager_build_unit_path_cache(m);
2951 /* First, enumerate what we can from all config files */
2952 if ((q = manager_enumerate(m)) < 0)
2955 /* Second, deserialize our stored data */
2956 if ((q = manager_deserialize(m, f, fds)) < 0)
2962 /* Third, fire things up! */
2963 if ((q = manager_coldplug(m)) < 0)
2966 assert(m->n_reloading > 0);
/* Returns true while startup is still in progress (the initial default
 * target job is still installed) or a shutdown is underway (a job exists
 * for the shutdown target). */
2979 bool manager_is_booting_or_shutting_down(Manager *m) {
2984 /* Is the initial job still around? */
2985 if (manager_get_job(m, m->default_unit_job_id))
2988 /* Is there a job for the shutdown target? */
2989 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
/* Clear the "failed" state of every known unit. */
2996 void manager_reset_failed(Manager *m) {
3002 HASHMAP_FOREACH(u, m->units, i)
3003 unit_reset_failed(u);
/* Returns true if the named unit is inactive or on its way down;
 * presumably false when the unit does not exist — the early-return value
 * is not visible here. */
3006 bool manager_unit_pending_inactive(Manager *m, const char *name) {
3012 /* Returns true if the unit is inactive or going down */
3013 if (!(u = manager_get_unit(m, name)))
3016 return unit_pending_inactive(u);
/* Check whether startup has completed (no jobs left and finish timestamp
 * not yet set); if so, record the finish timestamp, log the boot-time
 * breakdown (kernel/initrd/userspace when booting as PID 1 outside a
 * container), broadcast it on the bus, and notify readiness via
 * sd_notify. */
3019 void manager_check_finished(Manager *m) {
3020 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
3021 usec_t kernel_usec, initrd_usec, userspace_usec, total_usec;
/* Already finished earlier? */
3025 if (dual_timestamp_is_set(&m->finish_timestamp))
3028 if (hashmap_size(m->jobs) > 0)
3031 dual_timestamp_get(&m->finish_timestamp);
3033 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
/* Monotonic clock starts at kernel boot, so absolute values double
 * as durations since boot */
3035 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3036 total_usec = m->finish_timestamp.monotonic;
3038 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
3040 kernel_usec = m->initrd_timestamp.monotonic;
3041 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
3043 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
3044 format_timespan(kernel, sizeof(kernel), kernel_usec),
3045 format_timespan(initrd, sizeof(initrd), initrd_usec),
3046 format_timespan(userspace, sizeof(userspace), userspace_usec),
3047 format_timespan(sum, sizeof(sum), total_usec));
3049 kernel_usec = m->startup_timestamp.monotonic;
3052 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
3053 format_timespan(kernel, sizeof(kernel), kernel_usec),
3054 format_timespan(userspace, sizeof(userspace), userspace_usec),
3055 format_timespan(sum, sizeof(sum), total_usec));
/* User manager or container: only the manager's own runtime counts */
3058 userspace_usec = initrd_usec = kernel_usec = 0;
3059 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3061 log_debug("Startup finished in %s.",
3062 format_timespan(sum, sizeof(sum), total_usec));
3065 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
3068 "READY=1\nSTATUS=Startup finished in %s.",
3069 format_timespan(sum, sizeof(sum), total_usec));
/* Run all executables in the generator directory so they can emit dynamic
 * unit files into m->generator_unit_path (a fixed /run path as PID 1, a
 * mkdtemp directory otherwise). If the generators produced nothing the
 * directory is removed again; otherwise it is appended to the unit search
 * path. */
3072 void manager_run_generators(Manager *m) {
3074 const char *generator_path;
3075 const char *argv[3];
3080 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
3081 if (!(d = opendir(generator_path))) {
/* No generator directory installed: nothing to do */
3083 if (errno == ENOENT)
3086 log_error("Failed to enumerate generator directory: %m");
3090 if (!m->generator_unit_path) {
3092 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3094 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3095 p = "/run/systemd/generator";
3097 if (mkdir_p(p, 0755) < 0) {
3098 log_error("Failed to create generator directory: %m");
3103 if (!(p = mkdtemp(user_path))) {
3104 log_error("Failed to create generator directory: %m");
3109 if (!(m->generator_unit_path = strdup(p))) {
3110 log_error("Failed to allocate generator unit path.");
3115 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3116 argv[1] = m->generator_unit_path;
3120 execute_directory(generator_path, d, (char**) argv);
3123 if (rmdir(m->generator_unit_path) >= 0) {
3124 /* Uh? we were able to remove this dir? I guess that
3125 * means the directory was empty, hence let's shortcut
3128 free(m->generator_unit_path);
3129 m->generator_unit_path = NULL;
3133 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3136 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3137 log_error("Failed to add generator directory to unit search path: %m");
3141 strv_free(m->lookup_paths.unit_path);
3142 m->lookup_paths.unit_path = l;
3144 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
/* Tear down generator output: remove the generator directory from the unit
 * search path, recursively delete it on disk, and forget the path. */
3152 void manager_undo_generators(Manager *m) {
3155 if (!m->generator_unit_path)
3158 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3159 rm_rf(m->generator_unit_path, false, true, false);
3161 free(m->generator_unit_path);
3162 m->generator_unit_path = NULL;
/* Replace the default cgroup controller list with a copy of 'controllers',
 * then normalize it (cg_shorten_controllers). */
3165 int manager_set_default_controllers(Manager *m, char **controllers) {
3170 l = strv_copy(controllers);
3174 strv_free(m->default_controllers);
3175 m->default_controllers = l;
3177 cg_shorten_controllers(m->default_controllers);
/* Decide whether PID 1 may log to the journal: while the journald socket
 * or service is not fully running, close the journal log target to avoid
 * deadlocks; once both are up, logging to the journal is re-enabled (tail
 * of the function not visible here). */
3182 void manager_recheck_journal(Manager *m) {
3187 if (m->running_as != MANAGER_SYSTEM)
3190 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
3191 if (u && SOCKET(u)->state != SOCKET_RUNNING) {
3192 log_close_journal();
3196 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
3197 if (u && SERVICE(u)->state != SERVICE_RUNNING) {
3198 log_close_journal();
3202 /* Hmm, OK, so the socket is fully up and the service is up
3203 * too, then let's make use of the thing. */
/* Persist the "show boot status on console" flag by creating or removing
 * the /run/systemd/show-status marker file (system manager only). */
3207 void manager_set_show_status(Manager *m, bool b) {
3210 if (m->running_as != MANAGER_SYSTEM)
3216 touch("/run/systemd/show-status");
3218 unlink("/run/systemd/show-status");
/* Query whether boot status messages should be shown on the console.
 * Falls back to "show when Plymouth is running" so the user sees something
 * when pressing Esc on the splash screen. */
3221 bool manager_get_show_status(Manager *m) {
3224 if (m->running_as != MANAGER_SYSTEM)
3230 /* If Plymouth is running make sure we show the status, so
3231 * that there's something nice to see when people press Esc */
3233 return plymouth_running();
/* String table for ManagerRunningAs, plus the generated to/from-string
 * lookup helpers. */
3236 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3237 [MANAGER_SYSTEM] = "system",
3238 [MANAGER_USER] = "user"
3241 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);