1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
27 #include <sys/signalfd.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
36 #include <sys/types.h>
44 #include <systemd/sd-daemon.h>
53 #include "ratelimit.h"
55 #include "mount-setup.h"
56 #include "unit-name.h"
57 #include "dbus-unit.h"
60 #include "path-lookup.h"
62 #include "bus-errors.h"
63 #include "exit-status.h"
66 #include "cgroup-util.h"
68 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
69 #define GC_QUEUE_ENTRIES_MAX 16
71 /* As soon as 10s passed since a unit was added to our GC queue, make sure to run a gc sweep */
72 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
74 /* Where clients shall send notification messages to */
75 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
76 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
/* Sets up the AF_UNIX datagram socket on which clients send sd_notify()
 * messages and registers it with the manager's epoll loop. Returns 0 on
 * success, negative on failure. NOTE(review): the numeric prefixes are
 * original line numbers; intervening lines (error returns, zero(sa), the
 * running_as branch) are elided in this view. */
78 static int manager_setup_notify(Manager *m) {
81 struct sockaddr_un un;
83 struct epoll_event ev;
89 m->notify_watch.type = WATCH_NOTIFY;
90 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
91 log_error("Failed to allocate notification socket: %m");
/* User instance: abstract-namespace address (leading '@'), randomized so
 * multiple user managers don't collide. */
96 sa.sa.sa_family = AF_UNIX;
99 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
/* System instance: fixed filesystem path; drop any stale socket first. */
101 unlink(NOTIFY_SOCKET_SYSTEM);
102 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
/* '@' marks an abstract socket; the kernel wants a NUL in that byte. */
105 if (sa.un.sun_path[0] == '@')
106 sa.un.sun_path[0] = 0;
/* Address length: sun_path offset + the leading byte + the rest of the
 * name measured from the second byte (works for both abstract and
 * filesystem addresses). */
109 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
113 log_error("bind() failed: %m");
/* SO_PASSCRED delivers sender pid/uid/gid so messages can be attributed
 * to the unit that sent them. */
117 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
118 log_error("SO_PASSCRED failed: %m");
124 ev.data.ptr = &m->notify_watch;
126 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
/* Restore the '@' so the stored notify_socket string is printable. */
129 if (sa.un.sun_path[0] == 0)
130 sa.un.sun_path[0] = '@';
132 if (!(m->notify_socket = strdup(sa.un.sun_path)))
135 log_debug("Using notification socket %s", m->notify_socket);
/* Routes "special" hardware events to signals for PID 1: ctrl-alt-del
 * becomes SIGINT (via reboot(RB_DISABLE_CAD)) and the console kbrequest
 * key becomes SIGWINCH (via KDSIGACCEPT). All failures are logged but
 * non-fatal. NOTE(review): intervening lines are elided in this view. */
140 static int enable_special_signals(Manager *m) {
145 /* Enable that we get SIGINT on control-alt-del. In containers
146 * this will fail with EPERM, so ignore that. */
147 if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM)
148 log_warning("Failed to enable ctrl-alt-del handling: %m");
150 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
152 /* Support systems without virtual console */
154 log_warning("Failed to open /dev/tty0: %m");
156 /* Enable that we get SIGWINCH on kbrequest */
157 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
158 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
160 close_nointr_nofail(fd);
/* Blocks the signals the manager handles itself and creates a signalfd
 * for them, registered with the epoll loop; SIGCHLD gets SA_NOCLDSTOP so
 * we only hear about child exits, not stops. For the system instance it
 * additionally enables the special console signals. NOTE(review):
 * intervening lines are elided in this view. */
166 static int manager_setup_signals(Manager *m) {
168 struct epoll_event ev;
173 /* We are not interested in SIGSTOP and friends. */
175 sa.sa_handler = SIG_DFL;
176 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
177 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
179 assert_se(sigemptyset(&mask) == 0);
181 sigset_add_many(&mask,
182 SIGCHLD, /* Child died */
183 SIGTERM, /* Reexecute daemon */
184 SIGHUP, /* Reload configuration */
185 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
186 SIGUSR2, /* systemd: dump status */
187 SIGINT, /* Kernel sends us this on control-alt-del */
188 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
189 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
190 SIGRTMIN+0, /* systemd: start default.target */
191 SIGRTMIN+1, /* systemd: isolate rescue.target */
192 SIGRTMIN+2, /* systemd: isolate emergency.target */
193 SIGRTMIN+3, /* systemd: start halt.target */
194 SIGRTMIN+4, /* systemd: start poweroff.target */
195 SIGRTMIN+5, /* systemd: start reboot.target */
196 SIGRTMIN+6, /* systemd: start kexec.target */
197 SIGRTMIN+13, /* systemd: Immediate halt */
198 SIGRTMIN+14, /* systemd: Immediate poweroff */
199 SIGRTMIN+15, /* systemd: Immediate reboot */
200 SIGRTMIN+16, /* systemd: Immediate kexec */
201 SIGRTMIN+20, /* systemd: enable status messages */
202 SIGRTMIN+21, /* systemd: disable status messages */
203 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
204 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
205 SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
206 SIGRTMIN+27, /* systemd: set log target to console */
207 SIGRTMIN+28, /* systemd: set log target to kmsg */
208 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
/* Signals must be blocked before signalfd() or they could still be
 * delivered asynchronously. */
210 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
212 m->signal_watch.type = WATCH_SIGNAL;
213 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
218 ev.data.ptr = &m->signal_watch;
220 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
223 if (m->running_as == MANAGER_SYSTEM)
224 return enable_special_signals(m);
/* Removes variables that belong to the container and initrd interfaces
 * from the environment block the manager inherited, so they are not
 * propagated to the services it spawns. */
229 static void manager_strip_environment(Manager *m) {
232 /* Remove variables from the inherited set that are part of
233 * the container interface:
234 * http://www.freedesktop.org/wiki/Software/systemd/ContainerInterface */
235 strv_remove_prefix(m->environment, "container=");
236 strv_remove_prefix(m->environment, "container_");
238 /* Remove variables from the inherited set that are part of
239 * the initrd interface:
240 * http://www.freedesktop.org/wiki/Software/systemd/InitrdInterface */
241 strv_remove_prefix(m->environment, "RD_");
/* Allocates and initializes a Manager for the given run mode (system or
 * user): sets sentinel fds/slots, copies and strips the environment,
 * creates the core hashmaps and the epoll fd, then wires up lookup
 * paths, signals, cgroups, the notify socket, D-Bus and audit. On
 * success stores the new manager in *_m and returns 0. NOTE(review):
 * error-cleanup lines are elided in this view. */
244 int manager_new(ManagerRunningAs running_as, Manager **_m) {
249 assert(running_as >= 0);
250 assert(running_as < _MANAGER_RUNNING_AS_MAX);
252 if (!(m = new0(Manager, 1)))
255 dual_timestamp_get(&m->startup_timestamp);
257 m->running_as = running_as;
/* -1 marks "no D-Bus data slot allocated yet". */
258 m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1;
259 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
260 m->pin_cgroupfs_fd = -1;
/* All watch fds start out as -1 so manager_free() can safely close only
 * the ones that were actually opened. */
266 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
267 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
269 m->environment = strv_copy(environ);
273 manager_strip_environment(m);
275 if (running_as == MANAGER_SYSTEM) {
276 m->default_controllers = strv_new("cpu", NULL);
277 if (!m->default_controllers)
281 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
284 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
287 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
290 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
293 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
296 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
299 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
302 if ((r = manager_setup_signals(m)) < 0)
305 if ((r = manager_setup_cgroup(m)) < 0)
308 if ((r = manager_setup_notify(m)) < 0)
311 /* Try to connect to the busses, if possible. */
312 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
316 if ((m->audit_fd = audit_open()) < 0 &&
317 /* If the kernel lacks netlink or audit support,
318 * don't worry about it. */
319 errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT)
320 log_error("Failed to connect to audit log: %m")
/* A populated /usr at boot indicates it was mounted by the initrd (or is
 * on the root fs); an empty one means it got mounted late -- taint. */;
323 m->taint_usr = dir_is_empty("/usr") > 0;
/* Drains the cleanup queue, freeing each queued unit; returns the number
 * processed. NOTE(review): the loop body and return are elided in this
 * view. */
333 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
339 while ((u = m->cleanup_queue)) {
340 assert(u->in_cleanup_queue);
/* Enumerators for the per-sweep GC marker offsets; each sweep bumps
 * m->gc_marker by _GC_OFFSET_MAX, and marker+offset encodes a unit's
 * state within that sweep. NOTE(review): the enum's opening and closing
 * lines are elided in this view. */
350 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
351 GC_OFFSET_UNSURE, /* No clue */
352 GC_OFFSET_GOOD, /* We still need this unit */
353 GC_OFFSET_BAD, /* We don't need this unit anymore */
/* Recursive mark phase of the unit garbage collector: classifies u as
 * GOOD (still referenced), BAD (collectible) or UNSURE for the sweep
 * identified by gc_marker, following UNIT_REFERENCED_BY edges.
 * NOTE(review): intervening lines (goto labels, early returns) are
 * elided in this view. */
357 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
/* Already classified in this sweep -- nothing to do. */
364 if (u->gc_marker == gc_marker + GC_OFFSET_GOOD ||
365 u->gc_marker == gc_marker + GC_OFFSET_BAD ||
366 u->gc_marker == gc_marker + GC_OFFSET_IN_PATH)
368 if (u->in_cleanup_queue)
372 if (unit_check_gc(u))
/* Mark as on the current traversal path to detect reference cycles. */
375 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
379 SET_FOREACH(other, u->dependencies[UNIT_REFERENCED_BY], i) {
380 unit_gc_sweep(other, gc_marker);
/* Any GOOD referencer keeps us alive; a non-BAD one leaves us UNSURE. */
382 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
385 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
392 /* We were unable to find anything out about this entry, so
393 * let's investigate it later */
394 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
395 unit_add_to_gc_queue(u);
399 /* We definitely know that this one is not useful anymore, so
400 * let's mark it for deletion */
401 u->gc_marker = gc_marker + GC_OFFSET_BAD;
402 unit_add_to_cleanup_queue(u);
406 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
/* Runs a GC sweep over every unit in the GC queue, but only once the
 * queue is big enough (GC_QUEUE_ENTRIES_MAX) or old enough
 * (GC_QUEUE_USEC_MAX); units still BAD or UNSURE afterwards are moved to
 * the cleanup queue. Returns the number of units processed (return line
 * elided in this view). */
409 static unsigned manager_dispatch_gc_queue(Manager *m) {
/* Defer the sweep while the queue is small and young. */
416 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
417 (m->gc_queue_timestamp <= 0 ||
418 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
421 log_debug("Running GC...");
/* New sweep: advance the marker past all offsets of the previous one;
 * the following check handles wrap-around of the unsigned counter. */
423 m->gc_marker += _GC_OFFSET_MAX;
424 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
427 gc_marker = m->gc_marker;
429 while ((u = m->gc_queue)) {
430 assert(u->in_gc_queue);
432 unit_gc_sweep(u, gc_marker);
434 LIST_REMOVE(Unit, gc_queue, m->gc_queue, u);
435 u->in_gc_queue = false;
/* Anything not proven GOOD by the sweep is collected. */
439 if (u->gc_marker == gc_marker + GC_OFFSET_BAD ||
440 u->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
441 log_debug("Collecting %s", u->id);
442 u->gc_marker = gc_marker + GC_OFFSET_BAD;
443 unit_add_to_cleanup_queue(u);
447 m->n_in_gc_queue = 0;
448 m->gc_queue_timestamp = 0;
/* Frees every unit (which drops its jobs too), flushes the cleanup
 * queue, and asserts that all work queues and maps are empty afterwards.
 * Used on shutdown/reexec before tearing down the manager. */
453 static void manager_clear_jobs_and_units(Manager *m) {
458 while ((u = hashmap_first(m->units)))
461 manager_dispatch_cleanup_queue(m);
463 assert(!m->load_queue);
464 assert(!m->run_queue);
465 assert(!m->dbus_unit_queue);
466 assert(!m->dbus_job_queue);
467 assert(!m->cleanup_queue);
468 assert(!m->gc_queue);
470 assert(hashmap_isempty(m->jobs));
471 assert(hashmap_isempty(m->units));
/* Tears down a Manager: clears units/jobs, runs per-type shutdown hooks,
 * releases the cgroup hierarchy (kept alive across reexec), undoes
 * generators, and closes/frees every resource manager_new() acquired.
 * NOTE(review): intervening lines are elided in this view. */
474 void manager_free(Manager *m) {
479 manager_clear_jobs_and_units(m);
481 for (c = 0; c < _UNIT_TYPE_MAX; c++)
482 if (unit_vtable[c]->shutdown)
483 unit_vtable[c]->shutdown(m);
485 /* If we reexecute ourselves, we keep the root cgroup
487 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
489 manager_undo_generators(m);
493 hashmap_free(m->units);
494 hashmap_free(m->jobs);
495 hashmap_free(m->watch_pids);
496 hashmap_free(m->watch_bus);
/* Watch fds were initialized to -1, so only opened ones are closed. */
498 if (m->epoll_fd >= 0)
499 close_nointr_nofail(m->epoll_fd);
500 if (m->signal_watch.fd >= 0)
501 close_nointr_nofail(m->signal_watch.fd);
502 if (m->notify_watch.fd >= 0)
503 close_nointr_nofail(m->notify_watch.fd);
506 if (m->audit_fd >= 0)
507 audit_close(m->audit_fd);
510 free(m->notify_socket);
512 lookup_paths_free(&m->lookup_paths);
513 strv_free(m->environment);
515 strv_free(m->default_controllers);
517 hashmap_free(m->cgroup_bondings);
518 set_free_free(m->unit_path_cache);
/* Asks every unit type to enumerate the units it knows about (from disk,
 * kernel state, etc.) and then processes the resulting load queue.
 * NOTE(review): the error-accumulation and return lines are elided. */
523 int manager_enumerate(Manager *m) {
529 /* Let's ask every type to load all units from disk/kernel
530 * that it might know */
531 for (c = 0; c < _UNIT_TYPE_MAX; c++)
532 if (unit_vtable[c]->enumerate)
533 if ((q = unit_vtable[c]->enumerate(m)) < 0)
536 manager_dispatch_load_queue(m);
/* Establishes the initial runtime state of every loaded unit by calling
 * unit_coldplug() on each; used after enumeration/deserialization.
 * NOTE(review): most of the body is elided in this view. */
540 int manager_coldplug(Manager *m) {
548 /* Then, let's set up their initial state. */
549 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
555 if ((q = unit_coldplug(u)) < 0)
/* Rebuilds m->unit_path_cache: a set of every file name found in the
 * unit lookup paths, so existence checks can avoid hitting the disk.
 * Best-effort: on any failure the cache is freed and left NULL.
 * NOTE(review): intervening lines (cleanup label, frees) are elided. */
562 static void manager_build_unit_path_cache(Manager *m) {
569 set_free_free(m->unit_path_cache);
571 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
572 log_error("Failed to allocate unit path cache.");
576 /* This simply builds a list of files we know exist, so that
577 * we don't always have to go to disk */
579 STRV_FOREACH(i, m->lookup_paths.unit_path) {
582 if (!(d = opendir(*i))) {
583 log_error("Failed to open directory: %m");
587 while ((de = readdir(d))) {
590 if (ignore_file(de->d_name))
/* Avoid a double slash when the search path is the root directory. */
593 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
599 if ((r = set_put(m->unit_path_cache, p)) < 0) {
612 log_error("Failed to build unit path cache: %s", strerror(-r));
/* Failure path: drop the partial cache; callers treat NULL as "no
 * cache" and fall back to disk lookups. */
614 set_free_free(m->unit_path_cache);
615 m->unit_path_cache = NULL;
/* Brings the manager up: runs generators, builds the unit path cache,
 * enumerates units, optionally deserializes state from a previous
 * manager (reexec), then coldplugs everything. NOTE(review): the
 * n_reloading bookkeeping between these steps is elided in this view. */
621 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
626 manager_run_generators(m);
628 manager_build_unit_path_cache(m);
630 /* If we will deserialize make sure that during enumeration
631 * this is already known, so we increase the counter here
636 /* First, enumerate what we can from all config files */
637 r = manager_enumerate(m);
639 /* Second, deserialize if there is something to deserialize */
641 if ((q = manager_deserialize(m, serialization, fds)) < 0)
644 /* Third, fire things up! */
645 if ((q = manager_coldplug(m)) < 0)
649 assert(m->n_reloading > 0);
656 static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies);
/* Removes job j from the transaction; if delete_dependencies is true,
 * jobs that were only pulled in because of j are deleted recursively
 * (via transaction_unlink_job). NOTE(review): trailing lines elided. */
658 static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {
662 /* Deletes one job from the transaction */
664 transaction_unlink_job(tr, j, delete_dependencies);
/* Deletes every transaction job attached to unit u, dependencies
 * included. */
670 static void transaction_delete_unit(Transaction *tr, Unit *u) {
673 /* Deletes all jobs associated with a certain unit from the
676 while ((j = hashmap_get(tr->jobs, u)))
677 transaction_delete_job(tr, j, true);
/* Discards the whole transaction by deleting every remaining job (and
 * its dependencies), leaving tr->jobs empty. */
680 static void transaction_abort(Transaction *tr) {
685 while ((j = hashmap_first(tr->jobs)))
686 transaction_delete_job(tr, j, true);
688 assert(hashmap_isempty(tr->jobs));
/* Marks (matters_to_anchor = true) every job reachable from the anchor
 * job through "mattering" dependency links, using the generation counter
 * to avoid revisiting nodes. NOTE(review): the anchor-selection lines at
 * the top of the body are elided in this view. */
693 static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j, unsigned generation) {
698 /* A recursive sweep through the graph that marks all units
699 * that matter to the anchor job, i.e. are directly or
700 * indirectly a dependency of the anchor job via paths that
701 * are fully marked as mattering. */
708 LIST_FOREACH(subject, l, l) {
710 /* This link does not matter */
714 /* This unit has already been marked */
715 if (l->object->generation == generation)
718 l->object->matters_to_anchor = true;
719 l->object->generation = generation;
721 transaction_find_jobs_that_matter_to_anchor(tr, l->object, generation);
/* Merges job 'other' into 'j' (both for the same unit): j takes the
 * merged type t, inherits override/matters flags, adopts other's
 * subject/object dependency lists, and other is deleted. NOTE(review):
 * the loop bodies re-pointing l->subject / l->object and the last-link
 * tracking are partially elided in this view. */
725 static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
726 JobDependency *l, *last;
730 assert(j->unit == other->unit);
731 assert(!j->installed);
733 /* Merges 'other' into 'j' and then deletes 'other'. */
736 j->state = JOB_WAITING;
737 j->override = j->override || other->override;
739 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
741 /* Patch us in as new owner of the JobDependency objects */
743 LIST_FOREACH(subject, l, other->subject_list) {
744 assert(l->subject == other);
749 /* Merge both lists */
751 last->subject_next = j->subject_list;
753 j->subject_list->subject_prev = last;
754 j->subject_list = other->subject_list;
757 /* Patch us in as new owner of the JobDependency objects */
759 LIST_FOREACH(object, l, other->object_list) {
760 assert(l->object == other);
765 /* Merge both lists */
767 last->object_next = j->object_list;
769 j->object_list->object_prev = last;
770 j->object_list = other->object_list;
773 /* Kill the other job */
774 other->subject_list = NULL;
775 other->object_list = NULL;
776 transaction_delete_job(tr, other, true);
/* Returns true if at least one ConflictedBy dependency pulled this job
 * in (used to decide which of two conflicting jobs to drop).
 * NOTE(review): the loop body and returns are elided in this view. */
779 static bool job_is_conflicted_by(Job *j) {
784 /* Returns true if this job is pulled in by a least one
785 * ConflictedBy dependency. */
787 LIST_FOREACH(object, l, j->object_list)
/* Scans the per-unit job list for a pair of jobs whose types cannot be
 * merged and deletes the one that matters least to the anchor (preferring
 * to drop stops over starts, unless the stop came from a ConflictedBy).
 * Returns success once one job was dropped; the caller retries merging.
 * NOTE(review): the chosen-job assignments ("d = ...") and several
 * returns are elided in this view. */
794 static int delete_one_unmergeable_job(Transaction *tr, Job *j) {
799 /* Tries to delete one item in the linked list
800 * j->transaction_next->transaction_next->... that conflicts
801 * with another one, in an attempt to make an inconsistent
802 * transaction work. */
804 /* We rely here on the fact that if a merged with b does not
805 * merge with c, then neither a nor b merges with c */
806 LIST_FOREACH(transaction, j, j)
807 LIST_FOREACH(transaction, k, j->transaction_next) {
810 /* Is this one mergeable? Then skip it */
811 if (job_type_is_mergeable(j->type, k->type))
814 /* Ok, we found two that conflict, let's see if we can
815 * drop one of them */
816 if (!j->matters_to_anchor && !k->matters_to_anchor) {
818 /* Both jobs don't matter, so let's
819 * find the one that is smarter to
820 * remove. Let's think positive and
821 * rather remove stops than starts --
822 * except if something is being
823 * stopped because it is conflicted by
824 * another unit in which case we
825 * rather remove the start. */
827 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
828 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
830 if (j->type == JOB_STOP) {
832 if (job_is_conflicted_by(j))
837 } else if (k->type == JOB_STOP) {
839 if (job_is_conflicted_by(k))
846 } else if (!j->matters_to_anchor)
848 else if (!k->matters_to_anchor)
853 /* Ok, we can drop one, so let's do so. */
854 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
855 transaction_delete_job(tr, d, true);
/* Collapses each unit's list of prospective jobs into a single job:
 * first verifies all types are mergeable (dropping one unmergeable job
 * and asking the caller to retry via -EAGAIN-style flow if possible),
 * then folds the list -- and any mergeable already-installed job -- into
 * one job per unit. NOTE(review): several control-flow lines (returns,
 * t initialization) are elided in this view. */
862 static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
869 /* First step, check whether any of the jobs for one specific
870 * task conflict. If so, try to drop one of them. */
871 HASHMAP_FOREACH(j, tr->jobs, i) {
876 LIST_FOREACH(transaction, k, j->transaction_next) {
877 if (job_type_merge(&t, k->type) >= 0)
880 /* OK, we could not merge all jobs for this
881 * action. Let's see if we can get rid of one
884 r = delete_one_unmergeable_job(tr, j);
886 /* Ok, we managed to drop one, now
887 * let's ask our callers to call us
888 * again after garbage collecting */
891 /* We couldn't merge anything. Failure */
892 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
893 job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
898 /* Second step, merge the jobs. */
899 HASHMAP_FOREACH(j, tr->jobs, i) {
903 /* Merge all transactions */
904 LIST_FOREACH(transaction, k, j->transaction_next)
905 assert_se(job_type_merge(&t, k->type) == 0);
907 /* If an active job is mergeable, merge it too */
909 job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
911 while ((k = j->transaction_next)) {
/* Installed jobs must survive as the merge target; otherwise fold the
 * rest of the list into j. */
913 transaction_merge_and_delete_job(tr, k, j, t);
916 transaction_merge_and_delete_job(tr, j, k, t);
919 if (j->unit->job && !j->installed)
920 transaction_merge_and_delete_job(tr, j, j->unit->job, t);
922 assert(!j->transaction_next);
923 assert(!j->transaction_prev);
/* Repeatedly removes per-unit job lists in which no job would actually
 * change anything (not the anchor, type redundant for the unit's active
 * state, and not conflicting with an installed job), until a fixpoint is
 * reached. NOTE(review): the outer retry loop lines are elided. */
929 static void transaction_drop_redundant(Transaction *tr) {
934 /* Goes through the transaction and removes all jobs that are
943 HASHMAP_FOREACH(j, tr->jobs, i) {
944 bool changes_something = false;
947 LIST_FOREACH(transaction, k, j) {
949 if (!job_is_anchor(k) &&
950 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
951 (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
954 changes_something = true;
958 if (changes_something)
961 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
962 transaction_delete_job(tr, j, false);
/* Returns true if any job in this unit's transaction job list (starting
 * at list head j) matters to the anchor. NOTE(review): the final return
 * line is elided in this view. */
970 static bool unit_matters_to_anchor(Unit *u, Job *j) {
972 assert(!j->transaction_prev);
974 /* Checks whether at least one of the jobs for this unit
975 * matters to the anchor. */
977 LIST_FOREACH(transaction, j, j)
978 if (j->matters_to_anchor)
/* Depth-first search for ordering cycles starting at job j ('from' is
 * the predecessor on the current path). On finding a cycle, walks the
 * marker chain backwards looking for a deletable job (not installed, not
 * mattering to the anchor) and removes its unit's jobs to break the
 * cycle; otherwise reports BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC.
 * NOTE(review): several lines (returns, the delete selection, the
 * backtracking marker reset) are elided in this view. */
984 static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, DBusError *e) {
991 assert(!j->transaction_prev);
993 /* Does a recursive sweep through the ordering graph, looking
994 * for a cycle. If we find cycle we try to break it. */
996 /* Have we seen this before? */
997 if (j->generation == generation) {
1000 /* If the marker is NULL we have been here already and
1001 * decided the job was loop-free from here. Hence
1002 * shortcut things and return right-away. */
1006 /* So, the marker is not NULL and we already have been
1007 * here. We have a cycle. Let's try to break it. We go
1008 * backwards in our path and try to find a suitable
1009 * job to remove. We use the marker to find our way
1010 * back, since smart how we are we stored our way back
1012 log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
/* Walk the marker chain back toward the cycle start; k->marker == k
 * marks the path's beginning. */
1015 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1017 log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
1021 !unit_matters_to_anchor(k->unit, k)) {
1022 /* Ok, we can drop this one, so let's
1027 /* Check if this in fact was the beginning of
1035 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
1036 transaction_delete_unit(tr, delete->unit);
1040 log_error("Unable to break cycle");
1042 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1046 /* Make the marker point to where we come from, so that we can
1047 * find our way backwards if we want to break a cycle. We use
1048 * a special marker for the beginning: we point to
1050 j->marker = from ? from : j;
1051 j->generation = generation;
1053 /* We assume that the dependencies are bidirectional, and
1054 * hence can ignore UNIT_AFTER */
1055 SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
1058 /* Is there a job for this unit? */
1059 o = hashmap_get(tr->jobs, u);
1061 /* Ok, there is no job for this in the
1062 * transaction, but maybe there is already one
1069 r = transaction_verify_order_one(tr, o, j, generation, e);
1074 /* Ok, let's backtrack, and remember that this entry is not on
1075 * our path anymore. */
/* Checks the whole transaction's ordering graph for cycles by running
 * transaction_verify_order_one() from every job with a fresh generation;
 * propagates the first failure. NOTE(review): trailing lines elided. */
1081 static int transaction_verify_order(Transaction *tr, unsigned *generation, DBusError *e) {
1090 /* Check if the ordering graph is cyclic. If it is, try to fix
1091 * that up by dropping one of the jobs. */
1093 g = (*generation)++;
1095 HASHMAP_FOREACH(j, tr->jobs, i)
1096 if ((r = transaction_verify_order_one(tr, j, NULL, g, e)) < 0)
1102 static void transaction_collect_garbage(Transaction *tr) {
1107 /* Drop jobs that are not required by any other job */
1115 HASHMAP_FOREACH(j, tr->jobs, i) {
1116 if (j->object_list) {
1117 /* log_debug("Keeping job %s/%s because of %s/%s", */
1118 /* j->unit->id, job_type_to_string(j->type), */
1119 /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
1120 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1124 /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
1125 transaction_delete_job(tr, j, true);
/* Returns an error (BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE) if applying
 * the transaction would replace an installed job with one whose type is
 * not a superset of it; otherwise success. NOTE(review): the condition's
 * first clause and the returns are elided in this view. */
1133 static int transaction_is_destructive(Transaction *tr, DBusError *e) {
1139 /* Checks whether applying this transaction means that
1140 * existing jobs would be replaced */
1142 HASHMAP_FOREACH(j, tr->jobs, i) {
1145 assert(!j->transaction_prev);
1146 assert(!j->transaction_next);
1149 j->unit->job != j &&
1150 !job_type_is_superset(j->type, j->unit->job->type)) {
1152 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
/* For JOB_FAIL mode: repeatedly drops non-mattering jobs that would stop
 * a running service or conflict with an existing job, until a pass
 * changes nothing. NOTE(review): the outer retry loop and the
 * unit->job null-check in changes_existing_job are elided in this
 * view. */
1160 static void transaction_minimize_impact(Transaction *tr) {
1164 /* Drops all unnecessary jobs that reverse already active jobs
1165 * or that stop a running service. */
1173 HASHMAP_FOREACH(j, tr->jobs, i) {
1174 LIST_FOREACH(transaction, j, j) {
1175 bool stops_running_service, changes_existing_job;
1177 /* If it matters, we shouldn't drop it */
1178 if (j->matters_to_anchor)
1181 /* Would this stop a running service?
1182 * Would this change an existing job?
1183 * If so, let's drop this entry */
1185 stops_running_service =
1186 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1188 changes_existing_job =
1190 job_type_is_conflicting(j->type, j->unit->job->type);
1192 if (!stops_running_service && !changes_existing_job)
1195 if (stops_running_service)
1196 log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
1198 if (changes_existing_job)
1199 log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
1201 /* Ok, let's get rid of this */
1202 log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
1204 transaction_delete_job(tr, j, true);
/* Moves the (now consistent) transaction jobs into the manager's active
 * job table. In isolate mode, first cancels every installed job not part
 * of the transaction. On hashmap failure, rolls the inserted jobs back
 * out. NOTE(review): the installation details between steps (job_free,
 * rollback label) are elided in this view. */
1216 static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
1221 /* Moves the transaction jobs to the set of active jobs */
1223 if (mode == JOB_ISOLATE) {
1225 /* When isolating first kill all installed jobs which
1226 * aren't part of the new transaction */
1228 HASHMAP_FOREACH(j, m->jobs, i) {
1229 assert(j->installed);
1231 if (hashmap_get(tr->jobs, j->unit))
1234 /* 'j' itself is safe to remove, but if other jobs
1235 are invalidated recursively, our iterator may become
1236 invalid and we need to start over. */
1237 if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
1242 HASHMAP_FOREACH(j, tr->jobs, i) {
1244 assert(!j->transaction_prev);
1245 assert(!j->transaction_next);
1250 r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
1255 while ((j = hashmap_steal_first(tr->jobs))) {
1258 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
1269 j->installed = true;
1270 m->n_installed_jobs ++;
1272 /* We're fully installed. Now let's free data we don't
1275 assert(!j->transaction_next);
1276 assert(!j->transaction_prev);
1278 /* Clean the job dependencies */
1279 transaction_unlink_job(tr, j, false);
1281 job_add_to_run_queue(j);
1282 job_add_to_dbus_queue(j);
1285 log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
1288 assert(!tr->anchor);
/* Rollback path: remove the jobs we already put into m->jobs. */
1294 HASHMAP_FOREACH(j, tr->jobs, i) {
1298 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
/* Drives the full transaction pipeline -- mark anchor-relevant jobs,
 * minimize impact (JOB_FAIL), drop redundant jobs, GC, break ordering
 * cycles, merge per-unit jobs (retrying after each dropped job),
 * destructiveness check (JOB_FAIL), then apply to the manager. Returns
 * negative with *e set on unrecoverable inconsistencies. NOTE(review):
 * the retry/abort control flow between steps is elided in this view. */
1304 static int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e) {
1306 unsigned generation = 1;
1310 /* This applies the changes recorded in tr->jobs to
1311 * the actual list of jobs, if possible. */
1313 /* First step: figure out which jobs matter */
1314 transaction_find_jobs_that_matter_to_anchor(tr, NULL, generation++);
1316 /* Second step: Try not to stop any running services if
1317 * we don't have to. Don't try to reverse running
1318 * jobs if we don't have to. */
1319 if (mode == JOB_FAIL)
1320 transaction_minimize_impact(tr);
1322 /* Third step: Drop redundant jobs */
1323 transaction_drop_redundant(tr);
1326 /* Fourth step: Let's remove unneeded jobs that might
1328 if (mode != JOB_ISOLATE)
1329 transaction_collect_garbage(tr);
1331 /* Fifth step: verify order makes sense and correct
1332 * cycles if necessary and possible */
1333 r = transaction_verify_order(tr, &generation, e);
1338 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1342 /* Let's see if the resulting transaction ordering
1343 * graph is still cyclic... */
1347 /* Sixth step: let's drop unmergeable entries if
1348 * necessary and possible, merge entries we can
1350 r = transaction_merge_jobs(tr, e);
1355 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1359 /* Seventh step: an entry got dropped, let's garbage
1360 * collect its dependencies. */
1361 if (mode != JOB_ISOLATE)
1362 transaction_collect_garbage(tr);
1364 /* Let's see if the resulting transaction still has
1365 * unmergeable entries ... */
1368 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1369 transaction_drop_redundant(tr);
1371 /* Ninth step: check whether we can actually apply this */
1372 if (mode == JOB_FAIL) {
1373 r = transaction_is_destructive(tr, e);
1375 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1380 /* Tenth step: apply changes */
1381 r = transaction_apply(tr, m, mode);
1383 log_warning("Failed to apply transaction: %s", strerror(-r));
1387 assert(hashmap_isempty(tr->jobs));
1388 assert(!tr->anchor);
/* Finds the prospective job of the given type for 'unit' in the
 * transaction, or creates one and prepends it to the unit's job list
 * (registering the list head in tr->jobs). Sets *is_new accordingly; an
 * installed job of the same type is reused. Returns NULL on allocation
 * failure. NOTE(review): returns and error cleanup are elided in this
 * view. */
1393 static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool override, bool *is_new) {
1399 /* Looks for an existing prospective job and returns that. If
1400 * it doesn't exist it is created and added to the prospective
1403 f = hashmap_get(tr->jobs, unit);
1405 LIST_FOREACH(transaction, j, f) {
1406 assert(j->unit == unit);
1408 if (j->type == type) {
1415 if (unit->job && unit->job->type == type)
1418 j = job_new(unit->manager, type, unit);
1425 j->matters_to_anchor = false;
1426 j->override = override;
1428 LIST_PREPEND(Job, transaction, f, j);
/* The list head may have changed; keep the hashmap pointing at it. */
1430 if (hashmap_replace(tr->jobs, unit, f) < 0) {
1431 LIST_REMOVE(Job, transaction, f, j);
1439 /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
/* Detaches job j from the transaction: unlinks it from its unit's job
 * list (fixing the tr->jobs head), frees all its dependency links, and
 * -- if delete_dependencies -- recursively deletes jobs that were only
 * present because they mattered to j. */
1444 static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
1448 if (j->transaction_prev)
1449 j->transaction_prev->transaction_next = j->transaction_next;
/* j was the list head: promote its successor in tr->jobs, or drop the
 * unit's entry entirely if j was the only job. */
1450 else if (j->transaction_next)
1451 hashmap_replace(tr->jobs, j->unit, j->transaction_next);
1453 hashmap_remove_value(tr->jobs, j->unit, j);
1455 if (j->transaction_next)
1456 j->transaction_next->transaction_prev = j->transaction_prev;
1458 j->transaction_prev = j->transaction_next = NULL;
1460 while (j->subject_list)
1461 job_dependency_free(j->subject_list, tr);
1463 while (j->object_list) {
/* Only "mattering" links pull their subject down with the deleted job. */
1464 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1466 job_dependency_free(j->object_list, tr);
1468 if (other && delete_dependencies) {
1469 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1470 other->unit->id, job_type_to_string(other->type),
1471 j->unit->id, job_type_to_string(j->type));
1472 transaction_delete_job(tr, other, delete_dependencies);
/*
 * Add a job of 'type' for 'unit' to transaction 'tr', then recursively pull
 * in jobs for the unit's dependency sets, mapping each dependency class to a
 * job type and a "matters" flag (see the SET_FOREACH loops below).  Returns a
 * negative errno-style code on failure, in which case a descriptive D-Bus
 * error may be set in 'e'.
 * NOTE(review): this extract is elided; several parameters, local variable
 * declarations and return statements are not visible here.
 */
1477 static int transaction_add_job_and_dependencies(
1485                 bool ignore_requirements,
1496         assert(type < _JOB_TYPE_MAX);
1499         /* log_debug("Pulling in %s/%s from %s/%s", */
1500         /*           unit->id, job_type_to_string(type), */
1501         /*           by ? by->unit->id : "NA", */
1502         /*           by ? job_type_to_string(by->type) : "NA"); */
/* Refuse units whose load state is neither loaded, error nor masked. */
1504         if (unit->load_state != UNIT_LOADED &&
1505             unit->load_state != UNIT_ERROR &&
1506             unit->load_state != UNIT_MASKED) {
1507                 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
/* Anything but a stop job on an error-state unit is rejected. */
1511         if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
1512                 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1513                                "Unit %s failed to load: %s. "
1514                                "See system logs and 'systemctl status %s' for details.",
1516                                strerror(-unit->load_error),
/* Masked units may only be stopped. */
1521         if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
1522                 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
1526         if (!unit_job_is_applicable(unit, type)) {
1527                 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
1531         /* First add the job. */
1532         ret = transaction_add_one_job(tr, type, unit, override, &is_new);
1536         ret->ignore_order = ret->ignore_order || ignore_order;
1538         /* Then, add a link to the job. */
1539         if (!job_dependency_new(by, ret, matters, conflicts, tr))
/* Recursion into dependencies happens only for freshly added jobs and
 * only when requirement handling is not suppressed by the caller. */
1542         if (is_new && !ignore_requirements) {
1545                 /* If we are following some other unit, make sure we
1546                  * add all dependencies of everybody following. */
1547                 if (unit_following_set(ret->unit, &following) > 0) {
1548                         SET_FOREACH(dep, following, i) {
1549                                 r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e, NULL);
1551                                         log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1558                         set_free(following);
1561                 /* Finally, recursively add in all dependencies. */
1562                 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
/* Hard requirements: failure to enqueue these aborts the transaction
 * (note there is no log-and-ignore branch visible for them, unlike the
 * overridable/wants cases below). */
1563                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
1564                                 r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
1574                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) {
1575                                 r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
/* Overridable requirements matter only when override is not set. */
1585                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
1586                                 r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL);
1588                                         log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Wants are best-effort: failures are logged and ignored. */
1595                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
1596                                 r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL);
1598                                         log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Requisite units are only verified active, never started. */
1605                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) {
1606                                 r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL);
1616                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
1617                                 r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL);
1619                                         log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Conflicting units get stop jobs; note the 'conflicts' flag is true
 * only for UNIT_CONFLICTS, not for UNIT_CONFLICTED_BY. */
1626                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) {
1627                                 r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL);
1637                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) {
1638                                 r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL);
1640                                         log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Stop-like jobs propagate the same job type to dependents. */
1649                 if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1651                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
1652                                 r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
1662                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) {
1663                                 r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
/* Reload propagation is best-effort as well. */
1674                 if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
1676                         SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
1677                                 r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
1679                                         log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1687                 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
/*
 * For an isolate request: walk all known units and queue a JOB_STOP for each
 * one that is not exempt, so that only the isolated target's dependencies
 * remain running.  Failures per unit are logged and ignored.
 */
1699 static int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
1708         HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1710                 /* ignore aliases */
/* Units explicitly marked IgnoreOnIsolate= are left alone. */
1714                 if (u->ignore_on_isolate)
1717                 /* No need to stop inactive jobs */
1718                 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
1721                 /* Is there already something listed for this? */
1722                 if (hashmap_get(tr->jobs, u))
1725                 r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL);
1727                         log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
/*
 * Allocate a fresh, empty Transaction with an empty jobs hashmap keyed by
 * unit pointer (trivial hash/compare).  Returns NULL on OOM (error paths are
 * elided in this extract).
 */
1733 static Transaction *transaction_new(void) {
1736         tr = new0(Transaction, 1);
1740         tr->jobs = hashmap_new(trivial_hash_func, trivial_compare_func);
/*
 * Release a Transaction.  The jobs hashmap must already be empty — callers
 * must have aborted or applied the transaction first (enforced by the
 * assert).
 */
1749 static void transaction_free(Transaction *tr) {
1750         assert(hashmap_isempty(tr->jobs));
1751         hashmap_free(tr->jobs);
/*
 * Public entry point for enqueuing a job: validate the mode against the job
 * type and the unit's AllowIsolate= setting, build a transaction with the
 * job and its dependencies, add isolate stop-jobs when mode is JOB_ISOLATE,
 * then activate the transaction.  On success the anchor job is reported via
 * *_ret; on failure the transaction is aborted and freed.
 */
1755 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1761         assert(type < _JOB_TYPE_MAX);
1763         assert(mode < _JOB_MODE_MAX);
/* Isolate only makes sense as a start operation. */
1765         if (mode == JOB_ISOLATE && type != JOB_START) {
1766                 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1770         if (mode == JOB_ISOLATE && !unit->allow_isolate) {
1771                 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1775         log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
1777         tr = transaction_new();
/* The ignore-deps modes suppress requirement and (for IGNORE_DEPENDENCIES)
 * ordering handling when pulling in the anchor job. */
1781         r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, override, false,
1782                                                  mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1783                                                  mode == JOB_IGNORE_DEPENDENCIES, e, &ret);
1787         if (mode == JOB_ISOLATE) {
1788                 r = transaction_add_isolate_jobs(tr, m);
1793         r = transaction_activate(tr, m, mode, e);
1797         log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
1802         transaction_free(tr);
/* Error path (elided labels): undo everything we queued, then free. */
1806         transaction_abort(tr);
1807         transaction_free(tr);
/*
 * Convenience wrapper: load (or look up) the unit by name, then delegate to
 * manager_add_job().  Returns manager_load_unit()'s error if loading fails.
 */
1811 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1816         assert(type < _JOB_TYPE_MAX);
1818         assert(mode < _JOB_MODE_MAX);
1820         if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1823         return manager_add_job(m, type, unit, mode, override, e, _ret);
/* Look up an installed job by its numeric id; NULL if not found. */
1826 Job *manager_get_job(Manager *m, uint32_t id) {
1829         return hashmap_get(m->jobs, UINT32_TO_PTR(id));
/* Look up a loaded unit by name (including aliases); NULL if not found. */
1832 Unit *manager_get_unit(Manager *m, const char *name) {
1836         return hashmap_get(m->units, name);
/*
 * Drain the unit load queue, loading each queued unit's configuration.
 * Guarded against recursion via m->dispatching_load_queue since loading a
 * unit may queue further units.  Returns the number of units dispatched
 * (counting elided in this extract).
 */
1839 unsigned manager_dispatch_load_queue(Manager *m) {
1845         /* Make sure we are not run recursively */
1846         if (m->dispatching_load_queue)
1849         m->dispatching_load_queue = true;
1851         /* Dispatches the load queue. Takes a unit from the queue and
1852          * tries to load its data until the queue is empty */
1854         while ((u = m->load_queue)) {
1855                 assert(u->in_load_queue);
1861         m->dispatching_load_queue = false;
/*
 * Prepare a unit for loading without touching the disk yet: validate the
 * name/path, reuse an existing Unit if one is already known, otherwise
 * allocate a new one, register its name and put it on the load, D-Bus and GC
 * queues.  Actual loading happens later via manager_dispatch_load_queue().
 */
1865 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1871         assert(name || path);
1873         /* This will prepare the unit for loading, but not actually
1874          * load anything from disk. */
1876         if (path && !is_path(path)) {
1877                 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
/* If only a path was given, derive the unit name from its basename. */
1882                 name = file_name_from_path(path);
1884         t = unit_name_to_type(name);
1886         if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid_no_type(name, false)) {
1887                 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
/* Reuse an already-known unit of this name if present (early return
 * path elided in this extract). */
1891         ret = manager_get_unit(m, name);
1897         ret = unit_new(m, unit_vtable[t]->object_size);
/* Remember where the fragment came from, if a path was supplied. */
1902                 ret->fragment_path = strdup(path);
1903                 if (!ret->fragment_path) {
1909         if ((r = unit_add_name(ret, name)) < 0) {
1914         unit_add_to_load_queue(ret);
1915         unit_add_to_dbus_queue(ret);
1916         unit_add_to_gc_queue(ret);
/*
 * Load a unit's configuration files (without starting anything): prepare the
 * unit, drain the load queue, and resolve merges so *_ret points at the
 * canonical Unit object.
 */
1924 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1929         /* This will load the service information files, but not actually
1930          * start any services or anything. */
1932         if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1935         manager_dispatch_load_queue(m);
/* The unit may have been merged into another during loading. */
1938         *_ret = unit_follow_merge(*_ret);
/* Dump a human-readable description of every installed job to 'f'. */
1943 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1950         HASHMAP_FOREACH(j, s->jobs, i)
1951                 job_dump(j, f, prefix);
/* Dump a human-readable description of every loaded unit to 'f'. */
1954 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1962         HASHMAP_FOREACH_KEY(u, t, s->units, i)
1964                         unit_dump(u, f, prefix);
/* Cancel every installed job, finishing each with JOB_CANCELED. */
1967 void manager_clear_jobs(Manager *m) {
1972         while ((j = hashmap_first(m->jobs)))
1973                 job_finish_and_invalidate(j, JOB_CANCELED);
/*
 * Drain the run queue, executing each queued installed job.  Guarded against
 * recursive invocation via m->dispatching_run_queue.  Returns the number of
 * jobs dispatched (counting elided in this extract).
 */
1976 unsigned manager_dispatch_run_queue(Manager *m) {
1980         if (m->dispatching_run_queue)
1983         m->dispatching_run_queue = true;
1985         while ((j = m->run_queue)) {
1986                 assert(j->installed);
1987                 assert(j->in_run_queue);
1989                 job_run_and_invalidate(j);
1993         m->dispatching_run_queue = false;
/*
 * Emit pending D-Bus change signals: first for queued units, then for queued
 * jobs.  Guarded against recursion via m->dispatching_dbus_queue.  Returns
 * the number of items dispatched (counting elided in this extract).
 */
1997 unsigned manager_dispatch_dbus_queue(Manager *m) {
2004         if (m->dispatching_dbus_queue)
2007         m->dispatching_dbus_queue = true;
2009         while ((u = m->dbus_unit_queue)) {
2010                 assert(u->in_dbus_queue);
2012                 bus_unit_send_change_signal(u);
2016         while ((j = m->dbus_job_queue)) {
2017                 assert(j->in_dbus_queue);
2019                 bus_job_send_change_signal(j);
2023         m->dispatching_dbus_queue = false;
/*
 * Read one sd_notify() datagram from the notification socket, authenticate
 * the sender via SCM_CREDENTIALS, map the sending PID to a unit (first via
 * the watch_pids map, then via cgroup membership), split the payload into
 * newline-separated tags and hand them to the unit's notify_message() hook.
 * Returns 0 when there is nothing (more) to read, negative errno on error.
 */
2027 static int manager_process_notify_fd(Manager *m) {
2034                 struct msghdr msghdr;
2036                 struct ucred *ucred;
/* Control buffer sized for exactly one SCM_CREDENTIALS message. */
2038                         struct cmsghdr cmsghdr;
2039                         uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
/* Leave one byte spare so the payload can be NUL-terminated before
 * strv_split() below. */
2045                 iovec.iov_base = buf;
2046                 iovec.iov_len = sizeof(buf)-1;
2050                 msghdr.msg_iov = &iovec;
2051                 msghdr.msg_iovlen = 1;
2052                 msghdr.msg_control = &control;
2053                 msghdr.msg_controllen = sizeof(control);
2055                 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
/* EAGAIN/EINTR just mean "nothing to read right now". */
2059                         if (errno == EAGAIN || errno == EINTR)
/* Reject datagrams that don't carry exactly the expected credential
 * control message — we must know who is talking to us. */
2065                 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
2066                     control.cmsghdr.cmsg_level != SOL_SOCKET ||
2067                     control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
2068                     control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
2069                         log_warning("Received notify message without credentials. Ignoring.");
2073                 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
2075                 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
2076                         if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
2077                                 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
2081                 assert((size_t) n < sizeof(buf));
2083                 if (!(tags = strv_split(buf, "\n\r")))
2086                 log_debug("Got notification message for unit %s", u->id);
2088                 if (UNIT_VTABLE(u)->notify_message)
2089                         UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
/*
 * Reap dead children: peek at each zombie with WNOWAIT (so /proc/$PID is
 * still readable for cgroup lookup), flush any last notify messages, map the
 * PID to its unit, then actually reap and forward the exit status to the
 * unit's sigchld_event() hook.  Returns 0 when no children are left,
 * negative errno on waitid() failure.
 */
2097 static int manager_dispatch_sigchld(Manager *m) {
2107                 /* First we call waitid() for a PID and do not reap the
2108                  * zombie. That way we can still access /proc/$PID for
2109                  * it while it is a zombie. */
2110                 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
/* ECHILD: no children at all — we are done. */
2112                         if (errno == ECHILD)
2124                 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2127                         get_process_comm(si.si_pid, &name);
2128                         log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2132                         /* Let's flush any message the dying child might still
2133                          * have queued for us. This ensures that the process
2134                          * still exists in /proc so that we can figure out
2135                          * which cgroup and hence unit it belongs to. */
2136                         if ((r = manager_process_notify_fd(m)) < 0)
2139                         /* And now figure out the unit this belongs to */
2140                         if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2141                                 u = cgroup_unit_by_pid(m, si.si_pid);
2143                 /* And now, we actually reap the zombie. */
2144                 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
/* Other si_code values (e.g. stop/continue) are not of interest. */
2151                 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2154                 log_debug("Child %lu died (code=%s, status=%i/%s)",
2155                           (long unsigned) si.si_pid,
2156                           sigchld_code_to_string(si.si_code),
2158                           strna(si.si_code == CLD_EXITED
2159                                 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2160                                 : signal_to_string(si.si_status)));
2165                 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->id);
2167                 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2168                 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
/*
 * Enqueue a start job for a special target unit by name, logging (but not
 * propagating an error message beyond the return value) on failure.
 */
2174 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2178         dbus_error_init(&error);
2180         log_debug("Activating special unit %s", name);
2182         if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2183                 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2185         dbus_error_free(&error);
/*
 * Drain the signalfd and react to each delivered signal: SIGCHLD is deferred
 * to manager_dispatch_sigchld(), SIGINT/SIGTERM/SIGWINCH/SIGPWR trigger the
 * corresponding special targets (behavior differs between system and user
 * managers), SIGHUP reloads, SIGUSR1 reconnects/loads D-Bus, SIGUSR2 dumps
 * state, and the SIGRTMIN+n range maps to target/exit-code/logging controls.
 * NOTE(review): many case labels and returns are elided in this extract.
 */
2190 static int manager_process_signal_fd(Manager *m) {
2192         struct signalfd_siginfo sfsi;
2193         bool sigchld = false;
2198                 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
/* Short read never happens with signalfd; EINTR/EAGAIN means drained. */
2203                         if (errno == EINTR || errno == EAGAIN)
2209                 if (sfsi.ssi_pid > 0) {
2212                         get_process_comm(sfsi.ssi_pid, &p);
2214                         log_debug("Received SIG%s from PID %lu (%s).",
2215                                   signal_to_string(sfsi.ssi_signo),
2216                                   (unsigned long) sfsi.ssi_pid, strna(p));
2219                         log_debug("Received SIG%s.", signal_to_string(sfsi.ssi_signo));
2221                 switch (sfsi.ssi_signo) {
/* SIGINT as PID 1: sysvinit-compatible re-execution request. */
2228                         if (m->running_as == MANAGER_SYSTEM) {
2229                                 /* This is for compatibility with the
2230                                  * original sysvinit */
2231                                 m->exit_code = MANAGER_REEXECUTE;
2238                         if (m->running_as == MANAGER_SYSTEM) {
2239                                 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2243                         /* Run the exit target if there is one, if not, just exit. */
2244                         if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2245                                 m->exit_code = MANAGER_EXIT;
2252                         if (m->running_as == MANAGER_SYSTEM)
2253                                 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2255                         /* This is a nop on non-init */
2259                         if (m->running_as == MANAGER_SYSTEM)
2260                                 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2262                         /* This is a nop on non-init */
/* SIGUSR1: reconnect to the bus if dbus.service is up, else start it. */
2268                         u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2270                         if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2271                                 log_info("Trying to reconnect to bus...");
2275                         if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2276                                 log_info("Loading D-Bus service...");
2277                                 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
/* SIGUSR2: dump all units and jobs to the log via a memstream. */
2288                         if (!(f = open_memstream(&dump, &size))) {
2289                                 log_warning("Failed to allocate memory stream.");
2293                         manager_dump_units(m, f, "\t");
2294                         manager_dump_jobs(m, f, "\t");
2299                                 log_warning("Failed to write status stream");
2304                         log_dump(LOG_INFO, dump);
2311                         m->exit_code = MANAGER_RELOAD;
2316                         /* Starting SIGRTMIN+0 */
2317                         static const char * const target_table[] = {
2318                                 [0] = SPECIAL_DEFAULT_TARGET,
2319                                 [1] = SPECIAL_RESCUE_TARGET,
2320                                 [2] = SPECIAL_EMERGENCY_TARGET,
2321                                 [3] = SPECIAL_HALT_TARGET,
2322                                 [4] = SPECIAL_POWEROFF_TARGET,
2323                                 [5] = SPECIAL_REBOOT_TARGET,
2324                                 [6] = SPECIAL_KEXEC_TARGET
2327                         /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2328                         static const ManagerExitCode code_table[] = {
2330                                 [1] = MANAGER_POWEROFF,
2331                                 [2] = MANAGER_REBOOT,
2335                         if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2336                             (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2337                                 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
/* rescue/emergency targets are isolated; the rest are replacements. */
2338                                 manager_start_target(m, target_table[idx],
2339                                                      (idx == 1 || idx == 2) ? JOB_ISOLATE : JOB_REPLACE);
2343                         if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2344                             (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2345                                 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
/* Remaining SIGRTMIN offsets toggle status display, log level and
 * log target at runtime. */
2349                         switch (sfsi.ssi_signo - SIGRTMIN) {
2352                                 log_debug("Enabling showing of status.");
2353                                 manager_set_show_status(m, true);
2357                                 log_debug("Disabling showing of status.");
2358                                 manager_set_show_status(m, false);
2362                                 log_set_max_level(LOG_DEBUG);
2363                                 log_notice("Setting log level to debug.");
2367                                 log_set_max_level(LOG_INFO);
2368                                 log_notice("Setting log level to info.");
2372                                 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
2373                                 log_notice("Setting log target to journal-or-kmsg.");
2377                                 log_set_target(LOG_TARGET_CONSOLE);
2378                                 log_notice("Setting log target to console.");
2382                                 log_set_target(LOG_TARGET_KMSG);
2383                                 log_notice("Setting log target to kmsg.");
2387                                 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2388                                 log_notice("Setting log target to syslog-or-kmsg.");
2392                                 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
/* SIGCHLD handling is batched and done once, after the fd is drained. */
2399                 return manager_dispatch_sigchld(m);
/*
 * Dispatch one epoll event to the right subsystem based on the Watch type
 * stored in the event's data pointer: signalfd, notify socket, unit fds,
 * unit/job timers, mount/swap table changes, udev, or D-Bus watches and
 * timeouts.  Returns negative errno on fatal errors from the handlers.
 */
2404 static int process_event(Manager *m, struct epoll_event *ev) {
2411         assert_se(w = ev->data.ptr);
/* Watches invalidated while queued are simply skipped. */
2413         if (w->type == WATCH_INVALID)
2420                 /* An incoming signal? */
2421                 if (ev->events != EPOLLIN)
2424                 if ((r = manager_process_signal_fd(m)) < 0)
2431                 /* An incoming daemon notification event? */
2432                 if (ev->events != EPOLLIN)
2435                 if ((r = manager_process_notify_fd(m)) < 0)
2442                 /* Some fd event, to be dispatched to the units */
2443                 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2446         case WATCH_UNIT_TIMER:
2447         case WATCH_JOB_TIMER: {
2451                 /* Some timer event, to be dispatched to the units */
/* timerfd delivers a 64-bit expiry counter; a short read is an error. */
2452                 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2454                         if (k < 0 && (errno == EINTR || errno == EAGAIN))
2457                         return k < 0 ? -errno : -EIO;
2460                 if (w->type == WATCH_UNIT_TIMER)
2461                         UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2463                         job_timer_event(w->data.job, v, w);
2468                 /* Some mount table change, intended for the mount subsystem */
2469                 mount_fd_event(m, ev->events);
2473                 /* Some swap table change, intended for the swap subsystem */
2474                 swap_fd_event(m, ev->events);
2478                 /* Some notification from udev, intended for the device subsystem */
2479                 device_fd_event(m, ev->events);
2482         case WATCH_DBUS_WATCH:
2483                 bus_watch_event(m, w, ev->events);
2486         case WATCH_DBUS_TIMEOUT:
2487                 bus_timeout_event(m, w, ev->events);
2491                 log_error("event type=%i", w->type);
2492                 assert_not_reached("Unknown epoll event type.");
/*
 * The manager's main event loop: repeatedly drain the internal work queues
 * (load, run, bus, cleanup, GC, dbus, swap-reload), feed the runtime
 * watchdog, and block in epoll_wait() for at most half the watchdog period.
 * Runs until m->exit_code leaves MANAGER_RUNNING; a rate limiter guards
 * against busy-looping.  Returns the final exit code.
 */
2498 int manager_loop(Manager *m) {
/* At most 50000 loop iterations per second before we throttle. */
2501         RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2504         m->exit_code = MANAGER_RUNNING;
2506         /* Release the path cache */
2507         set_free_free(m->unit_path_cache);
2508         m->unit_path_cache = NULL;
2510         manager_check_finished(m);
2512         /* There might still be some zombies hanging around from
2513          * before we were exec()'ed. Let's reap them */
2514         r = manager_dispatch_sigchld(m);
2518         while (m->exit_code == MANAGER_RUNNING) {
2519                 struct epoll_event event;
/* Ping the hardware watchdog only when we are the system manager. */
2523                 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM)
2526                 if (!ratelimit_test(&rl)) {
2527                         /* Yay, something is going seriously wrong, pause a little */
2528                         log_warning("Looping too fast. Throttling execution a little.");
/* Each dispatcher returning >0 restarts the loop so queues are fully
 * drained before we sleep in epoll_wait(). */
2533                 if (manager_dispatch_load_queue(m) > 0)
2536                 if (manager_dispatch_run_queue(m) > 0)
2539                 if (bus_dispatch(m) > 0)
2542                 if (manager_dispatch_cleanup_queue(m) > 0)
2545                 if (manager_dispatch_gc_queue(m) > 0)
2548                 if (manager_dispatch_dbus_queue(m) > 0)
2551                 if (swap_dispatch_reload(m) > 0)
2554                 /* Sleep for half the watchdog time */
2555                 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) {
2556                         wait_msec = (int) (m->runtime_watchdog / 2 / USEC_PER_MSEC);
2562                 n = epoll_wait(m->epoll_fd, &event, 1, wait_msec);
2574                 r = process_event(m, &event);
2579         return m->exit_code;
/*
 * Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/unit/<escaped-name> to a Unit.  The 31 skipped
 * characters are exactly the length of that prefix.
 */
2582 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2590         if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2593         if (!(n = bus_path_unescape(s+31)))
2596         u = manager_get_unit(m, n);
/*
 * Resolve a D-Bus object path of the form
 * /org/freedesktop/systemd1/job/<id> to a Job.  The 30 skipped characters
 * are exactly the length of that prefix.
 */
2607 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2616         if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2619         if ((r = safe_atou(s + 30, &id)) < 0)
2622         if (!(j = manager_get_job(m, id)))
/*
 * Send an audit record of the given type for a service unit's start/stop
 * result.  Best-effort: silently skipped when auditing is unavailable, when
 * we are reloading/deserializing, when not the system manager, or for
 * non-service units.  On EPERM the audit fd is closed for good so we never
 * retry.
 */
2630 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2635         if (m->audit_fd < 0)
2638         /* Don't generate audit events if the service was already
2639          * started and we're just deserializing */
2640         if (m->n_reloading > 0)
2643         if (m->running_as != MANAGER_SYSTEM)
2646         if (u->type != UNIT_SERVICE)
2649         if (!(p = unit_name_to_prefix_and_instance(u->id))) {
2650                 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2654         if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2655                 if (errno == EPERM) {
2656                         /* We aren't allowed to send audit messages?
2657                          * Then let's not retry again. */
2658                         audit_close(m->audit_fd);
2661                         log_warning("Failed to send audit message: %m");
/*
 * Tell Plymouth (the boot splash) about a unit state change by writing an
 * update message over its abstract-namespace UNIX socket.  Best-effort:
 * skipped during reload/deserialization, for non-system managers and for
 * unit types other than service/mount/swap; connection refusals while
 * Plymouth isn't running are silently ignored.
 */
2669 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2671         union sockaddr_union sa;
2673         char *message = NULL;
2675         /* Don't generate plymouth events if the service was already
2676          * started and we're just deserializing */
2677         if (m->n_reloading > 0)
2680         if (m->running_as != MANAGER_SYSTEM)
2683         if (u->type != UNIT_SERVICE &&
2684             u->type != UNIT_MOUNT &&
2685             u->type != UNIT_SWAP)
2688         /* We set SOCK_NONBLOCK here so that we rather drop the
2689          * message than wait for plymouth */
2690         if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2691                 log_error("socket() failed: %m");
/* Abstract socket: sun_path[0] stays NUL, name starts at sun_path+1. */
2696         sa.sa.sa_family = AF_UNIX;
2697         strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2698         if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
/* Expected failures when Plymouth is not around are not logged. */
2700                 if (errno != EPIPE &&
2703                     errno != ECONNREFUSED &&
2704                     errno != ECONNRESET &&
2705                     errno != ECONNABORTED)
2706                         log_error("connect() failed: %m");
/* "U\002" plus length-prefixed unit id is Plymouth's update command;
 * %n records the message length for the write below. */
2711         if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
2712                 log_error("Out of memory");
2717         if (write(fd, message, n + 1) != n + 1) {
2719                 if (errno != EPIPE &&
2722                     errno != ECONNREFUSED &&
2723                     errno != ECONNRESET &&
2724                     errno != ECONNABORTED)
2725                         log_error("Failed to write Plymouth message: %m");
2732         close_nointr_nofail(fd);
/*
 * Forward a D-Bus NameOwnerChanged event to the unit (if any) that watches
 * the given bus name, via its bus_name_owner_change() hook.
 */
2737 void manager_dispatch_bus_name_owner_changed(
2740                 const char* old_owner,
2741                 const char *new_owner) {
2748         if (!(u = hashmap_get(m->watch_bus, name)))
2751         UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
/*
 * Deliver the result of a GetConnectionUnixProcessID query to the unit
 * watching the given bus name, via its bus_query_pid_done() hook.
 */
2754 void manager_dispatch_bus_query_pid_done(
2765         if (!(u = hashmap_get(m->watch_bus, name)))
2768         UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
/*
 * Create a temporary file for serializing manager state (under /run for the
 * system manager, /tmp otherwise), opened with a restrictive 0077 umask and
 * returned as a read/write stdio stream in *_f.
 */
2771 int manager_open_serialization(Manager *m, FILE **_f) {
2779         if (m->running_as == MANAGER_SYSTEM)
2780                 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2782                 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
/* Restore the previous umask after mkostemp (restore line elided). */
2787         saved_umask = umask(0077);
2788         fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2798         log_debug("Serializing state to %s", path);
2801         if (!(f = fdopen(fd, "w+")))
/*
 * Write the manager's state to 'f' for a later re-exec/reload: global
 * key=value header lines (job id counter, taint flag, timestamps) followed
 * by per-unit sections, while collecting passable fds into 'fds'.  Must be
 * called with n_reloading held > 0 (asserted near the end).
 */
2809 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2821         fprintf(f, "current-job-id=%i\n", m->current_job_id);
2822         fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2824         dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2825         dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2826         dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2830         HASHMAP_FOREACH_KEY(u, t, m->units, i) {
/* Only units that implement serialization are written out. */
2834                 if (!unit_can_serialize(u))
2841                 if ((r = unit_serialize(u, f, fds)) < 0) {
2847         assert(m->n_reloading > 0);
/* Also keep the D-Bus connection fds alive across the re-exec. */
2853         r = bus_fdset_add_all(m, fds);
/*
 * Inverse of manager_serialize(): first parse the key=value header lines
 * (unknown keys are logged and skipped for forward compatibility), then read
 * per-unit sections, loading each named unit and handing it its serialized
 * state.  Must be called with n_reloading held > 0 (asserted near the end).
 */
2860 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2866         log_debug("Deserializing state...");
2871                 char line[LINE_MAX], *l;
2873                 if (!fgets(line, sizeof(line), f)) {
2888                 if (startswith(l, "current-job-id=")) {
2891                         if (safe_atou32(l+15, &id) < 0)
2892                                 log_debug("Failed to parse current job id value %s", l+15);
/* Never rewind the job id counter below what we already handed out. */
2894                                 m->current_job_id = MAX(m->current_job_id, id);
2895                 } else if (startswith(l, "taint-usr=")) {
2898                         if ((b = parse_boolean(l+10)) < 0)
2899                                 log_debug("Failed to parse taint /usr flag %s", l+10);
/* The taint flag is sticky: once set it stays set. */
2901                                 m->taint_usr = m->taint_usr || b;
2902                 } else if (startswith(l, "initrd-timestamp="))
2903                         dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2904                 else if (startswith(l, "startup-timestamp="))
2905                         dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2906                 else if (startswith(l, "finish-timestamp="))
2907                         dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2909                         log_debug("Unknown serialization item '%s'", l);
/* Second phase: one section per unit, introduced by the unit's name. */
2914                 char name[UNIT_NAME_MAX+2];
2917                 if (!fgets(name, sizeof(name), f)) {
2928                 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2931                 if ((r = unit_deserialize(u, f, fds)) < 0)
2941         assert(m->n_reloading > 0);
/*
 * Full configuration reload: serialize current state to a temp file, tear
 * down all jobs/units and generator output, re-discover unit paths, re-run
 * generators, re-enumerate, then deserialize the saved state back in and
 * coldplug everything.  Past the "no way back" point failures can only be
 * reported, not rolled back.
 */
2947 int manager_reload(Manager *m) {
2954         if ((r = manager_open_serialization(m, &f)) < 0)
2959         if (!(fds = fdset_new())) {
2965         if ((r = manager_serialize(m, f, fds)) < 0) {
/* Rewind so manager_deserialize() below reads from the start. */
2970         if (fseeko(f, 0, SEEK_SET) < 0) {
2976         /* From here on there is no way back. */
2977         manager_clear_jobs_and_units(m);
2978         manager_undo_generators(m);
2980         /* Find new unit paths */
2981         lookup_paths_free(&m->lookup_paths);
2982         if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2985         manager_run_generators(m);
2987         manager_build_unit_path_cache(m);
2989         /* First, enumerate what we can from all config files */
2990         if ((q = manager_enumerate(m)) < 0)
2993         /* Second, deserialize our stored data */
2994         if ((q = manager_deserialize(m, f, fds)) < 0)
3000         /* Third, fire things up! */
3001         if ((q = manager_coldplug(m)) < 0)
3004         assert(m->n_reloading > 0);
/*
 * True while either the initial default-target job is still pending (boot)
 * or the shutdown target has a job/activity (shutdown).
 */
3017 bool manager_is_booting_or_shutting_down(Manager *m) {
3022         /* Is the initial job still around? */
3023         if (manager_get_job(m, m->default_unit_job_id))
3026         /* Is there a job for the shutdown target? */
3027         u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
/* Clear the "failed" state of every loaded unit. */
3034 void manager_reset_failed(Manager *m) {
3040         HASHMAP_FOREACH(u, m->units, i)
3041                 unit_reset_failed(u);
/*
 * Return whether the named unit is inactive or on its way down.  An unknown
 * unit counts as inactive (early-return path elided in this extract).
 */
3044 bool manager_unit_pending_inactive(Manager *m, const char *name) {
3050         /* Returns true if the unit is inactive or going down */
3051         if (!(u = manager_get_unit(m, name)))
3054         return unit_pending_inactive(u);
/*
 * Detect the end of startup: once no jobs are pending and the finish
 * timestamp is not yet set, record it, log a kernel/initrd/userspace timing
 * breakdown (system manager on real hardware only), broadcast the result on
 * the bus and notify sd_daemon readiness.
 */
3057 void manager_check_finished(Manager *m) {
3058         char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
3059         usec_t kernel_usec, initrd_usec, userspace_usec, total_usec;
/* Already finished earlier — nothing to do. */
3063         if (dual_timestamp_is_set(&m->finish_timestamp))
/* Startup is not over while jobs are still queued. */
3066         if (hashmap_size(m->jobs) > 0)
3069         dual_timestamp_get(&m->finish_timestamp);
/* Kernel/initrd figures only make sense for PID 1 outside containers. */
3071         if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
3073                 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3074                 total_usec = m->finish_timestamp.monotonic;
3076                 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
3078                         kernel_usec = m->initrd_timestamp.monotonic;
3079                         initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
3081                         log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
3082                                  format_timespan(kernel, sizeof(kernel), kernel_usec),
3083                                  format_timespan(initrd, sizeof(initrd), initrd_usec),
3084                                  format_timespan(userspace, sizeof(userspace), userspace_usec),
3085                                  format_timespan(sum, sizeof(sum), total_usec));
/* No initrd: everything before startup_timestamp was the kernel. */
3087                         kernel_usec = m->startup_timestamp.monotonic;
3090                         log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
3091                                  format_timespan(kernel, sizeof(kernel), kernel_usec),
3092                                  format_timespan(userspace, sizeof(userspace), userspace_usec),
3093                                  format_timespan(sum, sizeof(sum), total_usec));
3096                 userspace_usec = initrd_usec = kernel_usec = 0;
3097                 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3099                 log_debug("Startup finished in %s.",
3100                           format_timespan(sum, sizeof(sum), total_usec));
3103         bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
3106                    "READY=1\nSTATUS=Startup finished in %s.",
3107                    format_timespan(sum, sizeof(sum), total_usec));
/*
 * Run all unit generators found in the system/user generator directory,
 * giving them a writable output directory (a fixed /run path for PID 1, a
 * mkdtemp() directory otherwise) that is afterwards added to the unit search
 * path — unless the generators produced nothing, in which case the empty
 * directory is removed again.
 */
3110 void manager_run_generators(Manager *m) {
3112         const char *generator_path;
3113         const char *argv[3];
3118         generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
3119         if (!(d = opendir(generator_path))) {
/* No generator directory at all is fine — nothing to run. */
3121                 if (errno == ENOENT)
3124                 log_error("Failed to enumerate generator directory: %m");
3128         if (!m->generator_unit_path) {
3130                 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3132                 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3133                         p = "/run/systemd/generator";
3135                         if (mkdir_p(p, 0755) < 0) {
3136                                 log_error("Failed to create generator directory: %m");
3141                         if (!(p = mkdtemp(user_path))) {
3142                                 log_error("Failed to create generator directory: %m");
3147                 if (!(m->generator_unit_path = strdup(p))) {
3148                         log_error("Failed to allocate generator unit path.");
3153         argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3154         argv[1] = m->generator_unit_path;
3158         execute_directory(generator_path, d, (char**) argv);
3161         if (rmdir(m->generator_unit_path) >= 0) {
3162                 /* Uh? we were able to remove this dir? I guess that
3163                  * means the directory was empty, hence let's shortcut
3166                 free(m->generator_unit_path);
3167                 m->generator_unit_path = NULL;
/* Add the generator output dir to the search path exactly once. */
3171         if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3174                 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3175                         log_error("Failed to add generator directory to unit search path: %m");
3179                 strv_free(m->lookup_paths.unit_path);
3180                 m->lookup_paths.unit_path = l;
3182                 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
/*
 * Remove the generator output directory from the unit search path, delete
 * its contents from disk and forget about it.  No-op when no generator path
 * was ever set up.
 */
3190 void manager_undo_generators(Manager *m) {
3193         if (!m->generator_unit_path)
3196         strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3197         rm_rf(m->generator_unit_path, false, true, false);
3199         free(m->generator_unit_path);
3200         m->generator_unit_path = NULL;
/*
 * Replace the manager's default cgroup controller list with a copy of
 * 'controllers', then drop controllers not actually available on this
 * system via cg_shorten_controllers().
 */
3203 int manager_set_default_controllers(Manager *m, char **controllers) {
3208         l = strv_copy(controllers);
3212         strv_free(m->default_controllers);
3213         m->default_controllers = l;
3215         cg_shorten_controllers(m->default_controllers);
/*
 * Decide whether our own logging may go to the journal: close the journal
 * log stream while journald's socket or service is not in its running
 * state, reopen it once both are up.  System manager only.
 */
3220 void manager_recheck_journal(Manager *m) {
3225         if (m->running_as != MANAGER_SYSTEM)
3228         u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
3229         if (u && SOCKET(u)->state != SOCKET_RUNNING) {
3230                 log_close_journal();
3234         u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
3235         if (u && SERVICE(u)->state != SERVICE_RUNNING) {
3236                 log_close_journal();
3240         /* Hmm, OK, so the socket is fully up and the service is up
3241          * too, then let's make use of the thing. */
/*
 * Enable or disable boot status output and persist the choice as the
 * presence/absence of /run/systemd/show-status (system manager only).
 */
3245 void manager_set_show_status(Manager *m, bool b) {
3248         if (m->running_as != MANAGER_SYSTEM)
3254                 touch("/run/systemd/show-status");
3256                 unlink("/run/systemd/show-status");
/*
 * Return whether boot status messages should currently be shown; falls back
 * to whether Plymouth is running (system manager only; other checks elided
 * in this extract).
 */
3259 bool manager_get_show_status(Manager *m) {
3262         if (m->running_as != MANAGER_SYSTEM)
3268         /* If Plymouth is running make sure we show the status, so
3269          * that there's something nice to see when people press Esc */
3271         return plymouth_running();
/* String names for ManagerRunningAs, plus the generated to-/from-string
 * conversion functions (manager_running_as_to_string / _from_string). */
3274 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3275         [MANAGER_SYSTEM] = "system",
3276         [MANAGER_USER] = "user"
3279 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);