1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2010 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include <sys/epoll.h>
27 #include <sys/signalfd.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
36 #include <sys/types.h>
44 #include <systemd/sd-daemon.h>
53 #include "ratelimit.h"
55 #include "mount-setup.h"
56 #include "unit-name.h"
57 #include "dbus-unit.h"
60 #include "path-lookup.h"
62 #include "bus-errors.h"
63 #include "exit-status.h"
66 #include "cgroup-util.h"
68 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
69 #define GC_QUEUE_ENTRIES_MAX 16
71 /* As soon as 10s passed since a unit was added to our GC queue, make sure to run a gc sweep */
72 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
74 /* Where clients shall send notification messages to */
75 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
76 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
/* Creates the AF_UNIX datagram socket on which clients send
 * sd_notify() status messages, binds it to the system or per-user
 * path, enables SO_PASSCRED so sender credentials are available, and
 * registers the fd with the manager's epoll loop.
 * NOTE(review): interior lines of this function are elided in this
 * view (error returns, branch structure); do not infer control flow
 * beyond what is shown. */
78 static int manager_setup_notify(Manager *m) {
81 struct sockaddr_un un;
83 struct epoll_event ev;
89 m->notify_watch.type = WATCH_NOTIFY;
90 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
91 log_error("Failed to allocate notification socket: %m");
96 sa.sa.sa_family = AF_UNIX;
/* User instance: abstract-namespace socket made unique via a random suffix */
99 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
101 unlink(NOTIFY_SOCKET_SYSTEM);
102 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
/* '@' denotes an abstract socket; the kernel expects a leading NUL byte instead */
105 if (sa.un.sun_path[0] == '@')
106 sa.un.sun_path[0] = 0;
/* Address length counts the leading byte plus the remainder of the path,
 * which works for both abstract and filesystem sockets */
109 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
113 log_error("bind() failed: %m");
117 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
118 log_error("SO_PASSCRED failed: %m");
124 ev.data.ptr = &m->notify_watch;
126 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
/* Put the '@' back so the stored notify_socket string is human-readable */
129 if (sa.un.sun_path[0] == 0)
130 sa.un.sun_path[0] = '@';
132 if (!(m->notify_socket = strdup(sa.un.sun_path)))
135 log_debug("Using notification socket %s", m->notify_socket);
/* System-manager only: asks the kernel to deliver SIGINT on
 * ctrl-alt-del and SIGWINCH on kbrequest instead of acting on them
 * itself. All failures are logged but non-fatal.
 * NOTE(review): interior lines (fd error check, return) are elided
 * in this view. */
140 static int enable_special_signals(Manager *m) {
145 /* Enable that we get SIGINT on control-alt-del. In containers
146 * this will fail with EPERM, so ignore that. */
147 if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM)
148 log_warning("Failed to enable ctrl-alt-del handling: %m");
150 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
152 /* Support systems without virtual console */
154 log_warning("Failed to open /dev/tty0: %m");
156 /* Enable that we get SIGWINCH on kbrequest */
157 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
158 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
160 close_nointr_nofail(fd);
/* Blocks all signals the manager handles itself and routes them
 * through a signalfd registered with the epoll loop. SIGCHLD keeps a
 * SIG_DFL handler with SA_NOCLDSTOP|SA_RESTART so stopped children do
 * not generate signals.
 * NOTE(review): interior lines (including the sigset_add_many()
 * terminator and error returns) are elided in this view. */
166 static int manager_setup_signals(Manager *m) {
168 struct epoll_event ev;
173 /* We are not interested in SIGSTOP and friends. */
175 sa.sa_handler = SIG_DFL;
176 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
177 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
179 assert_se(sigemptyset(&mask) == 0);
181 sigset_add_many(&mask,
182 SIGCHLD, /* Child died */
183 SIGTERM, /* Reexecute daemon */
184 SIGHUP, /* Reload configuration */
185 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
186 SIGUSR2, /* systemd: dump status */
187 SIGINT, /* Kernel sends us this on control-alt-del */
188 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
189 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
190 SIGRTMIN+0, /* systemd: start default.target */
191 SIGRTMIN+1, /* systemd: isolate rescue.target */
192 SIGRTMIN+2, /* systemd: isolate emergency.target */
193 SIGRTMIN+3, /* systemd: start halt.target */
194 SIGRTMIN+4, /* systemd: start poweroff.target */
195 SIGRTMIN+5, /* systemd: start reboot.target */
196 SIGRTMIN+6, /* systemd: start kexec.target */
197 SIGRTMIN+13, /* systemd: Immediate halt */
198 SIGRTMIN+14, /* systemd: Immediate poweroff */
199 SIGRTMIN+15, /* systemd: Immediate reboot */
200 SIGRTMIN+16, /* systemd: Immediate kexec */
201 SIGRTMIN+20, /* systemd: enable status messages */
202 SIGRTMIN+21, /* systemd: disable status messages */
203 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
204 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
205 SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
206 SIGRTMIN+27, /* systemd: set log target to console */
207 SIGRTMIN+28, /* systemd: set log target to kmsg */
208 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
210 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
212 m->signal_watch.type = WATCH_SIGNAL;
213 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
218 ev.data.ptr = &m->signal_watch;
220 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
/* ctrl-alt-del and kbrequest delivery only make sense for PID 1 */
223 if (m->running_as == MANAGER_SYSTEM)
224 return enable_special_signals(m);
/* Removes variables from the manager's inherited environment that are
 * part of the container and initrd interfaces, so they are not passed
 * on to spawned services. */
229 static void manager_strip_environment(Manager *m) {
232 /* Remove variables from the inherited set that are part of
233 * the container interface:
234 * http://www.freedesktop.org/wiki/Software/systemd/ContainerInterface */
235 strv_remove_prefix(m->environment, "container=");
236 strv_remove_prefix(m->environment, "container_");
238 /* Remove variables from the inherited set that are part of
239 * the initrd interface:
240 * http://www.freedesktop.org/wiki/Software/systemd/InitrdInterface */
241 strv_remove_prefix(m->environment, "RD_");
/* Allocates and initializes a Manager: hashmaps for units/jobs/watches,
 * the epoll loop, signal and notify sockets, cgroup setup and the bus
 * connections. On success *_m receives the new manager.
 * NOTE(review): interior lines (error labels, returns, goto cleanup)
 * are elided in this view. */
244 int manager_new(ManagerRunningAs running_as, Manager **_m) {
249 assert(running_as >= 0);
250 assert(running_as < _MANAGER_RUNNING_AS_MAX);
252 if (!(m = new0(Manager, 1)))
255 dual_timestamp_get(&m->startup_timestamp);
257 m->running_as = running_as;
258 m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1;
259 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
260 m->pin_cgroupfs_fd = -1;
/* Mark all fds as "not open" so cleanup can close unconditionally */
266 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
267 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
269 m->environment = strv_copy(environ);
273 manager_strip_environment(m);
275 if (running_as == MANAGER_SYSTEM) {
276 m->default_controllers = strv_new("cpu", NULL);
277 if (!m->default_controllers)
281 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
284 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
287 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
290 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
293 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
296 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
299 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
302 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
305 if ((r = manager_setup_signals(m)) < 0)
308 if ((r = manager_setup_cgroup(m)) < 0)
311 if ((r = manager_setup_notify(m)) < 0)
314 /* Try to connect to the busses, if possible. */
315 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
319 if ((m->audit_fd = audit_open()) < 0 &&
320 /* If the kernel lacks netlink or audit support,
321 * don't worry about it. */
322 errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT)
323 log_error("Failed to connect to audit log: %m");
/* Non-empty /usr at boot means early-mount assumptions were violated */
326 m->taint_usr = dir_is_empty("/usr") > 0;
/* Drains the cleanup queue, freeing each queued unit; returns the
 * number of units processed.
 * NOTE(review): the loop body and return are elided in this view. */
336 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
342 while ((u = m->cleanup_queue)) {
343 assert(u->in_cleanup_queue);
/* GC marker offsets added to the manager's current gc_marker epoch.
 * NOTE(review): the opening of this enum is elided in this view. */
353 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
354 GC_OFFSET_UNSURE, /* No clue */
355 GC_OFFSET_GOOD, /* We still need this unit */
356 GC_OFFSET_BAD, /* We don't need this unit anymore */
/* Recursive mark phase of unit garbage collection: classifies u as
 * GOOD (still referenced), BAD (collectable) or UNSURE by following
 * UNIT_REFERENCED_BY edges. Uses gc_marker + offset as a per-sweep
 * epoch so markers never need resetting.
 * NOTE(review): interior lines (returns, goto labels) are elided in
 * this view. */
360 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
/* Already classified in this sweep epoch? Then we are done */
367 if (u->gc_marker == gc_marker + GC_OFFSET_GOOD ||
368 u->gc_marker == gc_marker + GC_OFFSET_BAD ||
369 u->gc_marker == gc_marker + GC_OFFSET_IN_PATH)
372 if (u->in_cleanup_queue)
375 if (unit_check_gc(u))
/* Mark as on-path before recursing, so cycles terminate */
378 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
382 SET_FOREACH(other, u->dependencies[UNIT_REFERENCED_BY], i) {
383 unit_gc_sweep(other, gc_marker);
385 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
388 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
395 /* We were unable to find anything out about this entry, so
396 * let's investigate it later */
397 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
398 unit_add_to_gc_queue(u);
402 /* We definitely know that this one is not useful anymore, so
403 * let's mark it for deletion */
404 u->gc_marker = gc_marker + GC_OFFSET_BAD;
405 unit_add_to_cleanup_queue(u);
409 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
/* Runs a GC sweep over all queued units, but only once the queue is
 * large enough (GC_QUEUE_ENTRIES_MAX) or old enough
 * (GC_QUEUE_USEC_MAX); units still UNSURE or BAD after the sweep are
 * moved to the cleanup queue.
 * NOTE(review): interior lines (early return, counter updates) are
 * elided in this view. */
412 static unsigned manager_dispatch_gc_queue(Manager *m) {
419 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
420 (m->gc_queue_timestamp <= 0 ||
421 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
424 log_debug("Running GC...");
/* Advance the marker epoch; guard against unsigned wrap-around */
426 m->gc_marker += _GC_OFFSET_MAX;
427 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
430 gc_marker = m->gc_marker;
432 while ((u = m->gc_queue)) {
433 assert(u->in_gc_queue);
435 unit_gc_sweep(u, gc_marker);
437 LIST_REMOVE(Unit, gc_queue, m->gc_queue, u);
438 u->in_gc_queue = false;
/* UNSURE units we never proved useful are collected too */
442 if (u->gc_marker == gc_marker + GC_OFFSET_BAD ||
443 u->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
444 log_debug("Collecting %s", u->id);
445 u->gc_marker = gc_marker + GC_OFFSET_BAD;
446 unit_add_to_cleanup_queue(u);
450 m->n_in_gc_queue = 0;
451 m->gc_queue_timestamp = 0;
/* Tears down all jobs and units, then asserts that every queue and
 * hashmap is empty — used on manager shutdown/free.
 * NOTE(review): interior lines are elided in this view. */
456 static void manager_clear_jobs_and_units(Manager *m) {
462 while ((j = hashmap_first(m->transaction_jobs)))
465 while ((u = hashmap_first(m->units)))
468 manager_dispatch_cleanup_queue(m);
470 assert(!m->load_queue);
471 assert(!m->run_queue);
472 assert(!m->dbus_unit_queue);
473 assert(!m->dbus_job_queue);
474 assert(!m->cleanup_queue);
475 assert(!m->gc_queue);
477 assert(hashmap_isempty(m->transaction_jobs));
478 assert(hashmap_isempty(m->jobs));
479 assert(hashmap_isempty(m->units));
/* Frees a Manager and all resources it owns: units/jobs, per-type
 * shutdown hooks, cgroup hierarchy (kept across reexec), generators,
 * hashmaps, fds, audit connection and string sets.
 * NOTE(review): interior lines (additional frees, final free(m)) are
 * elided in this view. */
482 void manager_free(Manager *m) {
487 manager_clear_jobs_and_units(m);
489 for (c = 0; c < _UNIT_TYPE_MAX; c++)
490 if (unit_vtable[c]->shutdown)
491 unit_vtable[c]->shutdown(m);
493 /* If we reexecute ourselves, we keep the root cgroup
495 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
497 manager_undo_generators(m);
501 hashmap_free(m->units);
502 hashmap_free(m->jobs);
503 hashmap_free(m->transaction_jobs);
504 hashmap_free(m->watch_pids);
505 hashmap_free(m->watch_bus);
507 if (m->epoll_fd >= 0)
508 close_nointr_nofail(m->epoll_fd);
509 if (m->signal_watch.fd >= 0)
510 close_nointr_nofail(m->signal_watch.fd);
511 if (m->notify_watch.fd >= 0)
512 close_nointr_nofail(m->notify_watch.fd);
515 if (m->audit_fd >= 0)
516 audit_close(m->audit_fd);
519 free(m->notify_socket);
521 lookup_paths_free(&m->lookup_paths);
522 strv_free(m->environment);
524 strv_free(m->default_controllers);
526 hashmap_free(m->cgroup_bondings);
527 set_free_free(m->unit_path_cache);
/* Asks every unit type backend to enumerate units from disk/kernel,
 * then dispatches the resulting load queue. Returns the first error
 * encountered (presumably accumulated in r — elided in this view). */
532 int manager_enumerate(Manager *m) {
538 /* Let's ask every type to load all units from disk/kernel
539 * that it might know */
540 for (c = 0; c < _UNIT_TYPE_MAX; c++)
541 if (unit_vtable[c]->enumerate)
542 if ((q = unit_vtable[c]->enumerate(m)) < 0)
545 manager_dispatch_load_queue(m);
/* Restores the initial runtime state of every loaded unit (after
 * enumeration/deserialization).
 * NOTE(review): interior lines are elided in this view. */
549 int manager_coldplug(Manager *m) {
557 /* Then, let's set up their initial state. */
558 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
564 if ((q = unit_coldplug(u)) < 0)
/* Rebuilds the set of unit file paths known to exist on disk, so unit
 * lookup can skip stat() calls for files that are certainly absent.
 * Best-effort: on any failure the cache is dropped entirely.
 * NOTE(review): interior lines (closedir, goto fail, frees) are
 * elided in this view. */
571 static void manager_build_unit_path_cache(Manager *m) {
578 set_free_free(m->unit_path_cache);
580 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
581 log_error("Failed to allocate unit path cache.");
585 /* This simply builds a list of files we know exist, so that
586 * we don't always have to go to disk */
588 STRV_FOREACH(i, m->lookup_paths.unit_path) {
591 if (!(d = opendir(*i))) {
592 log_error("Failed to open directory: %m");
596 while ((de = readdir(d))) {
599 if (ignore_file(de->d_name))
602 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
608 if ((r = set_put(m->unit_path_cache, p)) < 0) {
/* Failure path: drop the partial cache rather than keep a wrong one */
621 log_error("Failed to build unit path cache: %s", strerror(-r));
623 set_free_free(m->unit_path_cache);
624 m->unit_path_cache = NULL;
/* Full startup sequence: run generators, build the unit path cache,
 * enumerate units, optionally deserialize state from a previous
 * manager instance, then coldplug everything.
 * NOTE(review): interior lines (n_reloading increment/decrement,
 * return) are elided in this view. */
630 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
635 manager_run_generators(m);
637 manager_build_unit_path_cache(m);
639 /* If we will deserialize make sure that during enumeration
640 * this is already known, so we increase the counter here
645 /* First, enumerate what we can from all config files */
646 r = manager_enumerate(m);
648 /* Second, deserialize if there is something to deserialize */
650 if ((q = manager_deserialize(m, serialization, fds)) < 0)
653 /* Third, fire things up! */
654 if ((q = manager_coldplug(m)) < 0)
658 assert(m->n_reloading > 0);
665 static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies);
/* Deletes one job from the transaction: unlinks it and (elided in
 * this view) frees it unless it is already installed. */
667 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
671 /* Deletes one job from the transaction */
673 transaction_unlink_job(m, j, delete_dependencies);
/* Removes every transaction job attached to a unit, cascading into
 * dependent jobs. */
679 static void transaction_delete_unit(Manager *m, Unit *u) {
682 /* Deletes all jobs associated with a certain unit from the
685 while ((j = hashmap_get(m->transaction_jobs, u)))
686 transaction_delete_job(m, j, true);
/* Frees all JobDependency links of installed jobs and clears the
 * transaction anchor — called once a transaction is applied or
 * aborted. */
689 static void transaction_clean_dependencies(Manager *m) {
695 /* Drops all dependencies of all installed jobs */
697 HASHMAP_FOREACH(j, m->jobs, i) {
698 while (j->subject_list)
699 job_dependency_free(j->subject_list);
700 while (j->object_list)
701 job_dependency_free(j->object_list);
/* Freeing the links above must also have emptied the anchor list */
704 assert(!m->transaction_anchor);
/* Discards the entire in-progress transaction: deletes or unlinks
 * every queued job, then drops remaining dependency links.
 * NOTE(review): the branch choosing delete vs. unlink is elided in
 * this view. */
707 static void transaction_abort(Manager *m) {
712 while ((j = hashmap_first(m->transaction_jobs)))
714 transaction_delete_job(m, j, true);
716 transaction_unlink_job(m, j, true);
720 assert(hashmap_isempty(m->transaction_jobs));
722 transaction_clean_dependencies(m);
/* Recursive marking of jobs that matter to the anchor job, walking
 * "mattering" subject links; uses the generation counter to avoid
 * revisiting jobs.
 * NOTE(review): interior lines (initial j handling, the "does not
 * matter" continue) are elided in this view. */
725 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
730 /* A recursive sweep through the graph that marks all units
731 * that matter to the anchor job, i.e. are directly or
732 * indirectly a dependency of the anchor job via paths that
733 * are fully marked as mattering. */
738 l = m->transaction_anchor;
740 LIST_FOREACH(subject, l, l) {
742 /* This link does not matter */
746 /* This unit has already been marked */
747 if (l->object->generation == generation)
750 l->object->matters_to_anchor = true;
751 l->object->generation = generation;
753 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
/* Merges job 'other' into 'j' (same unit, merged type t): combines
 * flags, re-owns and splices other's subject/object dependency lists
 * into j's, then deletes 'other'.
 * NOTE(review): interior lines (l->subject/l->object reassignment,
 * j->type = t) are elided in this view. */
757 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
758 JobDependency *l, *last;
762 assert(j->unit == other->unit);
763 assert(!j->installed);
765 /* Merges 'other' into 'j' and then deletes 'other'. */
768 j->state = JOB_WAITING;
769 j->override = j->override || other->override;
771 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
773 /* Patch us in as new owner of the JobDependency objects */
775 LIST_FOREACH(subject, l, other->subject_list) {
776 assert(l->subject == other);
781 /* Merge both lists */
783 last->subject_next = j->subject_list;
785 j->subject_list->subject_prev = last;
786 j->subject_list = other->subject_list;
789 /* Patch us in as new owner of the JobDependency objects */
791 LIST_FOREACH(object, l, other->object_list) {
792 assert(l->object == other);
797 /* Merge both lists */
799 last->object_next = j->object_list;
801 j->object_list->object_prev = last;
802 j->object_list = other->object_list;
805 /* Kill the other job */
806 other->subject_list = NULL;
807 other->object_list = NULL;
808 transaction_delete_job(m, other, true);
/* Returns true if at least one ConflictedBy dependency pulls this job
 * in (the condition inside the loop is elided in this view). */
811 static bool job_is_conflicted_by(Job *j) {
816 /* Returns true if this job is pulled in by a least one
817 * ConflictedBy dependency. */
819 LIST_FOREACH(object, l, j->object_list)
/* Finds one pair of unmergeable jobs in the per-unit transaction list
 * and deletes the less important one, preferring to keep starts over
 * stops (unless the stop was pulled in via ConflictedBy).
 * NOTE(review): interior lines (d assignment, returns) are elided in
 * this view. */
826 static int delete_one_unmergeable_job(Manager *m, Job *j) {
831 /* Tries to delete one item in the linked list
832 * j->transaction_next->transaction_next->... that conflicts
833 * with another one, in an attempt to make an inconsistent
834 * transaction work. */
836 /* We rely here on the fact that if a merged with b does not
837 * merge with c, then neither a nor b merges with c either */
838 LIST_FOREACH(transaction, j, j)
839 LIST_FOREACH(transaction, k, j->transaction_next) {
842 /* Is this one mergeable? Then skip it */
843 if (job_type_is_mergeable(j->type, k->type))
846 /* Ok, we found two that conflict, let's see if we can
847 * drop one of them */
848 if (!j->matters_to_anchor && !k->matters_to_anchor) {
850 /* Both jobs don't matter, so let's
851 * find the one that is smarter to
852 * remove. Let's think positive and
853 * rather remove stops than starts --
854 * except if something is being
855 * stopped because it is conflicted by
856 * another unit in which case we
857 * rather remove the start. */
859 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
860 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
862 if (j->type == JOB_STOP) {
864 if (job_is_conflicted_by(j))
869 } else if (k->type == JOB_STOP) {
871 if (job_is_conflicted_by(k))
878 } else if (!j->matters_to_anchor)
880 else if (!k->matters_to_anchor)
885 /* Ok, we can drop one, so let's do so. */
886 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
887 transaction_delete_job(m, d, true);
/* Two-phase job merging: first verify all jobs per unit are mergeable
 * (dropping one unmergeable job and asking the caller to retry if
 * possible), then actually merge each unit's job chain — including an
 * already-installed job where mergeable — into a single job.
 * NOTE(review): interior lines (returns, error codes) are elided in
 * this view. */
894 static int transaction_merge_jobs(Manager *m, DBusError *e) {
901 /* First step, check whether any of the jobs for one specific
902 * task conflict. If so, try to drop one of them. */
903 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
908 LIST_FOREACH(transaction, k, j->transaction_next) {
909 if (job_type_merge(&t, k->type) >= 0)
912 /* OK, we could not merge all jobs for this
913 * action. Let's see if we can get rid of one
916 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
917 /* Ok, we managed to drop one, now
918 * let's ask our callers to call us
919 * again after garbage collecting */
922 /* We couldn't merge anything. Failure */
923 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
924 job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
929 /* Second step, merge the jobs. */
930 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
934 /* Merge all transactions */
935 LIST_FOREACH(transaction, k, j->transaction_next)
936 assert_se(job_type_merge(&t, k->type) == 0);
938 /* If an active job is mergeable, merge it too */
940 job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
942 while ((k = j->transaction_next)) {
944 transaction_merge_and_delete_job(m, k, j, t);
947 transaction_merge_and_delete_job(m, j, k, t);
950 if (j->unit->job && !j->installed)
951 transaction_merge_and_delete_job(m, j, j->unit->job, t);
953 assert(!j->transaction_next);
954 assert(!j->transaction_prev);
/* Repeatedly drops jobs whose whole per-unit chain is redundant —
 * i.e. no job in the chain would change anything (anchor jobs and
 * jobs conflicting with an installed job are never redundant).
 * NOTE(review): the outer retry loop and break/continue structure are
 * elided in this view. */
960 static void transaction_drop_redundant(Manager *m) {
965 /* Goes through the transaction and removes all jobs that are
974 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
975 bool changes_something = false;
978 LIST_FOREACH(transaction, k, j) {
980 if (!job_is_anchor(k) &&
981 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
982 (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
985 changes_something = true;
989 if (changes_something)
992 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
993 transaction_delete_job(m, j, false);
/* Returns true if any job in this unit's transaction chain matters to
 * the anchor job (trailing return is elided in this view). */
1001 static bool unit_matters_to_anchor(Unit *u, Job *j) {
1003 assert(!j->transaction_prev);
1005 /* Checks whether at least one of the jobs for this unit
1006 * matters to the anchor. */
1008 LIST_FOREACH(transaction, j, j)
1009 if (j->matters_to_anchor)
/* Depth-first cycle detection over the ordering (UNIT_BEFORE) graph.
 * Markers double as back-pointers along the current path so a found
 * cycle can be walked backwards to pick a deletable job; if no job on
 * the cycle can be dropped, the transaction fails as cyclic.
 * NOTE(review): interior lines (returns, the deletable-job selection,
 * marker reset on backtrack) are elided in this view. */
1015 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
1022 assert(!j->transaction_prev);
1024 /* Does a recursive sweep through the ordering graph, looking
1025 * for a cycle. If we find cycle we try to break it. */
1027 /* Have we seen this before? */
1028 if (j->generation == generation) {
1031 /* If the marker is NULL we have been here already and
1032 * decided the job was loop-free from here. Hence
1033 * shortcut things and return right-away. */
1037 /* So, the marker is not NULL and we already have been
1038 * here. We have a cycle. Let's try to break it. We go
1039 * backwards in our path and try to find a suitable
1040 * job to remove. We use the marker to find our way
1041 * back, since, smart as we are, we stored our way back
1043 log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
1046 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1048 log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
1052 !unit_matters_to_anchor(k->unit, k)) {
1053 /* Ok, we can drop this one, so let's
1058 /* Check if this in fact was the beginning of
1066 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
1067 transaction_delete_unit(m, delete->unit);
1071 log_error("Unable to break cycle");
1073 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1077 /* Make the marker point to where we come from, so that we can
1078 * find our way backwards if we want to break a cycle. We use
1079 * a special marker for the beginning: we point to
1081 j->marker = from ? from : j;
1082 j->generation = generation;
1084 /* We assume that the dependencies are bidirectional, and
1085 * hence can ignore UNIT_AFTER */
1086 SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
1089 /* Is there a job for this unit? */
1090 if (!(o = hashmap_get(m->transaction_jobs, u)))
1092 /* Ok, there is no job for this in the
1093 * transaction, but maybe there is already one
1098 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1102 /* Ok, let's backtrack, and remember that this entry is not on
1103 * our path anymore. */
/* Runs cycle detection from every job in the transaction using a
 * fresh generation number; bumps *generation for the caller. */
1109 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1118 /* Check if the ordering graph is cyclic. If it is, try to fix
1119 * that up by dropping one of the jobs. */
1121 g = (*generation)++;
1123 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1124 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1130 static void transaction_collect_garbage(Manager *m) {
1135 /* Drop jobs that are not required by any other job */
1143 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1144 if (j->object_list) {
1145 /* log_debug("Keeping job %s/%s because of %s/%s", */
1146 /* j->unit->id, job_type_to_string(j->type), */
1147 /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
1148 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1152 /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
1153 transaction_delete_job(m, j, true);
/* Fails (with BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE) if applying the
 * transaction would replace an installed job with one that is not a
 * superset of it.
 * NOTE(review): interior lines (returns, j->unit->job guard) are
 * elided in this view. */
1161 static int transaction_is_destructive(Manager *m, DBusError *e) {
1167 /* Checks whether applying this transaction means that
1168 * existing jobs would be replaced */
1170 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
/* Merging has already collapsed each unit's chain to one job */
1173 assert(!j->transaction_prev);
1174 assert(!j->transaction_next);
1177 j->unit->job != j &&
1178 !job_type_is_superset(j->type, j->unit->job->type)) {
1180 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
/* For JOB_FAIL mode: deletes non-mattering jobs that would stop a
 * running service or conflict with an existing job, repeating until a
 * pass deletes nothing.
 * NOTE(review): the outer retry loop and continue/again control flow
 * are elided in this view. */
1188 static void transaction_minimize_impact(Manager *m) {
1192 /* Drops all unnecessary jobs that reverse already active jobs
1193 * or that stop a running service. */
1201 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1202 LIST_FOREACH(transaction, j, j) {
1203 bool stops_running_service, changes_existing_job;
1205 /* If it matters, we shouldn't drop it */
1206 if (j->matters_to_anchor)
1209 /* Would this stop a running service?
1210 * Would this change an existing job?
1211 * If so, let's drop this entry */
1213 stops_running_service =
1214 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1216 changes_existing_job =
1218 job_type_is_conflicting(j->type, j->unit->job->type);
1220 if (!stops_running_service && !changes_existing_job)
1223 if (stops_running_service)
1224 log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
1226 if (changes_existing_job)
1227 log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
1229 /* Ok, let's get rid of this */
1230 log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
1232 transaction_delete_job(m, j, true);
/* Installs the transaction: in isolate mode first cancels installed
 * jobs not in the transaction, registers every transaction job in
 * m->jobs, then steals them out of transaction_jobs, marks them
 * installed and queues them to run. Rolls back the hashmap insertions
 * on failure.
 * NOTE(review): interior lines (restart-on-invalidation, oom label,
 * returns) are elided in this view. */
1244 static int transaction_apply(Manager *m, JobMode mode) {
1249 /* Moves the transaction jobs to the set of active jobs */
1251 if (mode == JOB_ISOLATE) {
1253 /* When isolating first kill all installed jobs which
1254 * aren't part of the new transaction */
1256 HASHMAP_FOREACH(j, m->jobs, i) {
1257 assert(j->installed);
1259 if (hashmap_get(m->transaction_jobs, j->unit))
1262 /* 'j' itself is safe to remove, but if other jobs
1263 are invalidated recursively, our iterator may become
1264 invalid and we need to start over. */
1265 if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
1270 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
/* Merging has already collapsed each unit's chain to one job */
1272 assert(!j->transaction_prev);
1273 assert(!j->transaction_next);
1278 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1282 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1284 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
/* Replace any previously installed job on this unit */
1289 job_free(j->unit->job);
1292 j->installed = true;
1293 m->n_installed_jobs ++;
1295 /* We're fully installed. Now let's free data we don't
1298 assert(!j->transaction_next);
1299 assert(!j->transaction_prev);
1301 job_add_to_run_queue(j);
1302 job_add_to_dbus_queue(j);
1305 log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
1308 /* As last step, kill all remaining job dependencies. */
1309 transaction_clean_dependencies(m);
/* Error path: undo the hashmap_put()s performed above */
1315 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1319 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
/* Drives the whole transaction pipeline: mark anchor-relevant jobs,
 * minimize impact (JOB_FAIL), drop redundant jobs, garbage-collect,
 * fix ordering cycles, merge jobs (looping while merges/deletions
 * open new opportunities), check destructiveness, then apply. Aborts
 * the transaction on any unrecoverable error.
 * NOTE(review): interior lines (retry loops, goto rollback, returns)
 * are elided in this view. */
1325 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1327 unsigned generation = 1;
1331 /* This applies the changes recorded in transaction_jobs to
1332 * the actual list of jobs, if possible. */
1334 /* First step: figure out which jobs matter */
1335 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1337 /* Second step: Try not to stop any running services if
1338 * we don't have to. Don't try to reverse running
1339 * jobs if we don't have to. */
1340 if (mode == JOB_FAIL)
1341 transaction_minimize_impact(m);
1343 /* Third step: Drop redundant jobs */
1344 transaction_drop_redundant(m);
1347 /* Fourth step: Let's remove unneeded jobs that might
1349 if (mode != JOB_ISOLATE)
1350 transaction_collect_garbage(m);
1352 /* Fifth step: verify order makes sense and correct
1353 * cycles if necessary and possible */
1354 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1358 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1362 /* Let's see if the resulting transaction ordering
1363 * graph is still cyclic... */
1367 /* Sixth step: let's drop unmergeable entries if
1368 * necessary and possible, merge entries we can
1370 if ((r = transaction_merge_jobs(m, e)) >= 0)
1374 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1378 /* Seventh step: an entry got dropped, let's garbage
1379 * collect its dependencies. */
1380 if (mode != JOB_ISOLATE)
1381 transaction_collect_garbage(m);
1383 /* Let's see if the resulting transaction still has
1384 * unmergeable entries ... */
1387 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1388 transaction_drop_redundant(m);
1390 /* Ninth step: check whether we can actually apply this */
1391 if (mode == JOB_FAIL)
1392 if ((r = transaction_is_destructive(m, e)) < 0) {
1393 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r))
1397 /* Tenth step: apply changes */
1398 if ((r = transaction_apply(m, mode)) < 0) {
1399 log_warning("Failed to apply transaction: %s", strerror(-r));
1403 assert(hashmap_isempty(m->transaction_jobs));
1404 assert(!m->transaction_anchor);
/* Rollback label (elided): discard the whole transaction on error */
1409 transaction_abort(m);
/* Returns the transaction job of the given type for a unit, creating
 * it (or reusing the installed job — branch elided in this view) and
 * prepending it to the unit's transaction chain if it does not exist
 * yet; *is_new reports which case occurred. */
1413 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1419 /* Looks for an existing prospective job and returns that. If
1420 * it doesn't exist it is created and added to the prospective
1423 f = hashmap_get(m->transaction_jobs, unit);
1425 LIST_FOREACH(transaction, j, f) {
1426 assert(j->unit == unit);
1428 if (j->type == type) {
1435 if (unit->job && unit->job->type == type)
1437 else if (!(j = job_new(m, type, unit)))
1442 j->matters_to_anchor = false;
1443 j->override = override;
1445 LIST_PREPEND(Job, transaction, f, j);
1447 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1448 LIST_REMOVE(Job, transaction, f, j);
1456 /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
/* Detaches job j from its unit's transaction chain (fixing up the
 * hashmap entry when j was the chain head), frees its dependency
 * links, and — if requested — recursively deletes jobs that only
 * existed because they mattered to j. */
1461 static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1465 if (j->transaction_prev)
1466 j->transaction_prev->transaction_next = j->transaction_next;
1467 else if (j->transaction_next)
1468 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
/* j was the only job for this unit: drop the hashmap entry entirely */
1470 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1472 if (j->transaction_next)
1473 j->transaction_next->transaction_prev = j->transaction_prev;
1475 j->transaction_prev = j->transaction_next = NULL;
1477 while (j->subject_list)
1478 job_dependency_free(j->subject_list);
1480 while (j->object_list) {
/* Remember the dependent job before freeing the link that names it */
1481 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1483 job_dependency_free(j->object_list);
1485 if (other && delete_dependencies) {
1486 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1487 other->unit->id, job_type_to_string(other->type),
1488 j->unit->id, job_type_to_string(j->type));
1489 transaction_delete_job(m, other, delete_dependencies);
/* Adds a job of the given type for 'unit' to the current transaction,
 * then recursively pulls in jobs for all dependency relations that are
 * relevant for that job type (Requires/Wants/Conflicts/... for start,
 * RequiredBy/BoundBy for stop, PropagateReloadTo for reload).
 * Failures in non-mandatory dependencies are logged and ignored;
 * mandatory failures abort with a negative return and set 'e'. */
1494 static int transaction_add_job_and_dependencies(
1502 bool ignore_requirements,
1513 assert(type < _JOB_TYPE_MAX);
1516 /* log_debug("Pulling in %s/%s from %s/%s", */
1517 /* unit->id, job_type_to_string(type), */
1518 /* by ? by->unit->id : "NA", */
1519 /* by ? job_type_to_string(by->type) : "NA"); */
/* Refuse units that are not in a load state we can work with. */
1521 if (unit->load_state != UNIT_LOADED &&
1522 unit->load_state != UNIT_ERROR &&
1523 unit->load_state != UNIT_MASKED) {
1524 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
/* Stopping a broken/masked unit is still permitted; everything else
 * on such a unit is an error. */
1528 if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
1529 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1530 "Unit %s failed to load: %s. "
1531 "See system logs and 'systemctl status %s' for details.",
1533 strerror(-unit->load_error),
1538 if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
1539 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
1543 if (!unit_job_is_applicable(unit, type)) {
1544 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
1548 /* First add the job. */
1549 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1552 ret->ignore_order = ret->ignore_order || ignore_order;
1554 /* Then, add a link to the job. */
1555 if (!job_dependency_new(by, ret, matters, conflicts))
/* Only recurse on the first time we see this unit in the transaction,
 * and only when requirement dependencies are not being ignored. */
1558 if (is_new && !ignore_requirements) {
1561 /* If we are following some other unit, make sure we
1562 * add all dependencies of everybody following. */
1563 if (unit_following_set(ret->unit, &following) > 0) {
1564 SET_FOREACH(dep, following, i)
1565 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1566 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1572 set_free(following);
1575 /* Finally, recursively add in all dependencies. */
1576 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1577 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i)
1578 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1586 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i)
1587 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
/* Overridable requirements only "matter" when the override flag is
 * not set; hence matters = !override here. */
1596 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1597 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1598 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Wants= never matters; failures are logged and ignored. */
1604 SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i)
1605 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1606 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Requisite= is only verified active, never started. */
1612 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i)
1613 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1622 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1623 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1624 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Conflicting units get a stop job; note conflicts=true here. */
1630 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i)
1631 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1640 SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i)
1641 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1642 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
/* Stopping propagates the same job type to units that require or are
 * bound to this one. */
1650 if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1652 SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i)
1653 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1662 SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i)
1663 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1673 if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
1675 SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
1676 r = transaction_add_job_and_dependencies(m, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
1679 log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
1687 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
/* For an isolate request: walks all loaded units and enqueues a
 * JOB_STOP for each one that is not excluded (IgnoreOnIsolate=, an
 * alias, already inactive without a job, or already part of the
 * transaction). Failures are logged and ignored. */
1699 static int transaction_add_isolate_jobs(Manager *m) {
1707 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1709 /* ignore aliases */
1713 if (u->ignore_on_isolate)
1716 /* No need to stop inactive units that have no job pending */
1717 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
1720 /* Is there already something listed for this? */
1721 if (hashmap_get(m->transaction_jobs, u))
1724 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1725 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
/* Public entry point for enqueuing a job: validates the mode against
 * the unit, builds a transaction with the job and its dependencies,
 * adds stop jobs for everything else when isolating, and finally
 * activates the transaction. On any failure the whole transaction is
 * aborted. Returns 0 on success, negative errno-style code on error,
 * with details in 'e'. */
1731 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1736 assert(type < _JOB_TYPE_MAX);
1738 assert(mode < _JOB_MODE_MAX);
1740 if (mode == JOB_ISOLATE && type != JOB_START) {
1741 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1745 if (mode == JOB_ISOLATE && !unit->allow_isolate) {
1746 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1750 log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
/* JOB_IGNORE_REQUIREMENTS skips requirement deps only;
 * JOB_IGNORE_DEPENDENCIES additionally ignores ordering. */
1752 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1753 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1754 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1755 transaction_abort(m);
1759 if (mode == JOB_ISOLATE)
1760 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1761 transaction_abort(m);
1765 if ((r = transaction_activate(m, mode, e)) < 0)
1768 log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
/* Convenience wrapper around manager_add_job() that first loads the
 * unit by name. Returns negative errno on load or enqueue failure. */
1776 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1781 assert(type < _JOB_TYPE_MAX);
1783 assert(mode < _JOB_MODE_MAX);
1785 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1788 return manager_add_job(m, type, unit, mode, override, e, _ret);
/* Looks up an installed job by its numeric id; NULL if not found. */
1791 Job *manager_get_job(Manager *m, uint32_t id) {
1794 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
/* Looks up a unit by name; NULL if no unit with that name is known. */
1797 Unit *manager_get_unit(Manager *m, const char *name) {
1801 return hashmap_get(m->units, name);
/* Drains the unit load queue, loading the data of each queued unit.
 * Returns the number of units dispatched (count lines elided here).
 * Guarded against recursion since loading a unit may queue more. */
1804 unsigned manager_dispatch_load_queue(Manager *m) {
1810 /* Make sure we are not run recursively */
1811 if (m->dispatching_load_queue)
1814 m->dispatching_load_queue = true;
1816 /* Dispatches the load queue. Takes a unit from the queue and
1817 * tries to load its data until the queue is empty */
1819 while ((u = m->load_queue)) {
1820 assert(u->in_load_queue);
1826 m->dispatching_load_queue = false;
/* Prepares a unit for loading without reading anything from disk:
 * validates name/path, returns an existing unit if one is already
 * known, otherwise allocates a fresh Unit, records the fragment path
 * and queues it on the load/dbus/gc queues. 'name' may be derived
 * from 'path' when only a path is given. */
1830 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1836 assert(name || path);
1838 /* This will prepare the unit for loading, but not actually
1839 * load anything from disk. */
1841 if (path && !is_path(path)) {
1842 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1847 name = file_name_from_path(path);
1849 t = unit_name_to_type(name);
1851 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid_no_type(name, false)) {
1852 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
/* Reuse an already-known unit rather than creating a duplicate. */
1856 ret = manager_get_unit(m, name);
1862 ret = unit_new(m, unit_vtable[t]->object_size);
1867 ret->fragment_path = strdup(path);
1868 if (!ret->fragment_path) {
1874 if ((r = unit_add_name(ret, name)) < 0) {
1879 unit_add_to_load_queue(ret);
1880 unit_add_to_dbus_queue(ret);
1881 unit_add_to_gc_queue(ret);
/* Loads the unit's configuration (via the load queue) but does not
 * start anything. The returned unit is followed through merges so the
 * caller gets the canonical Unit object. */
1889 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1894 /* This will load the service information files, but not actually
1895 * start any services or anything. */
1897 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1900 manager_dispatch_load_queue(m);
1903 *_ret = unit_follow_merge(*_ret);
/* Dumps all installed jobs to 'f', each line prefixed with 'prefix'. */
1908 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1915 HASHMAP_FOREACH(j, s->jobs, i)
1916 job_dump(j, f, prefix);
/* Dumps all known units to 'f', each line prefixed with 'prefix'. */
1919 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1927 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1929 unit_dump(u, f, prefix);
/* Aborts any pending transaction and cancels every installed job. */
1932 void manager_clear_jobs(Manager *m) {
1937 transaction_abort(m);
1939 while ((j = hashmap_first(m->jobs)))
1940 job_finish_and_invalidate(j, JOB_CANCELED);
/* Drains the run queue, executing each queued installed job once.
 * Recursion-guarded since running a job may enqueue further jobs. */
1943 unsigned manager_dispatch_run_queue(Manager *m) {
1947 if (m->dispatching_run_queue)
1950 m->dispatching_run_queue = true;
1952 while ((j = m->run_queue)) {
1953 assert(j->installed);
1954 assert(j->in_run_queue);
1956 job_run_and_invalidate(j);
1960 m->dispatching_run_queue = false;
/* Sends queued D-Bus change signals for units first, then for jobs.
 * Recursion-guarded like the other queue dispatchers. */
1964 unsigned manager_dispatch_dbus_queue(Manager *m) {
1971 if (m->dispatching_dbus_queue)
1974 m->dispatching_dbus_queue = true;
1976 while ((u = m->dbus_unit_queue)) {
1977 assert(u->in_dbus_queue);
1979 bus_unit_send_change_signal(u);
1983 while ((j = m->dbus_job_queue)) {
1984 assert(j->in_dbus_queue);
1986 bus_job_send_change_signal(j);
1990 m->dispatching_dbus_queue = false;
/* Reads one sd_notify() datagram from the notification socket,
 * resolves the sending PID (via SCM_CREDENTIALS) to a unit — first
 * through the watched-PID map, then via cgroup lookup — and hands the
 * "\n"/"\r"-separated tags to the unit's notify_message() handler.
 * Messages without credentials or without a matching unit are
 * ignored with a warning. */
1994 static int manager_process_notify_fd(Manager *m) {
2001 struct msghdr msghdr;
2003 struct ucred *ucred;
2005 struct cmsghdr cmsghdr;
2006 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
2012 iovec.iov_base = buf;
/* One byte is reserved so the payload can be NUL-terminated before
 * being parsed as strings (see the assert below). */
2013 iovec.iov_len = sizeof(buf)-1;
2017 msghdr.msg_iov = &iovec;
2018 msghdr.msg_iovlen = 1;
2019 msghdr.msg_control = &control;
2020 msghdr.msg_controllen = sizeof(control);
2022 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
/* Transient read errors are not fatal; the fd is non-blocking. */
2026 if (errno == EAGAIN || errno == EINTR)
2032 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
2033 control.cmsghdr.cmsg_level != SOL_SOCKET ||
2034 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
2035 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
2036 log_warning("Received notify message without credentials. Ignoring.");
2040 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
2042 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
2043 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
2044 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
2048 assert((size_t) n < sizeof(buf));
2050 if (!(tags = strv_split(buf, "\n\r")))
2053 log_debug("Got notification message for unit %s", u->id);
2055 if (UNIT_VTABLE(u)->notify_message)
2056 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
/* Reaps dead children: peeks at each zombie with WNOWAIT so /proc is
 * still readable, flushes any pending notify messages from the dying
 * process, maps the PID to its owning unit (watched PIDs first, then
 * cgroup lookup), and finally reaps the zombie and forwards the exit
 * status to the unit's sigchld_event() handler. */
2064 static int manager_dispatch_sigchld(Manager *m) {
2074 /* First we call waitid() for a PID and do not reap the
2075 * zombie. That way we can still access /proc/$PID for
2076 * it while it is a zombie. */
2077 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2079 if (errno == ECHILD)
2091 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2094 get_process_comm(si.si_pid, &name);
2095 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2099 /* Let's flush any message the dying child might still
2100 * have queued for us. This ensures that the process
2101 * still exists in /proc so that we can figure out
2102 * which cgroup and hence unit it belongs to. */
2103 if ((r = manager_process_notify_fd(m)) < 0)
2106 /* And now figure out the unit this belongs to */
2107 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2108 u = cgroup_unit_by_pid(m, si.si_pid);
2110 /* And now, we actually reap the zombie. */
2111 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2118 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2121 log_debug("Child %lu died (code=%s, status=%i/%s)",
2122 (long unsigned) si.si_pid,
2123 sigchld_code_to_string(si.si_code),
2125 strna(si.si_code == CLD_EXITED
2126 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2127 : signal_to_string(si.si_status)));
2132 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->id);
2134 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2135 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
/* Enqueues a start job for the named special unit with the given job
 * mode; enqueue failures are logged but the error is still returned. */
2141 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2145 dbus_error_init(&error);
2147 log_debug("Activating special unit %s", name);
2149 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2150 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2152 dbus_error_free(&error);
/* Reads signals from the signalfd and dispatches them: sysvinit
 * compatibility signals (SIGINT = ctrl-alt-del, SIGWINCH = kbrequest,
 * SIGPWR), SIGTERM/SIGHUP for reexec/reload, SIGUSR1/SIGUSR2 for bus
 * reconnect and state dump, and the SIGRTMIN+n range for starting
 * special targets, shutdown exit codes, status display and log level/
 * target switches. Returns the SIGCHLD dispatcher's result when a
 * SIGCHLD was seen. */
2157 static int manager_process_signal_fd(Manager *m) {
2159 struct signalfd_siginfo sfsi;
2160 bool sigchld = false;
2165 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2170 if (errno == EINTR || errno == EAGAIN)
2176 if (sfsi.ssi_pid > 0) {
2179 get_process_comm(sfsi.ssi_pid, &p);
2181 log_debug("Received SIG%s from PID %lu (%s).",
2182 signal_to_string(sfsi.ssi_signo),
2183 (unsigned long) sfsi.ssi_pid, strna(p));
2186 log_debug("Received SIG%s.", signal_to_string(sfsi.ssi_signo));
2188 switch (sfsi.ssi_signo) {
2195 if (m->running_as == MANAGER_SYSTEM) {
2196 /* This is for compatibility with the
2197 * original sysvinit */
2198 m->exit_code = MANAGER_REEXECUTE;
2205 if (m->running_as == MANAGER_SYSTEM) {
2206 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2210 /* Run the exit target if there is one, if not, just exit. */
2211 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2212 m->exit_code = MANAGER_EXIT;
2219 if (m->running_as == MANAGER_SYSTEM)
2220 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2222 /* This is a nop on non-init */
2226 if (m->running_as == MANAGER_SYSTEM)
2227 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2229 /* This is a nop on non-init */
/* Reconnect to (or start) the D-Bus service, depending on its
 * current activation state. */
2235 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2237 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2238 log_info("Trying to reconnect to bus...");
2242 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2243 log_info("Loading D-Bus service...");
2244 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
/* Dump the full unit/job state into a memory stream and log it. */
2255 if (!(f = open_memstream(&dump, &size))) {
2256 log_warning("Failed to allocate memory stream.");
2260 manager_dump_units(m, f, "\t");
2261 manager_dump_jobs(m, f, "\t");
2266 log_warning("Failed to write status stream");
2271 log_dump(LOG_INFO, dump);
2278 m->exit_code = MANAGER_RELOAD;
2283 /* Starting SIGRTMIN+0 */
2284 static const char * const target_table[] = {
2285 [0] = SPECIAL_DEFAULT_TARGET,
2286 [1] = SPECIAL_RESCUE_TARGET,
2287 [2] = SPECIAL_EMERGENCY_TARGET,
2288 [3] = SPECIAL_HALT_TARGET,
2289 [4] = SPECIAL_POWEROFF_TARGET,
2290 [5] = SPECIAL_REBOOT_TARGET,
2291 [6] = SPECIAL_KEXEC_TARGET
2294 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2295 static const ManagerExitCode code_table[] = {
2297 [1] = MANAGER_POWEROFF,
2298 [2] = MANAGER_REBOOT,
2302 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2303 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2304 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
/* rescue and emergency targets are isolated; the rest replace. */
2305 manager_start_target(m, target_table[idx],
2306 (idx == 1 || idx == 2) ? JOB_ISOLATE : JOB_REPLACE);
2310 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2311 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2312 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2316 switch (sfsi.ssi_signo - SIGRTMIN) {
2319 log_debug("Enabling showing of status.");
2320 manager_set_show_status(m, true);
2324 log_debug("Disabling showing of status.");
2325 manager_set_show_status(m, false);
2329 log_set_max_level(LOG_DEBUG);
2330 log_notice("Setting log level to debug.");
2334 log_set_max_level(LOG_INFO);
2335 log_notice("Setting log level to info.");
2339 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
2340 log_notice("Setting log target to journal-or-kmsg.");
2344 log_set_target(LOG_TARGET_CONSOLE);
2345 log_notice("Setting log target to console.");
2349 log_set_target(LOG_TARGET_KMSG);
2350 log_notice("Setting log target to kmsg.");
2354 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2355 log_notice("Setting log target to syslog-or-kmsg.");
2359 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
2366 return manager_dispatch_sigchld(m);
/* Dispatches a single epoll event to the right subsystem based on the
 * Watch type stored in the event's data pointer: signals, daemon
 * notifications, unit fds, unit/job timers, mount/swap table changes,
 * udev notifications and D-Bus watches/timeouts. */
2371 static int process_event(Manager *m, struct epoll_event *ev) {
2378 assert_se(w = ev->data.ptr);
/* Watches invalidated while events were pending are skipped. */
2380 if (w->type == WATCH_INVALID)
2387 /* An incoming signal? */
2388 if (ev->events != EPOLLIN)
2391 if ((r = manager_process_signal_fd(m)) < 0)
2398 /* An incoming daemon notification event? */
2399 if (ev->events != EPOLLIN)
2402 if ((r = manager_process_notify_fd(m)) < 0)
2409 /* Some fd event, to be dispatched to the units */
2410 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2413 case WATCH_UNIT_TIMER:
2414 case WATCH_JOB_TIMER: {
2418 /* Some timer event, to be dispatched to the units */
/* timerfd read returns a uint64_t expiration count; anything else
 * (other than EINTR/EAGAIN) is an error. */
2419 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2421 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2424 return k < 0 ? -errno : -EIO;
2427 if (w->type == WATCH_UNIT_TIMER)
2428 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2430 job_timer_event(w->data.job, v, w);
2435 /* Some mount table change, intended for the mount subsystem */
2436 mount_fd_event(m, ev->events);
2440 /* Some swap table change, intended for the swap subsystem */
2441 swap_fd_event(m, ev->events);
2445 /* Some notification from udev, intended for the device subsystem */
2446 device_fd_event(m, ev->events);
2449 case WATCH_DBUS_WATCH:
2450 bus_watch_event(m, w, ev->events);
2453 case WATCH_DBUS_TIMEOUT:
2454 bus_timeout_event(m, w, ev->events);
2458 log_error("event type=%i", w->type);
2459 assert_not_reached("Unknown epoll event type.");
/* The manager's main event loop: drains all work queues, dispatches
 * bus traffic and epoll events until exit_code leaves MANAGER_RUNNING,
 * pinging the hardware watchdog and rate-limiting runaway iterations
 * along the way. Returns the final exit code. */
2465 int manager_loop(Manager *m) {
2468 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2471 m->exit_code = MANAGER_RUNNING;
2473 /* Release the path cache */
2474 set_free_free(m->unit_path_cache);
2475 m->unit_path_cache = NULL;
2477 manager_check_finished(m);
2479 /* There might still be some zombies hanging around from
2480 * before we were exec()'ed. Let's reap them */
2481 r = manager_dispatch_sigchld(m);
2485 while (m->exit_code == MANAGER_RUNNING) {
2486 struct epoll_event event;
2490 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM)
2493 if (!ratelimit_test(&rl)) {
2494 /* Yay, something is going seriously wrong, pause a little */
2495 log_warning("Looping too fast. Throttling execution a little.");
/* Each dispatcher returning > 0 restarts the loop so queues are
 * fully drained before blocking in epoll_wait(). */
2500 if (manager_dispatch_load_queue(m) > 0)
2503 if (manager_dispatch_run_queue(m) > 0)
2506 if (bus_dispatch(m) > 0)
2509 if (manager_dispatch_cleanup_queue(m) > 0)
2512 if (manager_dispatch_gc_queue(m) > 0)
2515 if (manager_dispatch_dbus_queue(m) > 0)
2518 if (swap_dispatch_reload(m) > 0)
2521 /* Sleep for half the watchdog time */
2522 if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) {
2523 wait_msec = (int) (m->runtime_watchdog / 2 / USEC_PER_MSEC);
2529 n = epoll_wait(m->epoll_fd, &event, 1, wait_msec);
2541 r = process_event(m, &event);
2546 return m->exit_code;
/* Resolves a D-Bus object path of the form
 * /org/freedesktop/systemd1/unit/<escaped-name> to a Unit.
 * s+31 skips the 31-character path prefix. */
2549 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2557 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2560 if (!(n = bus_path_unescape(s+31)))
2563 u = manager_get_unit(m, n);
/* Resolves a D-Bus object path of the form
 * /org/freedesktop/systemd1/job/<id> to a Job.
 * s+30 skips the 30-character path prefix. */
2574 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2583 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2586 if ((r = safe_atou(s + 30, &id)) < 0)
2589 if (!(j = manager_get_job(m, id)))
/* Emits an audit record for a service state change. Skipped when the
 * audit fd is closed, during deserialization (n_reloading > 0), in
 * user mode, or for non-service units. If the kernel denies sending
 * (EPERM, e.g. in a container) the audit fd is closed for good. */
2597 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2602 if (m->audit_fd < 0)
2605 /* Don't generate audit events if the service was already
2606 * started and we're just deserializing */
2607 if (m->n_reloading > 0)
2610 if (m->running_as != MANAGER_SYSTEM)
2613 if (u->type != UNIT_SERVICE)
2616 if (!(p = unit_name_to_prefix_and_instance(u->id))) {
2617 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2621 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2622 if (errno == EPERM) {
2623 /* We aren't allowed to send audit messages?
2624 * Then let's not retry again. */
2625 audit_close(m->audit_fd);
2628 log_warning("Failed to send audit message: %m");
/* Notifies Plymouth (the boot splash) about a unit starting, over its
 * abstract-namespace Unix socket. Best-effort only: the socket is
 * non-blocking and connection/write errors other than the expected
 * "plymouth not running" family are merely logged. Skipped during
 * deserialization, in user mode, and for unit types other than
 * service/mount/swap. */
2636 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2638 union sockaddr_union sa;
2640 char *message = NULL;
2642 /* Don't generate plymouth events if the service was already
2643 * started and we're just deserializing */
2644 if (m->n_reloading > 0)
2647 if (m->running_as != MANAGER_SYSTEM)
2650 if (u->type != UNIT_SERVICE &&
2651 u->type != UNIT_MOUNT &&
2652 u->type != UNIT_SWAP)
2655 /* We set SOCK_NONBLOCK here so that we rather drop the
2656 * message than wait for plymouth */
2657 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2658 log_error("socket() failed: %m");
2663 sa.sa.sa_family = AF_UNIX;
/* sun_path+1 leaves a leading NUL byte: abstract socket namespace. */
2664 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2665 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2667 if (errno != EPIPE &&
2670 errno != ECONNREFUSED &&
2671 errno != ECONNRESET &&
2672 errno != ECONNABORTED)
2673 log_error("connect() failed: %m");
/* "U\002" + length byte + unit name is Plymouth's update-message
 * wire format; %n records the length actually written. */
2678 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
2679 log_error("Out of memory");
2684 if (write(fd, message, n + 1) != n + 1) {
2686 if (errno != EPIPE &&
2689 errno != ECONNREFUSED &&
2690 errno != ECONNRESET &&
2691 errno != ECONNABORTED)
2692 log_error("Failed to write Plymouth message: %m");
2699 close_nointr_nofail(fd);
/* Forwards a D-Bus NameOwnerChanged event for a watched bus name to
 * the unit that registered interest in it; no-op for unknown names. */
2704 void manager_dispatch_bus_name_owner_changed(
2707 const char* old_owner,
2708 const char *new_owner) {
2715 if (!(u = hashmap_get(m->watch_bus, name)))
2718 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
/* Forwards the result of a GetConnectionUnixProcessID query to the
 * unit watching that bus name; no-op for unknown names. */
2721 void manager_dispatch_bus_query_pid_done(
2732 if (!(u = hashmap_get(m->watch_bus, name)))
2735 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
/* Opens a private temp file (mode 0600 via umask 0077) used to
 * serialize manager state across re-execution: under /run for the
 * system instance, /tmp for user instances. Returns the stream in
 * *_f. */
2738 int manager_open_serialization(Manager *m, FILE **_f) {
2746 if (m->running_as == MANAGER_SYSTEM)
2747 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2749 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2754 saved_umask = umask(0077);
2755 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2765 log_debug("Serializing state to %s", path);
2768 if (!(f = fdopen(fd, "w+")))
/* Writes the manager state (job id counter, taint flag, timestamps,
 * then per-unit serialization for every serializable unit) to 'f',
 * collecting fds to keep open across reexec into 'fds'. Must be
 * balanced against manager_deserialize(). */
2776 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2788 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2789 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2791 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2792 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2793 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2797 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
/* Skip aliases/units that cannot serialize themselves. */
2801 if (!unit_can_serialize(u))
2808 if ((r = unit_serialize(u, f, fds)) < 0) {
2814 assert(m->n_reloading > 0);
2820 r = bus_fdset_add_all(m, fds);
/* Counterpart of manager_serialize(): reads manager-level key=value
 * lines until the blank separator, then per-unit sections, restoring
 * job id counter, taint flag and timestamps. Unknown keys are logged
 * and skipped so newer serializations remain loadable. */
2827 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2833 log_debug("Deserializing state...");
2838 char line[LINE_MAX], *l;
2840 if (!fgets(line, sizeof(line), f)) {
2855 if (startswith(l, "current-job-id=")) {
2858 if (safe_atou32(l+15, &id) < 0)
2859 log_debug("Failed to parse current job id value %s", l+15);
2861 m->current_job_id = MAX(m->current_job_id, id);
2862 } else if (startswith(l, "taint-usr=")) {
2865 if ((b = parse_boolean(l+10)) < 0)
2866 log_debug("Failed to parse taint /usr flag %s", l+10);
2868 m->taint_usr = m->taint_usr || b;
2869 } else if (startswith(l, "initrd-timestamp="))
2870 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2871 else if (startswith(l, "startup-timestamp="))
2872 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2873 else if (startswith(l, "finish-timestamp="))
2874 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2876 log_debug("Unknown serialization item '%s'", l);
/* Second phase: one section per unit, keyed by unit name. */
2881 char name[UNIT_NAME_MAX+2];
2884 if (!fgets(name, sizeof(name), f)) {
2895 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2898 if ((r = unit_deserialize(u, f, fds)) < 0)
2908 assert(m->n_reloading > 0);
/* Full configuration reload: serialize current state to a temp file,
 * drop all jobs/units and generator output, reinitialize lookup
 * paths, rerun generators, re-enumerate units, then deserialize the
 * saved state back in and coldplug everything. */
2914 int manager_reload(Manager *m) {
2921 if ((r = manager_open_serialization(m, &f)) < 0)
2926 if (!(fds = fdset_new())) {
2932 if ((r = manager_serialize(m, f, fds)) < 0) {
/* Rewind so the state can be read back after the teardown. */
2937 if (fseeko(f, 0, SEEK_SET) < 0) {
2943 /* From here on there is no way back. */
2944 manager_clear_jobs_and_units(m);
2945 manager_undo_generators(m);
2947 /* Find new unit paths */
2948 lookup_paths_free(&m->lookup_paths);
2949 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2952 manager_run_generators(m);
2954 manager_build_unit_path_cache(m);
2956 /* First, enumerate what we can from all config files */
2957 if ((q = manager_enumerate(m)) < 0)
2960 /* Second, deserialize our stored data */
2961 if ((q = manager_deserialize(m, f, fds)) < 0)
2967 /* Third, fire things up! */
2968 if ((q = manager_coldplug(m)) < 0)
2971 assert(m->n_reloading > 0);
/* True while the initial default-target job is still around (booting)
 * or a job exists for the shutdown target (shutting down). */
2984 bool manager_is_booting_or_shutting_down(Manager *m) {
2989 /* Is the initial job still around? */
2990 if (manager_get_job(m, m->default_unit_job_id))
2993 /* Is there a job for the shutdown target? */
2994 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
/* Clears the failed state of every known unit. */
3001 void manager_reset_failed(Manager *m) {
3007 HASHMAP_FOREACH(u, m->units, i)
3008 unit_reset_failed(u);
/* Returns true if the named unit is inactive or going down; an
 * unknown unit counts as inactive (falls through to the elided
 * default return). */
3011 bool manager_unit_pending_inactive(Manager *m, const char *name) {
3017 /* Returns true if the unit is inactive or going down */
3018 if (!(u = manager_get_unit(m, name)))
3021 return unit_pending_inactive(u);
/* Called whenever the job queue may have emptied: once no jobs are
 * left and the finish timestamp is not yet set, records boot
 * completion, logs the kernel/initrd/userspace timing breakdown (for
 * the system instance outside containers), broadcasts the result on
 * the bus and notifies sd_notify() watchers with READY=1. */
3025 void manager_check_finished(Manager *m) {
3026 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
3027 usec_t kernel_usec, initrd_usec, userspace_usec, total_usec;
/* Only ever finish once, and only when all jobs are done. */
3030 if (dual_timestamp_is_set(&m->finish_timestamp))
3033 if (hashmap_size(m->jobs) > 0)
3036 dual_timestamp_get(&m->finish_timestamp);
3038 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
3040 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3041 total_usec = m->finish_timestamp.monotonic;
3043 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
3045 kernel_usec = m->initrd_timestamp.monotonic;
3046 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
3048 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
3049 format_timespan(kernel, sizeof(kernel), kernel_usec),
3050 format_timespan(initrd, sizeof(initrd), initrd_usec),
3051 format_timespan(userspace, sizeof(userspace), userspace_usec),
3052 format_timespan(sum, sizeof(sum), total_usec));
3054 kernel_usec = m->startup_timestamp.monotonic;
3057 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
3058 format_timespan(kernel, sizeof(kernel), kernel_usec),
3059 format_timespan(userspace, sizeof(userspace), userspace_usec),
3060 format_timespan(sum, sizeof(sum), total_usec));
/* User instances / containers: only total time is meaningful. */
3063 userspace_usec = initrd_usec = kernel_usec = 0;
3064 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
3066 log_debug("Startup finished in %s.",
3067 format_timespan(sum, sizeof(sum), total_usec));
3070 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
3073 "READY=1\nSTATUS=Startup finished in %s.",
3074 format_timespan(sum, sizeof(sum), total_usec));
/* Runs all unit generators from the generator directory into a
 * per-instance output directory (/run/systemd/generator for PID 1, a
 * mkdtemp dir otherwise), then adds that directory to the unit search
 * path — unless the generators produced nothing, in which case the
 * empty directory is removed again. */
3077 void manager_run_generators(Manager *m) {
3079 const char *generator_path;
3080 const char *argv[3];
3085 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
3086 if (!(d = opendir(generator_path))) {
/* A missing generator directory simply means nothing to run. */
3088 if (errno == ENOENT)
3091 log_error("Failed to enumerate generator directory: %m");
3095 if (!m->generator_unit_path) {
3097 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3099 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3100 p = "/run/systemd/generator";
3102 if (mkdir_p(p, 0755) < 0) {
3103 log_error("Failed to create generator directory: %m");
3108 if (!(p = mkdtemp(user_path))) {
3109 log_error("Failed to create generator directory: %m");
3114 if (!(m->generator_unit_path = strdup(p))) {
3115 log_error("Failed to allocate generator unit path.");
3120 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3121 argv[1] = m->generator_unit_path;
3125 execute_directory(generator_path, d, (char**) argv);
3128 if (rmdir(m->generator_unit_path) >= 0) {
3129 /* Uh? we were able to remove this dir? I guess that
3130 * means the directory was empty, hence let's shortcut
3133 free(m->generator_unit_path);
3134 m->generator_unit_path = NULL;
3138 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3141 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3142 log_error("Failed to add generator directory to unit search path: %m");
3146 strv_free(m->lookup_paths.unit_path);
3147 m->lookup_paths.unit_path = l;
3149 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
/* Reverses manager_run_generators(): drops the generator directory
 * from the search path, removes it recursively and frees the path. */
3157 void manager_undo_generators(Manager *m) {
3160 if (!m->generator_unit_path)
3163 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3164 rm_rf(m->generator_unit_path, false, true, false);
3166 free(m->generator_unit_path);
3167 m->generator_unit_path = NULL;
/* Replaces the manager's default cgroup controller list with a copy
 * of 'controllers', then normalizes it via cg_shorten_controllers(). */
3170 int manager_set_default_controllers(Manager *m, char **controllers) {
3175 l = strv_copy(controllers);
3179 strv_free(m->default_controllers);
3180 m->default_controllers = l;
3182 cg_shorten_controllers(m->default_controllers);
/* Opens or closes the journal logging connection depending on whether
 * journald's socket and service units are both fully running. System
 * instance only — user managers never touch journal routing here. */
3187 void manager_recheck_journal(Manager *m) {
3192 if (m->running_as != MANAGER_SYSTEM)
3195 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
3196 if (u && SOCKET(u)->state != SOCKET_RUNNING) {
3197 log_close_journal();
3201 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
3202 if (u && SERVICE(u)->state != SERVICE_RUNNING) {
3203 log_close_journal();
3207 /* Hmm, OK, so the socket is fully up and the service is up
3208 * too, then let's make use of the thing. */
/* Sets whether boot status output is shown, persisting the choice as
 * the presence of /run/systemd/show-status (system instance only). */
3212 void manager_set_show_status(Manager *m, bool b) {
3215 if (m->running_as != MANAGER_SYSTEM)
3221 touch("/run/systemd/show-status");
3223 unlink("/run/systemd/show-status");
/* Returns whether boot status output should be shown; forced on while
 * Plymouth is running so Esc reveals something useful. */
3226 bool manager_get_show_status(Manager *m) {
3229 if (m->running_as != MANAGER_SYSTEM)
3235 /* If Plymouth is running make sure we show the status, so
3236 * that there's something nice to see when people press Esc */
3238 return plymouth_running();
/* String table backing manager_running_as_to_string()/_from_string(),
 * generated by the DEFINE_STRING_TABLE_LOOKUP macro below. */
3241 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3242 [MANAGER_SYSTEM] = "system",
3243 [MANAGER_USER] = "user"
3246 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);