X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fcore%2Fmanager.c;h=3cd99154e698ae7e2be533c01f0cf53bb0f64414;hp=95056296ad21d9f4949136d3d135dfb5cb82fcf1;hb=67445f4e22ad924394acdd4fd49e6f238244a5ca;hpb=153bda8f03c670caf137f745350c0215b9be2147 diff --git a/src/core/manager.c b/src/core/manager.c index 95056296a..3cd99154e 100644 --- a/src/core/manager.c +++ b/src/core/manager.c @@ -41,9 +41,12 @@ #include #endif -#include +#include "systemd/sd-daemon.h" +#include "systemd/sd-id128.h" +#include "systemd/sd-messages.h" #include "manager.h" +#include "transaction.h" #include "hashmap.h" #include "macro.h" #include "strv.h" @@ -64,6 +67,7 @@ #include "virt.h" #include "watchdog.h" #include "cgroup-util.h" +#include "path-util.h" /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */ #define GC_QUEUE_ENTRIES_MAX 16 @@ -72,8 +76,7 @@ #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC) /* Where clients shall send notification messages to */ -#define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify" -#define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify" +#define NOTIFY_SOCKET "@/org/freedesktop/systemd1/notify" static int manager_setup_notify(Manager *m) { union { @@ -81,13 +84,13 @@ static int manager_setup_notify(Manager *m) { struct sockaddr_un un; } sa; struct epoll_event ev; - int one = 1, r; - mode_t u; + int one = 1; assert(m); m->notify_watch.type = WATCH_NOTIFY; - if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) { + m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0); + if (m->notify_watch.fd < 0) { log_error("Failed to allocate notification socket: %m"); return -errno; } @@ -95,21 +98,14 @@ static int manager_setup_notify(Manager *m) { zero(sa); sa.sa.sa_family = AF_UNIX; - if (getpid() != 1) - snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull()); - else { - unlink(NOTIFY_SOCKET_SYSTEM); - strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path)); - } - - if (sa.un.sun_path[0] == '@') - sa.un.sun_path[0] = 0; + if (getpid() != 1 || detect_container(NULL) > 0) + snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET "/%llu", random_ull()); + else + strncpy(sa.un.sun_path, NOTIFY_SOCKET, sizeof(sa.un.sun_path)); - u = umask(0111); - r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)); - umask(u); + sa.un.sun_path[0] = 0; - if (r < 0) { + if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) { log_error("bind() failed: %m"); return -errno; } @@ -126,10 +122,9 @@ static int manager_setup_notify(Manager *m) { if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0) return -errno; - if (sa.un.sun_path[0] == 0) - sa.un.sun_path[0] = '@'; - - if (!(m->notify_socket = strdup(sa.un.sun_path))) + sa.un.sun_path[0] = '@'; + m->notify_socket = strdup(sa.un.sun_path); + if (!m->notify_socket) return -ENOMEM; log_debug("Using notification socket %s", m->notify_socket); @@ -143,8 +138,9 @@ static int enable_special_signals(Manager *m) { assert(m); /* Enable that we get SIGINT on control-alt-del. In containers - * this will fail with EPERM, so ignore that. */ - if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM) + * this will fail with EPERM (older) or EINVAL (newer), so + * ignore that. 
*/ + if (reboot(RB_DISABLE_CAD) < 0 && errno != EPERM && errno != EINVAL) log_warning("Failed to enable ctrl-alt-del handling: %m"); fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC); @@ -220,7 +216,7 @@ static int manager_setup_signals(Manager *m) { if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0) return -errno; - if (m->running_as == MANAGER_SYSTEM) + if (m->running_as == SYSTEMD_SYSTEM) return enable_special_signals(m); return 0; @@ -241,23 +237,25 @@ static void manager_strip_environment(Manager *m) { strv_remove_prefix(m->environment, "RD_"); } -int manager_new(ManagerRunningAs running_as, Manager **_m) { +int manager_new(SystemdRunningAs running_as, Manager **_m) { Manager *m; int r = -ENOMEM; assert(_m); assert(running_as >= 0); - assert(running_as < _MANAGER_RUNNING_AS_MAX); + assert(running_as < _SYSTEMD_RUNNING_AS_MAX); - if (!(m = new0(Manager, 1))) + m = new0(Manager, 1); + if (!m) return -ENOMEM; - dual_timestamp_get(&m->startup_timestamp); + dual_timestamp_get(&m->userspace_timestamp); m->running_as = running_as; m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1; m->exit_code = _MANAGER_EXIT_CODE_INVALID; m->pin_cgroupfs_fd = -1; + m->idle_pipe[0] = m->idle_pipe[1] = -1; #ifdef HAVE_AUDIT m->audit_fd = -1; @@ -272,7 +270,7 @@ int manager_new(ManagerRunningAs running_as, Manager **_m) { manager_strip_environment(m); - if (running_as == MANAGER_SYSTEM) { + if (running_as == SYSTEMD_SYSTEM) { m->default_controllers = strv_new("cpu", NULL); if (!m->default_controllers) goto fail; @@ -284,9 +282,6 @@ int manager_new(ManagerRunningAs running_as, Manager **_m) { if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func))) goto fail; - if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func))) - goto fail; - if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func))) goto fail; @@ -299,9 +294,6 @@ int manager_new(ManagerRunningAs running_as, Manager **_m) { if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0) goto fail; - if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0) - goto fail; - if ((r = manager_setup_signals(m)) < 0) goto fail; @@ -312,7 +304,7 @@ int manager_new(ManagerRunningAs running_as, Manager **_m) { goto fail; /* Try to connect to the busses, if possible. 
*/ - if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0) + if ((r = bus_init(m, running_as != SYSTEMD_SYSTEM)) < 0) goto fail; #ifdef HAVE_AUDIT @@ -454,14 +446,10 @@ static unsigned manager_dispatch_gc_queue(Manager *m) { } static void manager_clear_jobs_and_units(Manager *m) { - Job *j; Unit *u; assert(m); - while ((j = hashmap_first(m->transaction_jobs))) - job_free(j); - while ((u = hashmap_first(m->units))) unit_free(u); @@ -474,13 +462,13 @@ static void manager_clear_jobs_and_units(Manager *m) { assert(!m->cleanup_queue); assert(!m->gc_queue); - assert(hashmap_isempty(m->transaction_jobs)); assert(hashmap_isempty(m->jobs)); assert(hashmap_isempty(m->units)); } void manager_free(Manager *m) { UnitType c; + int i; assert(m); @@ -498,1234 +486,191 @@ void manager_free(Manager *m) { bus_done(m); - hashmap_free(m->units); - hashmap_free(m->jobs); - hashmap_free(m->transaction_jobs); - hashmap_free(m->watch_pids); - hashmap_free(m->watch_bus); - - if (m->epoll_fd >= 0) - close_nointr_nofail(m->epoll_fd); - if (m->signal_watch.fd >= 0) - close_nointr_nofail(m->signal_watch.fd); - if (m->notify_watch.fd >= 0) - close_nointr_nofail(m->notify_watch.fd); - -#ifdef HAVE_AUDIT - if (m->audit_fd >= 0) - audit_close(m->audit_fd); -#endif - - free(m->notify_socket); - - lookup_paths_free(&m->lookup_paths); - strv_free(m->environment); - - strv_free(m->default_controllers); - - hashmap_free(m->cgroup_bondings); - set_free_free(m->unit_path_cache); - - free(m); -} - -int manager_enumerate(Manager *m) { - int r = 0, q; - UnitType c; - - assert(m); - - /* Let's ask every type to load all units from disk/kernel - * that it might know */ - for (c = 0; c < _UNIT_TYPE_MAX; c++) - if (unit_vtable[c]->enumerate) - if ((q = unit_vtable[c]->enumerate(m)) < 0) - r = q; - - manager_dispatch_load_queue(m); - return r; -} - -int manager_coldplug(Manager *m) { - int r = 0, q; - Iterator i; - Unit *u; - char *k; - - assert(m); - - /* Then, let's set up their initial state. */ - HASHMAP_FOREACH_KEY(u, k, m->units, i) { - - /* ignore aliases */ - if (u->id != k) - continue; - - if ((q = unit_coldplug(u)) < 0) - r = q; - } - - return r; -} - -static void manager_build_unit_path_cache(Manager *m) { - char **i; - DIR *d = NULL; - int r; - - assert(m); - - set_free_free(m->unit_path_cache); - - if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) { - log_error("Failed to allocate unit path cache."); - return; - } - - /* This simply builds a list of files we know exist, so that - * we don't always have to go to disk */ - - STRV_FOREACH(i, m->lookup_paths.unit_path) { - struct dirent *de; - - if (!(d = opendir(*i))) { - log_error("Failed to open directory: %m"); - continue; - } - - while ((de = readdir(d))) { - char *p; - - if (ignore_file(de->d_name)) - continue; - - p = join(streq(*i, "/") ? 
"" : *i, "/", de->d_name, NULL); - if (!p) { - r = -ENOMEM; - goto fail; - } - - if ((r = set_put(m->unit_path_cache, p)) < 0) { - free(p); - goto fail; - } - } - - closedir(d); - d = NULL; - } - - return; - -fail: - log_error("Failed to build unit path cache: %s", strerror(-r)); - - set_free_free(m->unit_path_cache); - m->unit_path_cache = NULL; - - if (d) - closedir(d); -} - -int manager_startup(Manager *m, FILE *serialization, FDSet *fds) { - int r, q; - - assert(m); - - manager_run_generators(m); - - manager_build_unit_path_cache(m); - - /* If we will deserialize make sure that during enumeration - * this is already known, so we increase the counter here - * already */ - if (serialization) - m->n_reloading ++; - - /* First, enumerate what we can from all config files */ - r = manager_enumerate(m); - - /* Second, deserialize if there is something to deserialize */ - if (serialization) - if ((q = manager_deserialize(m, serialization, fds)) < 0) - r = q; - - /* Third, fire things up! */ - if ((q = manager_coldplug(m)) < 0) - r = q; - - if (serialization) { - assert(m->n_reloading > 0); - m->n_reloading --; - } - - return r; -} - -static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) { - assert(m); - assert(j); - - /* Deletes one job from the transaction */ - - manager_transaction_unlink_job(m, j, delete_dependencies); - - if (!j->installed) - job_free(j); -} - -static void transaction_delete_unit(Manager *m, Unit *u) { - Job *j; - - /* Deletes all jobs associated with a certain unit from the - * transaction */ - - while ((j = hashmap_get(m->transaction_jobs, u))) - transaction_delete_job(m, j, true); -} - -static void transaction_clean_dependencies(Manager *m) { - Iterator i; - Job *j; - - assert(m); - - /* Drops all dependencies of all installed jobs */ - - HASHMAP_FOREACH(j, m->jobs, i) { - while (j->subject_list) - job_dependency_free(j->subject_list); - while (j->object_list) - job_dependency_free(j->object_list); - } - - assert(!m->transaction_anchor); -} - -static void transaction_abort(Manager *m) { - Job *j; - - assert(m); - - while ((j = hashmap_first(m->transaction_jobs))) - if (j->installed) - transaction_delete_job(m, j, true); - else - job_free(j); - - assert(hashmap_isempty(m->transaction_jobs)); - - transaction_clean_dependencies(m); -} - -static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) { - JobDependency *l; - - assert(m); - - /* A recursive sweep through the graph that marks all units - * that matter to the anchor job, i.e. are directly or - * indirectly a dependency of the anchor job via paths that - * are fully marked as mattering. */ - - if (j) - l = j->subject_list; - else - l = m->transaction_anchor; - - LIST_FOREACH(subject, l, l) { - - /* This link does not matter */ - if (!l->matters) - continue; - - /* This unit has already been marked */ - if (l->object->generation == generation) - continue; - - l->object->matters_to_anchor = true; - l->object->generation = generation; - - transaction_find_jobs_that_matter_to_anchor(m, l->object, generation); - } -} - -static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) { - JobDependency *l, *last; - - assert(j); - assert(other); - assert(j->unit == other->unit); - assert(!j->installed); - - /* Merges 'other' into 'j' and then deletes 'other'. 
*/ - - j->type = t; - j->state = JOB_WAITING; - j->override = j->override || other->override; - - j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor; - - /* Patch us in as new owner of the JobDependency objects */ - last = NULL; - LIST_FOREACH(subject, l, other->subject_list) { - assert(l->subject == other); - l->subject = j; - last = l; - } - - /* Merge both lists */ - if (last) { - last->subject_next = j->subject_list; - if (j->subject_list) - j->subject_list->subject_prev = last; - j->subject_list = other->subject_list; - } - - /* Patch us in as new owner of the JobDependency objects */ - last = NULL; - LIST_FOREACH(object, l, other->object_list) { - assert(l->object == other); - l->object = j; - last = l; - } - - /* Merge both lists */ - if (last) { - last->object_next = j->object_list; - if (j->object_list) - j->object_list->object_prev = last; - j->object_list = other->object_list; - } - - /* Kill the other job */ - other->subject_list = NULL; - other->object_list = NULL; - transaction_delete_job(m, other, true); -} - -static bool job_is_conflicted_by(Job *j) { - JobDependency *l; - - assert(j); - - /* Returns true if this job is pulled in by a least one - * ConflictedBy dependency. */ - - LIST_FOREACH(object, l, j->object_list) - if (l->conflicts) - return true; - - return false; -} - -static int delete_one_unmergeable_job(Manager *m, Job *j) { - Job *k; - - assert(j); - - /* Tries to delete one item in the linked list - * j->transaction_next->transaction_next->... that conflicts - * with another one, in an attempt to make an inconsistent - * transaction work. */ - - /* We rely here on the fact that if a merged with b does not - * merge with c, either a or b merge with c neither */ - LIST_FOREACH(transaction, j, j) - LIST_FOREACH(transaction, k, j->transaction_next) { - Job *d; - - /* Is this one mergeable? Then skip it */ - if (job_type_is_mergeable(j->type, k->type)) - continue; - - /* Ok, we found two that conflict, let's see if we can - * drop one of them */ - if (!j->matters_to_anchor && !k->matters_to_anchor) { - - /* Both jobs don't matter, so let's - * find the one that is smarter to - * remove. Let's think positive and - * rather remove stops then starts -- - * except if something is being - * stopped because it is conflicted by - * another unit in which case we - * rather remove the start. */ - - log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j))); - log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k))); - - if (j->type == JOB_STOP) { - - if (job_is_conflicted_by(j)) - d = k; - else - d = j; - - } else if (k->type == JOB_STOP) { - - if (job_is_conflicted_by(k)) - d = j; - else - d = k; - } else - d = j; - - } else if (!j->matters_to_anchor) - d = j; - else if (!k->matters_to_anchor) - d = k; - else - return -ENOEXEC; - - /* Ok, we can drop one, so let's do so. */ - log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type)); - transaction_delete_job(m, d, true); - return 0; - } - - return -EINVAL; -} - -static int transaction_merge_jobs(Manager *m, DBusError *e) { - Job *j; - Iterator i; - int r; - - assert(m); - - /* First step, check whether any of the jobs for one specific - * task conflict. If so, try to drop one of them. 
*/ - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - JobType t; - Job *k; - - t = j->type; - LIST_FOREACH(transaction, k, j->transaction_next) { - if (job_type_merge(&t, k->type) >= 0) - continue; - - /* OK, we could not merge all jobs for this - * action. Let's see if we can get rid of one - * of them */ - - if ((r = delete_one_unmergeable_job(m, j)) >= 0) - /* Ok, we managed to drop one, now - * let's ask our callers to call us - * again after garbage collecting */ - return -EAGAIN; - - /* We couldn't merge anything. Failure */ - dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.", - job_type_to_string(t), job_type_to_string(k->type), k->unit->id); - return r; - } - } - - /* Second step, merge the jobs. */ - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - JobType t = j->type; - Job *k; - - /* Merge all transactions */ - LIST_FOREACH(transaction, k, j->transaction_next) - assert_se(job_type_merge(&t, k->type) == 0); - - /* If an active job is mergeable, merge it too */ - if (j->unit->job) - job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */ - - while ((k = j->transaction_next)) { - if (j->installed) { - transaction_merge_and_delete_job(m, k, j, t); - j = k; - } else - transaction_merge_and_delete_job(m, j, k, t); - } - - if (j->unit->job && !j->installed) - transaction_merge_and_delete_job(m, j, j->unit->job, t); - - assert(!j->transaction_next); - assert(!j->transaction_prev); - } - - return 0; -} - -static void transaction_drop_redundant(Manager *m) { - bool again; - - assert(m); - - /* Goes through the transaction and removes all jobs that are - * a noop */ - - do { - Job *j; - Iterator i; - - again = false; - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - bool changes_something = false; - Job *k; - - LIST_FOREACH(transaction, k, j) { - - if (!job_is_anchor(k) && - (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) && - (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type))) - continue; - - changes_something = true; - break; - } - - if (changes_something) - continue; - - /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */ - transaction_delete_job(m, j, false); - again = true; - break; - } - - } while (again); -} - -static bool unit_matters_to_anchor(Unit *u, Job *j) { - assert(u); - assert(!j->transaction_prev); - - /* Checks whether at least one of the jobs for this unit - * matters to the anchor. */ - - LIST_FOREACH(transaction, j, j) - if (j->matters_to_anchor) - return true; - - return false; -} - -static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) { - Iterator i; - Unit *u; - int r; - - assert(m); - assert(j); - assert(!j->transaction_prev); - - /* Does a recursive sweep through the ordering graph, looking - * for a cycle. If we find cycle we try to break it. */ - - /* Have we seen this before? */ - if (j->generation == generation) { - Job *k, *delete; - - /* If the marker is NULL we have been here already and - * decided the job was loop-free from here. Hence - * shortcut things and return right-away. */ - if (!j->marker) - return 0; - - /* So, the marker is not NULL and we already have been - * here. We have a cycle. Let's try to break it. We go - * backwards in our path and try to find a suitable - * job to remove. 
We use the marker to find our way - * back, since smart how we are we stored our way back - * in there. */ - log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type)); - - delete = NULL; - for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) { - - log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type)); - - if (!delete && - !k->installed && - !unit_matters_to_anchor(k->unit, k)) { - /* Ok, we can drop this one, so let's - * do so. */ - delete = k; - } - - /* Check if this in fact was the beginning of - * the cycle */ - if (k == j) - break; - } - - - if (delete) { - log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type)); - transaction_delete_unit(m, delete->unit); - return -EAGAIN; - } - - log_error("Unable to break cycle"); - - dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details."); - return -ENOEXEC; - } - - /* Make the marker point to where we come from, so that we can - * find our way backwards if we want to break a cycle. We use - * a special marker for the beginning: we point to - * ourselves. */ - j->marker = from ? from : j; - j->generation = generation; - - /* We assume that the the dependencies are bidirectional, and - * hence can ignore UNIT_AFTER */ - SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) { - Job *o; - - /* Is there a job for this unit? */ - if (!(o = hashmap_get(m->transaction_jobs, u))) - - /* Ok, there is no job for this in the - * transaction, but maybe there is already one - * running? */ - if (!(o = u->job)) - continue; - - if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0) - return r; - } - - /* Ok, let's backtrack, and remember that this entry is not on - * our path anymore. */ - j->marker = NULL; - - return 0; -} - -static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) { - Job *j; - int r; - Iterator i; - unsigned g; - - assert(m); - assert(generation); - - /* Check if the ordering graph is cyclic. If it is, try to fix - * that up by dropping one of the jobs. */ - - g = (*generation)++; - - HASHMAP_FOREACH(j, m->transaction_jobs, i) - if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0) - return r; - - return 0; -} - -static void transaction_collect_garbage(Manager *m) { - bool again; - - assert(m); - - /* Drop jobs that are not required by any other job */ - - do { - Iterator i; - Job *j; - - again = false; - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - if (j->object_list) { - /* log_debug("Keeping job %s/%s because of %s/%s", */ - /* j->unit->id, job_type_to_string(j->type), */ - /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */ - /* j->object_list->subject ? 
job_type_to_string(j->object_list->subject->type) : "root"); */ - continue; - } - - /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */ - transaction_delete_job(m, j, true); - again = true; - break; - } - - } while (again); -} - -static int transaction_is_destructive(Manager *m, DBusError *e) { - Iterator i; - Job *j; - - assert(m); - - /* Checks whether applying this transaction means that - * existing jobs would be replaced */ - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - - /* Assume merged */ - assert(!j->transaction_prev); - assert(!j->transaction_next); - - if (j->unit->job && - j->unit->job != j && - !job_type_is_superset(j->type, j->unit->job->type)) { - - dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive."); - return -EEXIST; - } - } - - return 0; -} - -static void transaction_minimize_impact(Manager *m) { - bool again; - assert(m); - - /* Drops all unnecessary jobs that reverse already active jobs - * or that stop a running service. */ - - do { - Job *j; - Iterator i; - - again = false; - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - LIST_FOREACH(transaction, j, j) { - bool stops_running_service, changes_existing_job; - - /* If it matters, we shouldn't drop it */ - if (j->matters_to_anchor) - continue; - - /* Would this stop a running service? - * Would this change an existing job? - * If so, let's drop this entry */ - - stops_running_service = - j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit)); - - changes_existing_job = - j->unit->job && - job_type_is_conflicting(j->type, j->unit->job->type); - - if (!stops_running_service && !changes_existing_job) - continue; - - if (stops_running_service) - log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type)); - - if (changes_existing_job) - log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type)); - - /* Ok, let's get rid of this */ - log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type)); - - transaction_delete_job(m, j, true); - again = true; - break; - } - - if (again) - break; - } - - } while (again); -} - -static int transaction_apply(Manager *m, JobMode mode) { - Iterator i; - Job *j; - int r; - - /* Moves the transaction jobs to the set of active jobs */ - - if (mode == JOB_ISOLATE) { - - /* When isolating first kill all installed jobs which - * aren't part of the new transaction */ - rescan: - HASHMAP_FOREACH(j, m->jobs, i) { - assert(j->installed); - - if (hashmap_get(m->transaction_jobs, j->unit)) - continue; - - /* 'j' itself is safe to remove, but if other jobs - are invalidated recursively, our iterator may become - invalid and we need to start over. */ - if (job_finish_and_invalidate(j, JOB_CANCELED) > 0) - goto rescan; - } - } - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - /* Assume merged */ - assert(!j->transaction_prev); - assert(!j->transaction_next); - - if (j->installed) - continue; - - if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0) - goto rollback; - } - - while ((j = hashmap_steal_first(m->transaction_jobs))) { - if (j->installed) { - /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */ - continue; - } - - if (j->unit->job) - job_free(j->unit->job); - - j->unit->job = j; - j->installed = true; - m->n_installed_jobs ++; - - /* We're fully installed. Now let's free data we don't - * need anymore. 
*/ - - assert(!j->transaction_next); - assert(!j->transaction_prev); - - job_add_to_run_queue(j); - job_add_to_dbus_queue(j); - job_start_timer(j); - - log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); - } - - /* As last step, kill all remaining job dependencies. */ - transaction_clean_dependencies(m); - - return 0; - -rollback: - - HASHMAP_FOREACH(j, m->transaction_jobs, i) { - if (j->installed) - continue; - - hashmap_remove(m->jobs, UINT32_TO_PTR(j->id)); - } - - return r; -} - -static int transaction_activate(Manager *m, JobMode mode, DBusError *e) { - int r; - unsigned generation = 1; - - assert(m); - - /* This applies the changes recorded in transaction_jobs to - * the actual list of jobs, if possible. */ - - /* First step: figure out which jobs matter */ - transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++); - - /* Second step: Try not to stop any running services if - * we don't have to. Don't try to reverse running - * jobs if we don't have to. */ - if (mode == JOB_FAIL) - transaction_minimize_impact(m); - - /* Third step: Drop redundant jobs */ - transaction_drop_redundant(m); - - for (;;) { - /* Fourth step: Let's remove unneeded jobs that might - * be lurking. */ - if (mode != JOB_ISOLATE) - transaction_collect_garbage(m); - - /* Fifth step: verify order makes sense and correct - * cycles if necessary and possible */ - if ((r = transaction_verify_order(m, &generation, e)) >= 0) - break; - - if (r != -EAGAIN) { - log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r)); - goto rollback; - } - - /* Let's see if the resulting transaction ordering - * graph is still cyclic... */ - } - - for (;;) { - /* Sixth step: let's drop unmergeable entries if - * necessary and possible, merge entries we can - * merge */ - if ((r = transaction_merge_jobs(m, e)) >= 0) - break; - - if (r != -EAGAIN) { - log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r)); - goto rollback; - } - - /* Seventh step: an entry got dropped, let's garbage - * collect its dependencies. */ - if (mode != JOB_ISOLATE) - transaction_collect_garbage(m); - - /* Let's see if the resulting transaction still has - * unmergeable entries ... */ - } - - /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */ - transaction_drop_redundant(m); - - /* Ninth step: check whether we can actually apply this */ - if (mode == JOB_FAIL) - if ((r = transaction_is_destructive(m, e)) < 0) { - log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r)); - goto rollback; - } - - /* Tenth step: apply changes */ - if ((r = transaction_apply(m, mode)) < 0) { - log_warning("Failed to apply transaction: %s", strerror(-r)); - goto rollback; - } - - assert(hashmap_isempty(m->transaction_jobs)); - assert(!m->transaction_anchor); - - return 0; - -rollback: - transaction_abort(m); - return r; -} - -static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) { - Job *j, *f; - - assert(m); - assert(unit); - - /* Looks for an existing prospective job and returns that. If - * it doesn't exist it is created and added to the prospective - * jobs list. 
*/ - - f = hashmap_get(m->transaction_jobs, unit); - - LIST_FOREACH(transaction, j, f) { - assert(j->unit == unit); - - if (j->type == type) { - if (is_new) - *is_new = false; - return j; - } - } - - if (unit->job && unit->job->type == type) - j = unit->job; - else if (!(j = job_new(m, type, unit))) - return NULL; - - j->generation = 0; - j->marker = NULL; - j->matters_to_anchor = false; - j->override = override; - - LIST_PREPEND(Job, transaction, f, j); - - if (hashmap_replace(m->transaction_jobs, unit, f) < 0) { - job_free(j); - return NULL; - } - - if (is_new) - *is_new = true; - - /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */ - - return j; -} - -void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) { - assert(m); - assert(j); - - if (j->transaction_prev) - j->transaction_prev->transaction_next = j->transaction_next; - else if (j->transaction_next) - hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next); - else - hashmap_remove_value(m->transaction_jobs, j->unit, j); - - if (j->transaction_next) - j->transaction_next->transaction_prev = j->transaction_prev; - - j->transaction_prev = j->transaction_next = NULL; - - while (j->subject_list) - job_dependency_free(j->subject_list); - - while (j->object_list) { - Job *other = j->object_list->matters ? j->object_list->subject : NULL; - - job_dependency_free(j->object_list); - - if (other && delete_dependencies) { - log_debug("Deleting job %s/%s as dependency of job %s/%s", - other->unit->id, job_type_to_string(other->type), - j->unit->id, job_type_to_string(j->type)); - transaction_delete_job(m, other, delete_dependencies); - } - } -} - -static int transaction_add_job_and_dependencies( - Manager *m, - JobType type, - Unit *unit, - Job *by, - bool matters, - bool override, - bool conflicts, - bool ignore_requirements, - bool ignore_order, - DBusError *e, - Job **_ret) { - Job *ret; - Iterator i; - Unit *dep; - int r; - bool is_new; - - assert(m); - assert(type < _JOB_TYPE_MAX); - assert(unit); - - /* log_debug("Pulling in %s/%s from %s/%s", */ - /* unit->id, job_type_to_string(type), */ - /* by ? by->unit->id : "NA", */ - /* by ? job_type_to_string(by->type) : "NA"); */ - - if (unit->load_state != UNIT_LOADED && - unit->load_state != UNIT_ERROR && - unit->load_state != UNIT_MASKED) { - dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id); - return -EINVAL; - } - - if (type != JOB_STOP && unit->load_state == UNIT_ERROR) { - dbus_set_error(e, BUS_ERROR_LOAD_FAILED, - "Unit %s failed to load: %s. " - "See system logs and 'systemctl status %s' for details.", - unit->id, - strerror(-unit->load_error), - unit->id); - return -EINVAL; - } - - if (type != JOB_STOP && unit->load_state == UNIT_MASKED) { - dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id); - return -EINVAL; - } - - if (!unit_job_is_applicable(unit, type)) { - dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id); - return -EBADR; - } - - /* First add the job. */ - if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new))) - return -ENOMEM; - - ret->ignore_order = ret->ignore_order || ignore_order; - - /* Then, add a link to the job. 
*/ - if (!job_dependency_new(by, ret, matters, conflicts)) - return -ENOMEM; - - if (is_new && !ignore_requirements) { - Set *following; - - /* If we are following some other unit, make sure we - * add all dependencies of everybody following. */ - if (unit_following_set(ret->unit, &following) > 0) { - SET_FOREACH(dep, following, i) - if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) { - log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); - - if (e) - dbus_error_free(e); - } - - set_free(following); - } + hashmap_free(m->units); + hashmap_free(m->jobs); + hashmap_free(m->watch_pids); + hashmap_free(m->watch_bus); - /* Finally, recursively add in all dependencies. */ - if (type == JOB_START || type == JOB_RELOAD_OR_START) { - SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) { - if (r != -EBADR) - goto fail; + if (m->epoll_fd >= 0) + close_nointr_nofail(m->epoll_fd); + if (m->signal_watch.fd >= 0) + close_nointr_nofail(m->signal_watch.fd); + if (m->notify_watch.fd >= 0) + close_nointr_nofail(m->notify_watch.fd); - if (e) - dbus_error_free(e); - } +#ifdef HAVE_AUDIT + if (m->audit_fd >= 0) + audit_close(m->audit_fd); +#endif - SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) { + free(m->notify_socket); - if (r != -EBADR) - goto fail; + lookup_paths_free(&m->lookup_paths); + strv_free(m->environment); - if (e) - dbus_error_free(e); - } + strv_free(m->default_controllers); - SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) { - log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); + hashmap_free(m->cgroup_bondings); + set_free_free(m->unit_path_cache); - if (e) - dbus_error_free(e); - } + close_pipe(m->idle_pipe); - SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) { - log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); + free(m->switch_root); + free(m->switch_root_init); - if (e) - dbus_error_free(e); - } + for (i = 0; i < RLIMIT_NLIMITS; i++) + free(m->rlimit[i]); - SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) { + free(m); +} - if (r != -EBADR) - goto fail; +int manager_enumerate(Manager *m) { + int r = 0, q; + UnitType c; - if (e) - dbus_error_free(e); - } + assert(m); - SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) { - log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); + /* Let's ask every type to load all units from disk/kernel + * that it might know */ + for (c = 0; c < _UNIT_TYPE_MAX; c++) + if (unit_vtable[c]->enumerate) + if ((q = unit_vtable[c]->enumerate(m)) < 
0) + r = q; - if (e) - dbus_error_free(e); - } + manager_dispatch_load_queue(m); + return r; +} - SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) { +int manager_coldplug(Manager *m) { + int r = 0, q; + Iterator i; + Unit *u; + char *k; - if (r != -EBADR) - goto fail; + assert(m); - if (e) - dbus_error_free(e); - } + /* Then, let's set up their initial state. */ + HASHMAP_FOREACH_KEY(u, k, m->units, i) { - SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) - if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) { - log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); + /* ignore aliases */ + if (u->id != k) + continue; - if (e) - dbus_error_free(e); - } + if ((q = unit_coldplug(u)) < 0) + r = q; + } - } + return r; +} - if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) { +static void manager_build_unit_path_cache(Manager *m) { + char **i; + DIR *d = NULL; + int r; - SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) - if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) { + assert(m); - if (r != -EBADR) - goto fail; + set_free_free(m->unit_path_cache); - if (e) - dbus_error_free(e); - } + if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) { + log_error("Failed to allocate unit path cache."); + return; + } - SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) - if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) { + /* This simply builds a list of files we know exist, so that + * we don't always have to go to disk */ - if (r != -EBADR) - goto fail; + STRV_FOREACH(i, m->lookup_paths.unit_path) { + struct dirent *de; - if (e) - dbus_error_free(e); - } + if (!(d = opendir(*i))) { + log_error("Failed to open directory: %m"); + continue; } - if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) { + while ((de = readdir(d))) { + char *p; - SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) { - r = transaction_add_job_and_dependencies(m, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL); + if (ignore_file(de->d_name)) + continue; - if (r < 0) { - log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r)); + p = strjoin(streq(*i, "/") ? 
"" : *i, "/", de->d_name, NULL); + if (!p) { + r = -ENOMEM; + goto fail; + } - if (e) - dbus_error_free(e); - } + if ((r = set_put(m->unit_path_cache, p)) < 0) { + free(p); + goto fail; } } - /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */ + closedir(d); + d = NULL; } - if (_ret) - *_ret = ret; - - return 0; + return; fail: - return r; + log_error("Failed to build unit path cache: %s", strerror(-r)); + + set_free_free(m->unit_path_cache); + m->unit_path_cache = NULL; + + if (d) + closedir(d); } -static int transaction_add_isolate_jobs(Manager *m) { - Iterator i; - Unit *u; - char *k; - int r; +int manager_startup(Manager *m, FILE *serialization, FDSet *fds) { + int r, q; assert(m); - HASHMAP_FOREACH_KEY(u, k, m->units, i) { + manager_run_generators(m); - /* ignore aliases */ - if (u->id != k) - continue; + r = lookup_paths_init( + &m->lookup_paths, m->running_as, true, + m->generator_unit_path, + m->generator_unit_path_early, + m->generator_unit_path_late); + if (r < 0) + return r; - if (u->ignore_on_isolate) - continue; + manager_build_unit_path_cache(m); - /* No need to stop inactive jobs */ - if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job) - continue; + /* If we will deserialize make sure that during enumeration + * this is already known, so we increase the counter here + * already */ + if (serialization) + m->n_reloading ++; - /* Is there already something listed for this? */ - if (hashmap_get(m->transaction_jobs, u)) - continue; + /* First, enumerate what we can from all config files */ + r = manager_enumerate(m); - if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0) - log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r)); + /* Second, deserialize if there is something to deserialize */ + if (serialization) { + q = manager_deserialize(m, serialization, fds); + if (q < 0) + r = q; } - return 0; + /* Third, fire things up! 
*/ + q = manager_coldplug(m); + if (q < 0) + r = q; + + if (serialization) { + assert(m->n_reloading > 0); + m->n_reloading --; + } + + return r; } int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) { int r; - Job *ret; + Transaction *tr; assert(m); assert(type < _JOB_TYPE_MAX); @@ -1744,28 +689,40 @@ int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool ove log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode)); - if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, - mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS, - mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) { - transaction_abort(m); - return r; - } + job_type_collapse(&type, unit); - if (mode == JOB_ISOLATE) - if ((r = transaction_add_isolate_jobs(m)) < 0) { - transaction_abort(m); - return r; - } + tr = transaction_new(); + if (!tr) + return -ENOMEM; - if ((r = transaction_activate(m, mode, e)) < 0) - return r; + r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, override, false, + mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS, + mode == JOB_IGNORE_DEPENDENCIES, e); + if (r < 0) + goto tr_abort; + + if (mode == JOB_ISOLATE) { + r = transaction_add_isolate_jobs(tr, m); + if (r < 0) + goto tr_abort; + } - log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id); + r = transaction_activate(tr, m, mode, e); + if (r < 0) + goto tr_abort; + + log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) tr->anchor_job->id); if (_ret) - *_ret = ret; + *_ret = tr->anchor_job; + transaction_free(tr); return 0; + +tr_abort: + transaction_abort(tr); + transaction_free(tr); + return r; } int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) { @@ -1777,7 +734,8 @@ int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode assert(name); assert(mode < _JOB_MODE_MAX); - if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0) + r = manager_load_unit(m, name, NULL, NULL, &unit); + if (r < 0) return r; return manager_add_job(m, type, unit, mode, override, e, _ret); @@ -1839,11 +797,11 @@ int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DB } if (!name) - name = file_name_from_path(path); + name = path_get_file_name(path); t = unit_name_to_type(name); - if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid_no_type(name, false)) { + if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, false)) { dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name); return -EINVAL; } @@ -1889,7 +847,8 @@ int manager_load_unit(Manager *m, const char *name, const char *path, DBusError /* This will load the service information files, but not actually * start any services or anything. */ - if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0) + r = manager_load_unit_prepare(m, name, path, e, _ret); + if (r != 0) return r; manager_dispatch_load_queue(m); @@ -1929,10 +888,9 @@ void manager_clear_jobs(Manager *m) { assert(m); - transaction_abort(m); - while ((j = hashmap_first(m->jobs))) - job_finish_and_invalidate(j, JOB_CANCELED); + /* No need to recurse. We're cancelling all jobs. 
*/ + job_finish_and_invalidate(j, JOB_CANCELED, false); } unsigned manager_dispatch_run_queue(Manager *m) { @@ -2187,7 +1145,7 @@ static int manager_process_signal_fd(Manager *m) { break; case SIGTERM: - if (m->running_as == MANAGER_SYSTEM) { + if (m->running_as == SYSTEMD_SYSTEM) { /* This is for compatibility with the * original sysvinit */ m->exit_code = MANAGER_REEXECUTE; @@ -2197,7 +1155,7 @@ static int manager_process_signal_fd(Manager *m) { /* Fall through */ case SIGINT: - if (m->running_as == MANAGER_SYSTEM) { + if (m->running_as == SYSTEMD_SYSTEM) { manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE); break; } @@ -2211,14 +1169,14 @@ static int manager_process_signal_fd(Manager *m) { break; case SIGWINCH: - if (m->running_as == MANAGER_SYSTEM) + if (m->running_as == SYSTEMD_SYSTEM) manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE); /* This is a nop on non-init */ break; case SIGPWR: - if (m->running_as == MANAGER_SYSTEM) + if (m->running_as == SYSTEMD_SYSTEM) manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE); /* This is a nop on non-init */ @@ -2482,7 +1440,7 @@ int manager_loop(Manager *m) { int n; int wait_msec = -1; - if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) + if (m->runtime_watchdog > 0 && m->running_as == SYSTEMD_SYSTEM) watchdog_ping(); if (!ratelimit_test(&rl)) { @@ -2514,7 +1472,7 @@ int manager_loop(Manager *m) { continue; /* Sleep for half the watchdog time */ - if (m->runtime_watchdog > 0 && m->running_as == MANAGER_SYSTEM) { + if (m->runtime_watchdog > 0 && m->running_as == SYSTEMD_SYSTEM) { wait_msec = (int) (m->runtime_watchdog / 2 / USEC_PER_MSEC); if (wait_msec <= 0) wait_msec = 1; @@ -2541,9 +1499,10 @@ int manager_loop(Manager *m) { return m->exit_code; } -int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) { +int manager_load_unit_from_dbus_path(Manager *m, const char *s, DBusError *e, Unit **_u) { char *n; Unit *u; + int r; assert(m); assert(s); @@ -2552,14 +1511,15 @@ int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) { if (!startswith(s, "/org/freedesktop/systemd1/unit/")) return -EINVAL; - if (!(n = bus_path_unescape(s+31))) + n = bus_path_unescape(s+31); + if (!n) return -ENOMEM; - u = manager_get_unit(m, n); + r = manager_load_unit(m, n, NULL, e, &u); free(n); - if (!u) - return -ENOENT; + if (r < 0) + return r; *_u = u; @@ -2602,7 +1562,7 @@ void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) { if (m->n_reloading > 0) return; - if (m->running_as != MANAGER_SYSTEM) + if (m->running_as != SYSTEMD_SYSTEM) return; if (u->type != UNIT_SERVICE) @@ -2639,7 +1599,7 @@ void manager_send_unit_plymouth(Manager *m, Unit *u) { if (m->n_reloading > 0) return; - if (m->running_as != MANAGER_SYSTEM) + if (m->running_as != SYSTEMD_SYSTEM) return; if (u->type != UNIT_SERVICE && @@ -2671,7 +1631,7 @@ void manager_send_unit_plymouth(Manager *m, Unit *u) { } if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) { - log_error("Out of memory"); + log_oom(); goto finish; } @@ -2738,7 +1698,7 @@ int manager_open_serialization(Manager *m, FILE **_f) { assert(_f); - if (m->running_as == MANAGER_SYSTEM) + if (m->running_as == SYSTEMD_SYSTEM) asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid()); else asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid()); @@ -2768,7 +1728,7 @@ int manager_open_serialization(Manager *m, FILE **_f) { return 0; } -int manager_serialize(Manager *m, FILE *f, FDSet 
*fds) { +int manager_serialize(Manager *m, FILE *f, FDSet *fds, bool serialize_jobs) { Iterator i; Unit *u; const char *t; @@ -2782,10 +1742,18 @@ int manager_serialize(Manager *m, FILE *f, FDSet *fds) { fprintf(f, "current-job-id=%i\n", m->current_job_id); fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr)); + fprintf(f, "n-installed-jobs=%u\n", m->n_installed_jobs); + fprintf(f, "n-failed-jobs=%u\n", m->n_failed_jobs); + dual_timestamp_serialize(f, "firmware-timestamp", &m->firmware_timestamp); + dual_timestamp_serialize(f, "kernel-timestamp", &m->kernel_timestamp); + dual_timestamp_serialize(f, "loader-timestamp", &m->loader_timestamp); dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp); - dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp); - dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp); + + if (!in_initrd()) { + dual_timestamp_serialize(f, "userspace-timestamp", &m->userspace_timestamp); + dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp); + } fputc('\n', f); @@ -2800,7 +1768,7 @@ int manager_serialize(Manager *m, FILE *f, FDSet *fds) { fputs(u->id, f); fputc('\n', f); - if ((r = unit_serialize(u, f, fds)) < 0) { + if ((r = unit_serialize(u, f, fds, serialize_jobs)) < 0) { m->n_reloading --; return r; } @@ -2854,6 +1822,20 @@ int manager_deserialize(Manager *m, FILE *f, FDSet *fds) { log_debug("Failed to parse current job id value %s", l+15); else m->current_job_id = MAX(m->current_job_id, id); + } else if (startswith(l, "n-installed-jobs=")) { + uint32_t n; + + if (safe_atou32(l+17, &n) < 0) + log_debug("Failed to parse installed jobs counter %s", l+17); + else + m->n_installed_jobs += n; + } else if (startswith(l, "n-failed-jobs=")) { + uint32_t n; + + if (safe_atou32(l+14, &n) < 0) + log_debug("Failed to parse failed jobs counter %s", l+14); + else + m->n_failed_jobs += n; } else if (startswith(l, "taint-usr=")) { int b; @@ -2861,10 +1843,16 @@ int manager_deserialize(Manager *m, FILE *f, FDSet *fds) { log_debug("Failed to parse taint /usr flag %s", l+10); else m->taint_usr = m->taint_usr || b; - } else if (startswith(l, "initrd-timestamp=")) + } else if (startswith(l, "firmware-timestamp=")) + dual_timestamp_deserialize(l+19, &m->firmware_timestamp); + else if (startswith(l, "loader-timestamp=")) + dual_timestamp_deserialize(l+17, &m->loader_timestamp); + else if (startswith(l, "kernel-timestamp=")) + dual_timestamp_deserialize(l+17, &m->kernel_timestamp); + else if (startswith(l, "initrd-timestamp=")) dual_timestamp_deserialize(l+17, &m->initrd_timestamp); - else if (startswith(l, "startup-timestamp=")) - dual_timestamp_deserialize(l+18, &m->startup_timestamp); + else if (startswith(l, "userspace-timestamp=")) + dual_timestamp_deserialize(l+20, &m->userspace_timestamp); else if (startswith(l, "finish-timestamp=")) dual_timestamp_deserialize(l+17, &m->finish_timestamp); else @@ -2913,18 +1901,21 @@ int manager_reload(Manager *m) { assert(m); - if ((r = manager_open_serialization(m, &f)) < 0) + r = manager_open_serialization(m, &f); + if (r < 0) return r; m->n_reloading ++; - if (!(fds = fdset_new())) { + fds = fdset_new(); + if (!fds) { m->n_reloading --; r = -ENOMEM; goto finish; } - if ((r = manager_serialize(m, f, fds)) < 0) { + r = manager_serialize(m, f, fds, true); + if (r < 0) { m->n_reloading --; goto finish; } @@ -2938,29 +1929,37 @@ int manager_reload(Manager *m) { /* From here on there is no way back. 
*/ manager_clear_jobs_and_units(m); manager_undo_generators(m); - - /* Find new unit paths */ lookup_paths_free(&m->lookup_paths); - if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0) - r = q; + /* Find new unit paths */ manager_run_generators(m); + q = lookup_paths_init( + &m->lookup_paths, m->running_as, true, + m->generator_unit_path, + m->generator_unit_path_early, + m->generator_unit_path_late); + if (q < 0) + r = q; + manager_build_unit_path_cache(m); /* First, enumerate what we can from all config files */ - if ((q = manager_enumerate(m)) < 0) + q = manager_enumerate(m); + if (q < 0) r = q; /* Second, deserialize our stored data */ - if ((q = manager_deserialize(m, f, fds)) < 0) + q = manager_deserialize(m, f, fds); + if (q < 0) r = q; fclose(f); f = NULL; /* Third, fire things up! */ - if ((q = manager_coldplug(m)) < 0) + q = manager_coldplug(m); + if (q < 0) r = q; assert(m->n_reloading > 0); @@ -3018,68 +2017,154 @@ bool manager_unit_pending_inactive(Manager *m, const char *name) { void manager_check_finished(Manager *m) { char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX]; - usec_t kernel_usec, initrd_usec, userspace_usec, total_usec; + usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec; assert(m); - if (dual_timestamp_is_set(&m->finish_timestamp)) + if (hashmap_size(m->jobs) > 0) return; - if (hashmap_size(m->jobs) > 0) + /* Notify Type=idle units that we are done now */ + close_pipe(m->idle_pipe); + + /* Turn off confirm spawn now */ + m->confirm_spawn = false; + + if (dual_timestamp_is_set(&m->finish_timestamp)) return; dual_timestamp_get(&m->finish_timestamp); - if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) { + if (m->running_as == SYSTEMD_SYSTEM && detect_container(NULL) <= 0) { - userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic; - total_usec = m->finish_timestamp.monotonic; + /* Note that m->kernel_usec.monotonic is always at 0, + * and m->firmware_usec.monotonic and + * m->loader_usec.monotonic should be considered + * negative values. 
*/ - if (dual_timestamp_is_set(&m->initrd_timestamp)) { + firmware_usec = m->firmware_timestamp.monotonic - m->loader_timestamp.monotonic; + loader_usec = m->loader_timestamp.monotonic - m->kernel_timestamp.monotonic; + userspace_usec = m->finish_timestamp.monotonic - m->userspace_timestamp.monotonic; + total_usec = m->firmware_timestamp.monotonic + m->finish_timestamp.monotonic; - kernel_usec = m->initrd_timestamp.monotonic; - initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic; + if (dual_timestamp_is_set(&m->initrd_timestamp)) { - log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.", - format_timespan(kernel, sizeof(kernel), kernel_usec), - format_timespan(initrd, sizeof(initrd), initrd_usec), - format_timespan(userspace, sizeof(userspace), userspace_usec), - format_timespan(sum, sizeof(sum), total_usec)); + kernel_usec = m->initrd_timestamp.monotonic - m->kernel_timestamp.monotonic; + initrd_usec = m->userspace_timestamp.monotonic - m->initrd_timestamp.monotonic; + + if (!log_on_console()) + log_struct(LOG_INFO, + "MESSAGE_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(SD_MESSAGE_STARTUP_FINISHED), + "KERNEL_USEC=%llu", (unsigned long long) kernel_usec, + "INITRD_USEC=%llu", (unsigned long long) initrd_usec, + "USERSPACE_USEC=%llu", (unsigned long long) userspace_usec, + "MESSAGE=Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.", + format_timespan(kernel, sizeof(kernel), kernel_usec), + format_timespan(initrd, sizeof(initrd), initrd_usec), + format_timespan(userspace, sizeof(userspace), userspace_usec), + format_timespan(sum, sizeof(sum), total_usec), + NULL); } else { - kernel_usec = m->startup_timestamp.monotonic; + kernel_usec = m->userspace_timestamp.monotonic - m->kernel_timestamp.monotonic; initrd_usec = 0; - log_info("Startup finished in %s (kernel) + %s (userspace) = %s.", - format_timespan(kernel, sizeof(kernel), kernel_usec), - format_timespan(userspace, sizeof(userspace), userspace_usec), - format_timespan(sum, sizeof(sum), total_usec)); + if (!log_on_console()) + log_struct(LOG_INFO, + "MESSAGE_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(SD_MESSAGE_STARTUP_FINISHED), + "KERNEL_USEC=%llu", (unsigned long long) kernel_usec, + "USERSPACE_USEC=%llu", (unsigned long long) userspace_usec, + "MESSAGE=Startup finished in %s (kernel) + %s (userspace) = %s.", + format_timespan(kernel, sizeof(kernel), kernel_usec), + format_timespan(userspace, sizeof(userspace), userspace_usec), + format_timespan(sum, sizeof(sum), total_usec), + NULL); } } else { - userspace_usec = initrd_usec = kernel_usec = 0; - total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic; + firmware_usec = loader_usec = initrd_usec = kernel_usec = 0; + total_usec = userspace_usec = m->finish_timestamp.monotonic - m->userspace_timestamp.monotonic; - log_debug("Startup finished in %s.", - format_timespan(sum, sizeof(sum), total_usec)); + if (!log_on_console()) + log_struct(LOG_INFO, + "MESSAGE_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(SD_MESSAGE_STARTUP_FINISHED), + "USERSPACE_USEC=%llu", (unsigned long long) userspace_usec, + "MESSAGE=Startup finished in %s.", + format_timespan(sum, sizeof(sum), total_usec), + NULL); } - bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec); + bus_broadcast_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec); sd_notifyf(false, "READY=1\nSTATUS=Startup finished in %s.", format_timespan(sum, sizeof(sum), total_usec)); } +static 
int create_generator_dir(Manager *m, char **generator, const char *name) { + char *p; + int r; + + assert(m); + assert(generator); + assert(name); + + if (*generator) + return 0; + + if (m->running_as == SYSTEMD_SYSTEM && getpid() == 1) { + + p = strappend("/run/systemd/", name); + if (!p) + return log_oom(); + + r = mkdir_p_label(p, 0755); + if (r < 0) { + log_error("Failed to create generator directory: %s", strerror(-r)); + free(p); + return r; + } + } else { + p = strjoin("/tmp/systemd-", name, ".XXXXXX", NULL); + if (!p) + return log_oom(); + + if (!mkdtemp(p)) { + free(p); + log_error("Failed to create generator directory: %m"); + return -errno; + } + } + + *generator = p; + return 0; +} + +static void trim_generator_dir(Manager *m, char **generator) { + assert(m); + assert(generator); + + if (!*generator) + return; + + if (rmdir(*generator) >= 0) { + free(*generator); + *generator = NULL; + } + + return; +} + void manager_run_generators(Manager *m) { DIR *d = NULL; const char *generator_path; - const char *argv[3]; + const char *argv[5]; mode_t u; + int r; assert(m); - generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH; - if (!(d = opendir(generator_path))) { - + generator_path = m->running_as == SYSTEMD_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH; + d = opendir(generator_path); + if (!d) { if (errno == ENOENT) return; @@ -3087,79 +2172,57 @@ void manager_run_generators(Manager *m) { return; } - if (!m->generator_unit_path) { - const char *p; - char user_path[] = "/tmp/systemd-generator-XXXXXX"; - - if (m->running_as == MANAGER_SYSTEM && getpid() == 1) { - p = "/run/systemd/generator"; - - if (mkdir_p(p, 0755) < 0) { - log_error("Failed to create generator directory: %m"); - goto finish; - } + r = create_generator_dir(m, &m->generator_unit_path, "generator"); + if (r < 0) + goto finish; - } else { - if (!(p = mkdtemp(user_path))) { - log_error("Failed to create generator directory: %m"); - goto finish; - } - } + r = create_generator_dir(m, &m->generator_unit_path_early, "generator.early"); + if (r < 0) + goto finish; - if (!(m->generator_unit_path = strdup(p))) { - log_error("Failed to allocate generator unit path."); - goto finish; - } - } + r = create_generator_dir(m, &m->generator_unit_path_late, "generator.late"); + if (r < 0) + goto finish; argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */ argv[1] = m->generator_unit_path; - argv[2] = NULL; + argv[2] = m->generator_unit_path_early; + argv[3] = m->generator_unit_path_late; + argv[4] = NULL; u = umask(0022); execute_directory(generator_path, d, (char**) argv); umask(u); - if (rmdir(m->generator_unit_path) >= 0) { - /* Uh? we were able to remove this dir? 
I guess that
-                 * means the directory was empty, hence let's shortcut
-                 * this */
-
-                free(m->generator_unit_path);
-                m->generator_unit_path = NULL;
-                goto finish;
-        }
-
-        if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
-                char **l;
-
-                if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
-                        log_error("Failed to add generator directory to unit search path: %m");
-                        goto finish;
-                }
-
-                strv_free(m->lookup_paths.unit_path);
-                m->lookup_paths.unit_path = l;
-
-                log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
-        }
+        trim_generator_dir(m, &m->generator_unit_path);
+        trim_generator_dir(m, &m->generator_unit_path_early);
+        trim_generator_dir(m, &m->generator_unit_path_late);

finish:
        if (d)
                closedir(d);
}

-void manager_undo_generators(Manager *m) {
+static void remove_generator_dir(Manager *m, char **generator) {
        assert(m);
+        assert(generator);

-        if (!m->generator_unit_path)
+        if (!*generator)
                return;

-        strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
-        rm_rf(m->generator_unit_path, false, true, false);
+        strv_remove(m->lookup_paths.unit_path, *generator);
+        rm_rf(*generator, false, true, false);
+
+        free(*generator);
+        *generator = NULL;
+}
+
+void manager_undo_generators(Manager *m) {
+        assert(m);

-        free(m->generator_unit_path);
-        m->generator_unit_path = NULL;
+        remove_generator_dir(m, &m->generator_unit_path);
+        remove_generator_dir(m, &m->generator_unit_path_early);
+        remove_generator_dir(m, &m->generator_unit_path_late);
}

int manager_set_default_controllers(Manager *m, char **controllers) {
@@ -3179,12 +2242,29 @@ int manager_set_default_controllers(Manager *m, char **controllers) {
        return 0;
}

+int manager_set_default_rlimits(Manager *m, struct rlimit **default_rlimit) {
+        int i;
+
+        assert(m);
+
+        for (i = 0; i < RLIMIT_NLIMITS; i++) {
+                if (!default_rlimit[i])
+                        continue;
+
+                m->rlimit[i] = newdup(struct rlimit, default_rlimit[i], 1);
+                if (!m->rlimit[i])
+                        return -ENOMEM;
+        }
+
+        return 0;
+}
+
void manager_recheck_journal(Manager *m) {
        Unit *u;

        assert(m);

-        if (m->running_as != MANAGER_SYSTEM)
+        if (m->running_as != SYSTEMD_SYSTEM)
                return;

        u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
@@ -3207,7 +2287,7 @@ void manager_recheck_journal(Manager *m) {
void manager_set_show_status(Manager *m, bool b) {
        assert(m);

-        if (m->running_as != MANAGER_SYSTEM)
+        if (m->running_as != SYSTEMD_SYSTEM)
                return;

        m->show_status = b;
@@ -3221,7 +2301,7 @@ void manager_set_show_status(Manager *m, bool b) {
bool manager_get_show_status(Manager *m) {
        assert(m);

-        if (m->running_as != MANAGER_SYSTEM)
+        if (m->running_as != SYSTEMD_SYSTEM)
                return false;

        if (m->show_status)
@@ -3232,10 +2312,3 @@ bool manager_get_show_status(Manager *m) {

        return plymouth_running();
}
-
-static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
-        [MANAGER_SYSTEM] = "system",
-        [MANAGER_USER] = "user"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);
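
As a companion to the notification-socket change at the top of this patch (NOTIFY_SOCKET now lives in the abstract namespace and manager_setup_notify() binds it with sun_path[0] = 0), here is a minimal, hypothetical sketch of the client side of that protocol. It assumes the address arrives in the NOTIFY_SOCKET environment variable with a leading '@' marking the abstract namespace; notify_ready() is an illustrative helper and not part of this patch, and real services would normally call sd_notify(3) from sd-daemon.h instead of talking to the socket by hand.

/* Hypothetical stand-alone notifier (not part of this patch): sends a single
 * "READY=1" datagram to an sd_notify-style socket.  The address is taken from
 * $NOTIFY_SOCKET; a leading '@' selects the abstract namespace, exactly as
 * manager_setup_notify() sets it up above. */

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int notify_ready(void) {
        union {
                struct sockaddr sa;
                struct sockaddr_un un;
        } sa;
        const char *e, *msg = "READY=1";
        size_t salen;
        int fd, r = 0;

        e = getenv("NOTIFY_SOCKET");
        if (!e || (e[0] != '@' && e[0] != '/') || strlen(e) >= sizeof(sa.un.sun_path))
                return -EINVAL;

        fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
        if (fd < 0)
                return -errno;

        memset(&sa, 0, sizeof(sa));
        sa.un.sun_family = AF_UNIX;
        strncpy(sa.un.sun_path, e, sizeof(sa.un.sun_path));

        /* Abstract namespace: the '@' becomes a leading NUL byte, and the
         * address length must count that NUL plus the name bytes (no trailing
         * NUL) -- the same arithmetic the manager uses when binding. */
        if (sa.un.sun_path[0] == '@')
                sa.un.sun_path[0] = 0;

        salen = offsetof(struct sockaddr_un, sun_path) + strlen(e);

        if (sendto(fd, msg, strlen(msg), MSG_NOSIGNAL, &sa.sa, salen) < 0)
                r = -errno;

        close(fd);
        return r;
}

int main(void) {
        int r = notify_ready();

        if (r < 0)
                fprintf(stderr, "Failed to notify manager: %s\n", strerror(-r));

        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}

One practical property of abstract-namespace sockets is that they need no filesystem entry and vanish when the last descriptor is closed, which is why nothing like the old unlink(NOTIFY_SOCKET_SYSTEM) step survives in the new manager_setup_notify().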