SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
SIGRTMIN+0, /* systemd: start default.target */
- SIGRTMIN+1, /* systemd: start rescue.target */
+ SIGRTMIN+1, /* systemd: isolate rescue.target */
SIGRTMIN+2, /* systemd: isolate emergency.target */
SIGRTMIN+3, /* systemd: start halt.target */
SIGRTMIN+4, /* systemd: start poweroff.target */
SIGRTMIN+5, /* systemd: start reboot.target */
+ SIGRTMIN+6, /* systemd: start kexec.target */
+ SIGRTMIN+13, /* systemd: Immediate halt */
+ SIGRTMIN+14, /* systemd: Immediate poweroff */
+ SIGRTMIN+15, /* systemd: Immediate reboot */
+ SIGRTMIN+16, /* systemd: Immediate kexec */
+ SIGRTMIN+20, /* systemd: enable status messages */
+ SIGRTMIN+21, /* systemd: disable status messages */
-1);
assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
m->audit_fd = -1;
#endif
- m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = -1;
+ m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
if (!(m->environment = strv_copy(environ)))
goto fail;
+ if (!(m->default_controllers = strv_new("cpu", NULL)))
+ goto fail;
+
if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
goto fail;
goto fail;
/* Try to connect to the busses, if possible. */
- if ((r = bus_init(m)) < 0)
+ if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
goto fail;
#ifdef HAVE_AUDIT
}
enum {
- GC_OFFSET_IN_PATH, /* This one is on the path we were travelling */
+ GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
GC_OFFSET_UNSURE, /* No clue */
GC_OFFSET_GOOD, /* We still need this unit */
GC_OFFSET_BAD, /* We don't need this unit anymore */
* around */
manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
+ manager_undo_generators(m);
+
bus_done(m);
hashmap_free(m->units);
#endif
free(m->notify_socket);
- free(m->console);
lookup_paths_free(&m->lookup_paths);
strv_free(m->environment);
+ strv_free(m->default_controllers);
+
hashmap_free(m->cgroup_bondings);
set_free_free(m->unit_path_cache);
assert(m);
+ manager_run_generators(m);
+
manager_build_unit_path_cache(m);
/* If we will deserialize make sure that during enumeration
/* Tries to delete one item in the linked list
* j->transaction_next->transaction_next->... that conflicts
- * whith another one, in an attempt to make an inconsistent
+ * with another one, in an attempt to make an inconsistent
* transaction work. */
/* We rely here on the fact that if a merged with b does not
LIST_FOREACH(transaction, k, j) {
if (!job_is_anchor(k) &&
- job_type_is_redundant(k->type, unit_active_state(k->unit)))
+ (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
+ (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
continue;
changes_something = true;
if (changes_something)
continue;
- log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type));
+ /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
transaction_delete_job(m, j, false);
again = true;
break;
again = false;
HASHMAP_FOREACH(j, m->transaction_jobs, i) {
- if (j->object_list)
+ if (j->object_list) {
+ /* log_debug("Keeping job %s/%s because of %s/%s", */
+ /* j->unit->meta.id, job_type_to_string(j->type), */
+ /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
+ /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
continue;
+ }
- log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type));
+ /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
transaction_delete_job(m, j, true);
again = true;
break;
j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
changes_existing_job =
- j->unit->meta.job && job_type_is_conflicting(j->type, j->unit->meta.job->type);
+ j->unit->meta.job &&
+ job_type_is_conflicting(j->type, j->unit->meta.job->type);
if (!stops_running_service && !changes_existing_job)
continue;
}
while ((j = hashmap_steal_first(m->transaction_jobs))) {
- if (j->installed)
+ if (j->installed) {
+ /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
continue;
+ }
if (j->unit->meta.job)
job_free(j->unit->meta.job);
job_add_to_run_queue(j);
job_add_to_dbus_queue(j);
job_start_timer(j);
+
+ log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
}
/* As last step, kill all remaining job dependencies. */
/* Second step: Try not to stop any running services if
* we don't have to. Don't try to reverse running
* jobs if we don't have to. */
- if (mode != JOB_ISOLATE)
+ if (mode == JOB_FAIL)
transaction_minimize_impact(m);
/* Third step: Drop redundant jobs */
break;
if (r != -EAGAIN) {
- log_warning("Requested transaction contains unmergable jobs: %s", bus_error(e, r));
+ log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
goto rollback;
}
assert(m);
assert(unit);
- /* Looks for an axisting prospective job and returns that. If
+ /* Looks for an existing prospective job and returns that. If
* it doesn't exist it is created and added to the prospective
* jobs list. */
if (is_new)
*is_new = true;
- log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type));
+ /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
return j;
}
bool matters,
bool override,
bool conflicts,
+ bool ignore_deps,
DBusError *e,
Job **_ret) {
Job *ret;
assert(type < _JOB_TYPE_MAX);
assert(unit);
+ /* log_debug("Pulling in %s/%s from %s/%s", */
+ /* unit->meta.id, job_type_to_string(type), */
+ /* by ? by->unit->meta.id : "NA", */
+ /* by ? job_type_to_string(by->type) : "NA"); */
+
if (unit->meta.load_state != UNIT_LOADED &&
unit->meta.load_state != UNIT_ERROR &&
unit->meta.load_state != UNIT_MASKED) {
if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
"Unit %s failed to load: %s. "
- "You might find more information in the system logs.",
+ "See system logs and 'systemctl status' for details.",
unit->meta.id,
strerror(-unit->meta.load_error));
return -EINVAL;
if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
return -ENOMEM;
+ ret->ignore_deps = ret->ignore_deps || ignore_deps;
+
/* Then, add a link to the job. */
if (!job_dependency_new(by, ret, matters, conflicts))
return -ENOMEM;
- if (is_new) {
+ if (is_new && !ignore_deps) {
+ Set *following;
+
+ /* If we are following some other unit, make sure we
+ * add all dependencies of everybody following. */
+ if (unit_following_set(ret->unit, &following) > 0) {
+ SET_FOREACH(dep, following, i)
+ if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, e, NULL)) < 0) {
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
+
+ if (e)
+ dbus_error_free(e);
+ }
+
+ set_free(following);
+ }
+
/* Finally, recursively add in all dependencies. */
if (type == JOB_START || type == JOB_RELOAD_OR_START) {
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
- goto fail;
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
+
+ SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
+
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, e, NULL)) < 0 && r != -EBADR) {
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, e, NULL)) < 0) {
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
if (e)
}
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, e, NULL)) < 0) {
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, e, NULL)) < 0) {
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
if (e)
}
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
- goto fail;
+ if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, e, NULL)) < 0) {
+
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, e, NULL)) < 0 && r != -EBADR) {
+ if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, e, NULL)) < 0) {
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
if (e)
}
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, e, NULL)) < 0 && r != -EBADR)
- goto fail;
+ if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, e, NULL)) < 0) {
+
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
- goto fail;
+ if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, e, NULL)) < 0) {
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
+
+ if (e)
+ dbus_error_free(e);
+ }
} else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
- if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
- goto fail;
+ if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
+
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
+
+ SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
+ if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
+
+ if (r != -EBADR)
+ goto fail;
+
+ if (e)
+ dbus_error_free(e);
+ }
}
/* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
continue;
/* No need to stop inactive jobs */
- if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
+ if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
continue;
/* Is there already something listed for this? */
if (hashmap_get(m->transaction_jobs, u))
continue;
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, NULL, NULL)) < 0)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, NULL, NULL)) < 0)
log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
}
log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
- if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, e, &ret)) < 0) {
+ if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
transaction_abort(m);
return r;
}
transaction_abort(m);
while ((j = hashmap_first(m->jobs)))
- job_free(j);
+ job_finish_and_invalidate(j, JOB_CANCELED);
}
unsigned manager_dispatch_run_queue(Manager *m) {
dbus_error_init(&error);
- log_info("Activating special unit %s", name);
+ log_debug("Activating special unit %s", name);
if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
}
/* Run the exit target if there is one, if not, just exit. */
- if (manager_start_target(m, SPECIAL_EXIT_SERVICE, JOB_REPLACE) < 0) {
+ if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
m->exit_code = MANAGER_EXIT;
return 0;
}
if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
log_info("Trying to reconnect to bus...");
- bus_init(m);
+ bus_init(m, true);
}
if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
break;
default: {
- static const char * const table[] = {
+ /* Starting SIGRTMIN+0 */
+ static const char * const target_table[] = {
[0] = SPECIAL_DEFAULT_TARGET,
[1] = SPECIAL_RESCUE_TARGET,
[2] = SPECIAL_EMERGENCY_TARGET,
[3] = SPECIAL_HALT_TARGET,
[4] = SPECIAL_POWEROFF_TARGET,
- [5] = SPECIAL_REBOOT_TARGET
+ [5] = SPECIAL_REBOOT_TARGET,
+ [6] = SPECIAL_KEXEC_TARGET
+ };
+
+ /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
+ static const ManagerExitCode code_table[] = {
+ [0] = MANAGER_HALT,
+ [1] = MANAGER_POWEROFF,
+ [2] = MANAGER_REBOOT,
+ [3] = MANAGER_KEXEC
};
if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
- (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(table)) {
- manager_start_target(m, table[sfsi.ssi_signo - SIGRTMIN],
+ (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
+ manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
(sfsi.ssi_signo == 1 || sfsi.ssi_signo == 2) ? JOB_ISOLATE : JOB_REPLACE);
break;
}
- log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
+ if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
+ (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
+ m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
+ break;
+ }
+
+ switch (sfsi.ssi_signo - SIGRTMIN) {
+
+ case 20:
+ log_debug("Enabling showing of status.");
+ m->show_status = true;
+ break;
+
+ case 21:
+ log_debug("Disabling showing of status.");
+ m->show_status = false;
+ break;
+
+ default:
+ log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
+ }
}
}
}
assert(w = ev->data.ptr);
+ if (w->type == WATCH_INVALID)
+ return 0;
+
switch (w->type) {
case WATCH_SIGNAL:
mount_fd_event(m, ev->events);
break;
+ case WATCH_SWAP:
+ /* Some swap table change, intended for the swap subsystem */
+ swap_fd_event(m, ev->events);
+ break;
+
case WATCH_UDEV:
/* Some notification from udev, intended for the device subsystem */
device_fd_event(m, ev->events);
break;
default:
+ log_error("event type=%i", w->type);
assert_not_reached("Unknown epoll event type.");
}
if (manager_dispatch_dbus_queue(m) > 0)
continue;
+ if (swap_dispatch_reload(m) > 0)
+ continue;
+
if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
if (errno == EINTR)
union sockaddr_union sa;
int n = 0;
char *message = NULL;
- ssize_t r;
/* Don't generate plymouth events if the service was already
* started and we're just deserializing */
zero(sa);
sa.sa.sa_family = AF_UNIX;
- strncpy(sa.un.sun_path+1, "/ply-boot-protocol", sizeof(sa.un.sun_path)-1);
- if (connect(fd, &sa.sa, sizeof(sa.un)) < 0) {
+ strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
+ if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
if (errno != EPIPE &&
errno != EAGAIN &&
}
errno = 0;
- if ((r = write(fd, message, n + 1)) != n + 1) {
+ if (write(fd, message, n + 1) != n + 1) {
if (errno != EPIPE &&
errno != EAGAIN &&
assert(f);
assert(fds);
- fprintf(f, "startup-timestamp=%llu %llu\n\n",
- (unsigned long long) m->startup_timestamp.realtime,
- (unsigned long long) m->startup_timestamp.monotonic);
+ dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
+ dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
+ dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
+
+ fputc('\n', f);
HASHMAP_FOREACH_KEY(u, t, m->units, i) {
if (u->meta.id != t)
if (l[0] == 0)
break;
- if (startswith(l, "startup-timestamp=")) {
- unsigned long long a, b;
-
- if (sscanf(l+18, "%lli %llu", &a, &b) != 2)
- log_debug("Failed to parse startup timestamp value %s", l+18);
- else {
- m->startup_timestamp.realtime = a;
- m->startup_timestamp.monotonic = b;
- }
- } else
+ if (startswith(l, "initrd-timestamp="))
+ dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
+ else if (startswith(l, "startup-timestamp="))
+ dual_timestamp_deserialize(l+18, &m->startup_timestamp);
+ else if (startswith(l, "finish-timestamp="))
+ dual_timestamp_deserialize(l+17, &m->finish_timestamp);
+ else
log_debug("Unknown serialization item '%s'", l);
}
/* From here on there is no way back. */
manager_clear_jobs_and_units(m);
+ manager_undo_generators(m);
/* Find new unit paths */
lookup_paths_free(&m->lookup_paths);
if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
r = q;
+ manager_run_generators(m);
+
+ manager_build_unit_path_cache(m);
+
m->n_deserializing ++;
/* First, enumerate what we can from all config files */
unit_reset_failed(u);
}
-int manager_set_console(Manager *m, const char *console) {
- char *c;
-
- assert(m);
-
- if (!(c = strdup(console)))
- return -ENOMEM;
-
- free(m->console);
- m->console = c;
-
- log_debug("Using kernel console %s", c);
-
- return 0;
-}
-
bool manager_unit_pending_inactive(Manager *m, const char *name) {
Unit *u;
}
void manager_check_finished(Manager *m) {
- char userspace[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
+ char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
assert(m);
dual_timestamp_get(&m->finish_timestamp);
- if (m->running_as == MANAGER_SYSTEM)
- log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
- format_timespan(kernel, sizeof(kernel),
- m->startup_timestamp.monotonic),
- format_timespan(userspace, sizeof(userspace),
- m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
- format_timespan(sum, sizeof(sum),
- m->finish_timestamp.monotonic));
- else
+ if (m->running_as == MANAGER_SYSTEM) {
+ if (dual_timestamp_is_set(&m->initrd_timestamp)) {
+ log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
+ format_timespan(kernel, sizeof(kernel),
+ m->initrd_timestamp.monotonic),
+ format_timespan(initrd, sizeof(initrd),
+ m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic),
+ format_timespan(userspace, sizeof(userspace),
+ m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
+ format_timespan(sum, sizeof(sum),
+ m->finish_timestamp.monotonic));
+ } else
+ log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
+ format_timespan(kernel, sizeof(kernel),
+ m->startup_timestamp.monotonic),
+ format_timespan(userspace, sizeof(userspace),
+ m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
+ format_timespan(sum, sizeof(sum),
+ m->finish_timestamp.monotonic));
+ } else
log_debug("Startup finished in %s.",
format_timespan(userspace, sizeof(userspace),
m->finish_timestamp.monotonic - m->startup_timestamp.monotonic));
}
+/* Runs every generator binary found in the generator directory
+ * (system or user flavor, picked by m->running_as) and, if they
+ * produced any output, adds the temporary output directory to the
+ * unit search path. The output directory is created lazily on first
+ * call and remembered in m->generator_unit_path, so repeated calls
+ * reuse it. Errors are logged and swallowed: generators are
+ * best-effort and must never abort manager startup. */
+void manager_run_generators(Manager *m) {
+ DIR *d = NULL;
+ const char *generator_path;
+ const char *argv[3];
+
+ assert(m);
+
+ /* Pick the generator source directory matching how we run. */
+ generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
+ if (!(d = opendir(generator_path))) {
+
+ /* No generator directory installed at all: nothing to do. */
+ if (errno == ENOENT)
+ return;
+
+ log_error("Failed to enumerate generator directory: %m");
+ return;
+ }
+
+ /* Create the output directory lazily; keep it across calls in
+ * m->generator_unit_path. mkdtemp() rewrites the XXXXXX
+ * template in place, hence the writable char[] buffers. */
+ if (!m->generator_unit_path) {
+ char *p;
+ char system_path[] = "/dev/.systemd/generator-XXXXXX",
+ user_path[] = "/tmp/systemd-generator-XXXXXX";
+
+ if (!(p = mkdtemp(m->running_as == MANAGER_SYSTEM ? system_path : user_path))) {
+ log_error("Failed to generate generator directory: %m");
+ goto finish;
+ }
+
+ if (!(m->generator_unit_path = strdup(p))) {
+ log_error("Failed to allocate generator unit path.");
+ goto finish;
+ }
+ }
+
+ argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
+ argv[1] = m->generator_unit_path;
+ argv[2] = NULL;
+
+ /* NOTE(review): presumably runs each executable in d with the
+ * output directory as its argument — confirm against
+ * execute_directory()'s definition. */
+ execute_directory(generator_path, d, (char**) argv);
+
+ /* rmdir() only succeeds on an empty directory (POSIX), so
+ * success means the generators produced nothing. */
+ if (rmdir(m->generator_unit_path) >= 0) {
+ /* Uh? we were able to remove this dir? I guess that
+ * means the directory was empty, hence let's shortcut
+ * this */
+
+ free(m->generator_unit_path);
+ m->generator_unit_path = NULL;
+ goto finish;
+ }
+
+ /* Make the generated units findable, unless the path is
+ * already in the unit search path from a previous run. */
+ if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
+ char **l;
+
+ if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
+ log_error("Failed to add generator directory to unit search path: %m");
+ goto finish;
+ }
+
+ strv_free(m->lookup_paths.unit_path);
+ m->lookup_paths.unit_path = l;
+
+ log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
+ }
+
+finish:
+ if (d)
+ closedir(d);
+}
+
+/* Reverses manager_run_generators(): drops the generator output
+ * directory from the unit search path, deletes it from disk and
+ * forgets it, so a later manager_run_generators() starts fresh.
+ * No-op if no generator directory was ever created. */
+void manager_undo_generators(Manager *m) {
+ assert(m);
+
+ if (!m->generator_unit_path)
+ return;
+
+ strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
+ /* NOTE(review): assumes rm_rf(path, false, true) means a
+ * recursive delete including the directory itself — confirm
+ * the meaning of the two boolean flags against rm_rf()'s
+ * declaration. */
+ rm_rf(m->generator_unit_path, false, true);
+
+ free(m->generator_unit_path);
+ m->generator_unit_path = NULL;
+}
+
+/* Replaces the manager's default cgroup controller list with a deep
+ * copy of @controllers. The caller keeps ownership of the passed-in
+ * vector; the previous list is freed. Returns 0 on success,
+ * -ENOMEM if the copy fails (in which case the old list is kept
+ * untouched). */
+int manager_set_default_controllers(Manager *m, char **controllers) {
+ char **l;
+
+ assert(m);
+
+ /* Copy first so failure leaves m->default_controllers intact. */
+ if (!(l = strv_copy(controllers)))
+ return -ENOMEM;
+
+ strv_free(m->default_controllers);
+ m->default_controllers = l;
+
+ return 0;
+}
+
static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
[MANAGER_SYSTEM] = "system",
- [MANAGER_SESSION] = "session"
+ [MANAGER_USER] = "user"
};
DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);