X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=manager.c;h=682c7e7f26b451172563644618eff712ef9fe723;hp=97c99e50465538f100b39f46c4a53cce70956c3a;hb=0301abf48ed3be921c33d409c73b554435cf6378;hpb=223dabab498a89e008a2a5cebb24bd168c80bae5 diff --git a/manager.c b/manager.c index 97c99e504..682c7e7f2 100644 --- a/manager.c +++ b/manager.c @@ -2,29 +2,57 @@ #include #include +#include +#include +#include +#include +#include +#include +#include #include "manager.h" #include "hashmap.h" #include "macro.h" #include "strv.h" -#include "load-fragment.h" +#include "log.h" Manager* manager_new(void) { Manager *m; + sigset_t mask; + struct epoll_event ev; if (!(m = new0(Manager, 1))) return NULL; - if (!(m->names = hashmap_new(string_hash_func, string_compare_func))) + m->signal_fd = m->epoll_fd = -1; + + if (!(m->units = hashmap_new(string_hash_func, string_compare_func))) goto fail; if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func))) goto fail; - if (!(m->jobs_to_add = hashmap_new(trivial_hash_func, trivial_compare_func))) + if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func))) + goto fail; + + if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func))) goto fail; - if (!(m->jobs_to_remove = set_new(trivial_hash_func, trivial_compare_func))) + if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0) + goto fail; + + assert_se(sigemptyset(&mask) == 0); + assert_se(sigaddset(&mask, SIGCHLD) == 0); + assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0); + + if ((m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0) + goto fail; + + zero(ev); + ev.events = EPOLLIN; + ev.data.fd = m->signal_fd; + + if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_fd, &ev) < 0) goto fail; return m; @@ -35,289 +63,998 @@ fail: } void manager_free(Manager *m) { - Name *n; + Unit *u; + Job *j; assert(m); - while ((n = hashmap_first(m->names))) - name_free(n); + while ((j = hashmap_first(m->transaction_jobs))) + job_free(j); - hashmap_free(m->names); - hashmap_free(m->jobs); + while ((u = hashmap_first(m->units))) + unit_free(u); - /* FIXME: This is incomplete */ + hashmap_free(m->units); + hashmap_free(m->jobs); + hashmap_free(m->transaction_jobs); + hashmap_free(m->watch_pids); - hashmap_free(m->jobs_to_add); - set_free(m->jobs_to_remove); + if (m->epoll_fd >= 0) + close_nointr(m->epoll_fd); + if (m->signal_fd >= 0) + close_nointr(m->signal_fd); free(m); } -int manager_add_job(Manager *m, JobType type, Name *name, JobMode mode, Job **_ret) { - Job *ret, *other; - void *state; - Name *dep; - int r; +static void transaction_delete_job(Manager *m, Job *j) { + assert(m); + assert(j); + + /* Deletes one job from the transaction */ + + manager_transaction_unlink_job(m, j); + + if (!j->installed) + job_free(j); +} + +static void transaction_delete_unit(Manager *m, Unit *u) { + Job *j; + + /* Deletes all jobs associated with a certain unit from the + * transaction */ + + while ((j = hashmap_get(m->transaction_jobs, u))) + transaction_delete_job(m, j); +} + +static void transaction_abort(Manager *m) { + Job *j; assert(m); - assert(type < _JOB_TYPE_MAX); - assert(name); - assert(mode < _JOB_MODE_MAX); - assert(_ret); - /* Check for conflicts, first against the jobs we shall - * create */ - if ((other = hashmap_get(m->jobs_to_add, name))) { + while ((j = hashmap_first(m->transaction_jobs))) + if (j->installed) + transaction_delete_job(m, j); + else + job_free(j); - if (other->type != type) - return -EEXIST; + 
assert(hashmap_isempty(m->transaction_jobs)); + assert(!m->transaction_anchor); +} - } else if (name->meta.job) { +static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) { + JobDependency *l; - if (name->meta.job->type != type) { + assert(m); - if (mode == JOB_FAIL) - return -EEXIST; + /* A recursive sweep through the graph that marks all units + * that matter to the anchor job, i.e. are directly or + * indirectly a dependency of the anchor job via paths that + * are fully marked as mattering. */ - if ((r = set_put(m->jobs_to_remove, name->meta.job)) < 0) - return r; - } + if (j) + l = j->subject_list; + else + l = m->transaction_anchor; + + LIST_FOREACH(subject, l, l) { + + /* This link does not matter */ + if (!l->matters) + continue; + + /* This unit has already been marked */ + if (l->object->generation == generation) + continue; + + l->object->matters_to_anchor = true; + l->object->generation = generation; + + transaction_find_jobs_that_matter_to_anchor(m, l->object, generation); } +} - if (!(ret = job_new(m, type, name))) - return -ENOMEM; +static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) { + JobDependency *l, *last; - if ((r = hashmap_put(m->jobs_to_add, name, ret)) < 0) - goto fail; + assert(j); + assert(other); + assert(j->unit == other->unit); + assert(!j->installed); - if (type == JOB_START || type == JOB_VERIFY_STARTED || type == JOB_RESTART_FINISH) { - SET_FOREACH(dep, ret->name->meta.requires, state) - if ((r = manager_add_job(m, type, dep, mode, NULL)) < 0) - goto fail; - SET_FOREACH(dep, ret->name->meta.soft_requires, state) - if ((r = manager_add_job(m, type, dep, JOB_FAIL, NULL)) < 0) - goto fail; - SET_FOREACH(dep, ret->name->meta.wants, state) - if ((r = manager_add_job(m, type, dep, JOB_FAIL, NULL)) < 0) - goto fail; - SET_FOREACH(dep, ret->name->meta.requisite, state) - if ((r = manager_add_job(m, JOB_VERIFY_STARTED, dep, mode, NULL)) < 0) - goto fail; - SET_FOREACH(dep, ret->name->meta.soft_requisite, state) - if ((r = manager_add_job(m, JOB_VERIFY_STARTED, dep, JOB_FAIL, NULL)) < 0) - goto fail; - SET_FOREACH(dep, ret->name->meta.conflicts, state) - if ((r = manager_add_job(m, type, dep, mode, NULL)) < 0) - goto fail; - - } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) { - - SET_FOREACH(dep, ret->name->meta.required_by, state) - if ((r = manager_add_job(m, type, dep, mode, NULL)) < 0) - goto fail; + /* Merges 'other' into 'j' and then deletes j. 
*/ + + j->type = t; + j->state = JOB_WAITING; + j->forced = j->forced || other->forced; + + j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor; + + /* Patch us in as new owner of the JobDependency objects */ + last = NULL; + LIST_FOREACH(subject, l, other->subject_list) { + assert(l->subject == other); + l->subject = j; + last = l; } - if (_ret) - *_ret = ret; + /* Merge both lists */ + if (last) { + last->subject_next = j->subject_list; + if (j->subject_list) + j->subject_list->subject_prev = last; + j->subject_list = other->subject_list; + } - return 0; + /* Patch us in as new owner of the JobDependency objects */ + last = NULL; + LIST_FOREACH(object, l, other->object_list) { + assert(l->object == other); + l->object = j; + last = l; + } -fail: - job_free(ret); + /* Merge both lists */ + if (last) { + last->object_next = j->object_list; + if (j->object_list) + j->object_list->object_prev = last; + j->object_list = other->object_list; + } - return r; + /* Kill the other job */ + other->subject_list = NULL; + other->object_list = NULL; + transaction_delete_job(m, other); } +static int delete_one_unmergeable_job(Manager *m, Job *j) { + Job *k; + + assert(j); + + /* Tries to delete one item in the linked list + * j->transaction_next->transaction_next->... that conflicts + * whith another one, in an attempt to make an inconsistent + * transaction work. */ + + /* We rely here on the fact that if a merged with b does not + * merge with c, either a or b merge with c neither */ + LIST_FOREACH(transaction, j, j) + LIST_FOREACH(transaction, k, j->transaction_next) { + Job *d; + + /* Is this one mergeable? Then skip it */ + if (job_type_is_mergeable(j->type, k->type)) + continue; + + /* Ok, we found two that conflict, let's see if we can + * drop one of them */ + if (!j->matters_to_anchor) + d = j; + else if (!k->matters_to_anchor) + d = k; + else + return -ENOEXEC; + + /* Ok, we can drop one, so let's do so. */ + log_debug("Try to fix job merging by deleting job %s/%s", unit_id(d->unit), job_type_to_string(d->type)); + transaction_delete_job(m, d); + return 0; + } -Job *manager_get_job(Manager *m, uint32_t id) { - assert(m); - - return hashmap_get(m->jobs, UINT32_TO_PTR(id)); + return -EINVAL; } -Name *manager_get_name(Manager *m, const char *name) { +static int transaction_merge_jobs(Manager *m) { + Job *j; + Iterator i; + int r; + assert(m); - assert(name); - return hashmap_get(m->names, name); + /* First step, check whether any of the jobs for one specific + * task conflict. If so, try to drop one of them. */ + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + JobType t; + Job *k; + + t = j->type; + LIST_FOREACH(transaction, k, j->transaction_next) { + if ((r = job_type_merge(&t, k->type)) >= 0) + continue; + + /* OK, we could not merge all jobs for this + * action. Let's see if we can get rid of one + * of them */ + + if ((r = delete_one_unmergeable_job(m, j)) >= 0) + /* Ok, we managed to drop one, now + * let's ask our callers to call us + * again after garbage collecting */ + return -EAGAIN; + + /* We couldn't merge anything. Failure */ + return r; + } + } + + /* Second step, merge the jobs. */ + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + JobType t = j->type; + Job *k; + + /* Merge all transactions */ + LIST_FOREACH(transaction, k, j->transaction_next) + assert_se(job_type_merge(&t, k->type) == 0); + + /* If an active job is mergeable, merge it too */ + if (j->unit->meta.job) + job_type_merge(&t, j->unit->meta.job->type); /* Might fail. 
Which is OK */ + + while ((k = j->transaction_next)) { + if (j->installed) { + transaction_merge_and_delete_job(m, k, j, t); + j = k; + } else + transaction_merge_and_delete_job(m, j, k, t); + } + + assert(!j->transaction_next); + assert(!j->transaction_prev); + } + + return 0; } -static int detect_type(Name *name) { - char **n; +static bool unit_matters_to_anchor(Unit *u, Job *j) { + assert(u); + assert(!j->transaction_prev); - assert(name); + /* Checks whether at least one of the jobs for this unit + * matters to the anchor. */ - name->meta.type = _NAME_TYPE_INVALID; + LIST_FOREACH(transaction, j, j) + if (j->matters_to_anchor) + return true; - STRV_FOREACH(n, name->meta.names) { - NameType t; + return false; +} - if ((t = name_type_from_string(*n)) == _NAME_TYPE_INVALID) - return -EINVAL; +static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation) { + Iterator i; + Unit *u; + int r; - if (name->meta.type == _NAME_TYPE_INVALID) { - name->meta.type = t; - continue; + assert(m); + assert(j); + assert(!j->transaction_prev); + + /* Does a recursive sweep through the ordering graph, looking + * for a cycle. If we find cycle we try to break it. */ + + /* Did we find a cycle? */ + if (j->marker && j->generation == generation) { + Job *k; + + /* So, we already have been here. We have a + * cycle. Let's try to break it. We go backwards in + * our path and try to find a suitable job to + * remove. We use the marker to find our way back, + * since smart how we are we stored our way back in + * there. */ + + for (k = from; k; k = (k->generation == generation ? k->marker : NULL)) { + + if (!k->installed && + !unit_matters_to_anchor(k->unit, k)) { + /* Ok, we can drop this one, so let's + * do so. */ + log_debug("Breaking order cycle by deleting job %s/%s", unit_id(k->unit), job_type_to_string(k->type)); + transaction_delete_unit(m, k->unit); + return -EAGAIN; + } + + /* Check if this in fact was the beginning of + * the cycle */ + if (k == j) + break; } - if (name->meta.type != t) - return -EINVAL; + return -ENOEXEC; + } + + /* Make the marker point to where we come from, so that we can + * find our way backwards if we want to break a cycle */ + j->marker = from; + j->generation = generation; + + /* We assume that the the dependencies are bidirectional, and + * hence can ignore UNIT_AFTER */ + SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) { + Job *o; + + /* Is there a job for this unit? */ + if (!(o = hashmap_get(m->transaction_jobs, u))) + + /* Ok, there is no job for this in the + * transaction, but maybe there is already one + * running? */ + if (!(o = u->meta.job)) + continue; + + if ((r = transaction_verify_order_one(m, o, j, generation)) < 0) + return r; } return 0; } -static int service_load_sysv(Service *s) { - assert(s); +static int transaction_verify_order(Manager *m, unsigned *generation) { + Job *j; + int r; + Iterator i; + + assert(m); + assert(generation); + + /* Check if the ordering graph is cyclic. If it is, try to fix + * that up by dropping one of the jobs. */ - /* Load service data from SysV init scripts, preferably with - * LSB headers ... 
*/ + HASHMAP_FOREACH(j, m->transaction_jobs, i) + if ((r = transaction_verify_order_one(m, j, NULL, (*generation)++)) < 0) + return r; return 0; } -static int name_load_fstab(Name *n) { - assert(n); - assert(n->meta.type == NAME_MOUNT || n->meta.type == NAME_AUTOMOUNT); +static void transaction_collect_garbage(Manager *m) { + bool again; + + assert(m); + + /* Drop jobs that are not required by any other job */ + + do { + Iterator i; + Job *j; - /* Load mount data from /etc/fstab */ + again = false; + + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + if (j->object_list) + continue; + + log_debug("Garbage collecting job %s/%s", unit_id(j->unit), job_type_to_string(j->type)); + transaction_delete_job(m, j); + again = true; + break; + } + + } while (again); +} + +static int transaction_is_destructive(Manager *m, JobMode mode) { + Iterator i; + Job *j; + + assert(m); + + /* Checks whether applying this transaction means that + * existing jobs would be replaced */ + + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + + /* Assume merged */ + assert(!j->transaction_prev); + assert(!j->transaction_next); + + if (j->unit->meta.job && + j->unit->meta.job != j && + !job_type_is_superset(j->type, j->unit->meta.job->type)) + return -EEXIST; + } return 0; } -static int snapshot_load(Snapshot *s) { - assert(s); +static void transaction_minimize_impact(Manager *m) { + bool again; + assert(m); + + /* Drops all unnecessary jobs that reverse already active jobs + * or that stop a running service. */ + + do { + Job *j; + Iterator i; + + again = false; + + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + LIST_FOREACH(transaction, j, j) { + + /* If it matters, we shouldn't drop it */ + if (j->matters_to_anchor) + continue; + + /* Would this stop a running service? + * Would this change an existing job? + * If so, let's drop this entry */ + if ((j->type != JOB_STOP || UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(j->unit))) && + (!j->unit->meta.job || job_type_is_conflicting(j->type, j->unit->meta.job->state))) + continue; + + /* Ok, let's get rid of this */ + log_debug("Deleting %s/%s to minimize impact", unit_id(j->unit), job_type_to_string(j->type)); + transaction_delete_job(m, j); + again = true; + break; + } + + if (again) + break; + } + + } while (again); +} + +static int transaction_apply(Manager *m, JobMode mode) { + Iterator i; + Job *j; + int r; + + /* Moves the transaction jobs to the set of active jobs */ + + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + /* Assume merged */ + assert(!j->transaction_prev); + assert(!j->transaction_next); + + if (j->installed) + continue; + + if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0) + goto rollback; + } - /* Load snapshots from disk */ + while ((j = hashmap_steal_first(m->transaction_jobs))) { + if (j->installed) + continue; + + if (j->unit->meta.job) + job_free(j->unit->meta.job); + + j->unit->meta.job = j; + j->installed = true; + + /* We're fully installed. Now let's free data we don't + * need anymore. 
*/ + + assert(!j->transaction_next); + assert(!j->transaction_prev); + + while (j->subject_list) + job_dependency_free(j->subject_list); + while (j->object_list) + job_dependency_free(j->object_list); + } + + m->transaction_anchor = NULL; return 0; + +rollback: + + HASHMAP_FOREACH(j, m->transaction_jobs, i) { + if (j->installed) + continue; + + hashmap_remove(m->jobs, UINT32_TO_PTR(j->id)); + } + + return r; } -static int name_load_dropin(Name *n) { - assert(n); +static int transaction_activate(Manager *m, JobMode mode) { + int r; + unsigned generation = 1; + + assert(m); + + /* This applies the changes recorded in transaction_jobs to + * the actual list of jobs, if possible. */ + + /* First step: figure out which jobs matter */ + transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++); + + /* Second step: Try not to stop any running services if + * we don't have to. Don't try to reverse running + * jobs if we don't have to. */ + transaction_minimize_impact(m); - /* Load dependencies from drop-in directories */ + for (;;) { + /* Third step: Let's remove unneeded jobs that might + * be lurking. */ + transaction_collect_garbage(m); + + /* Fourth step: verify order makes sense and correct + * cycles if necessary and possible */ + if ((r = transaction_verify_order(m, &generation)) >= 0) + break; + + if (r != -EAGAIN) + goto rollback; + + /* Let's see if the resulting transaction ordering + * graph is still cyclic... */ + } + + for (;;) { + /* Fifth step: let's drop unmergeable entries if + * necessary and possible, merge entries we can + * merge */ + if ((r = transaction_merge_jobs(m)) >= 0) + break; + + if (r != -EAGAIN) + goto rollback; + + /* Sixth step: an entry got dropped, let's garbage + * collect its dependencies. */ + transaction_collect_garbage(m); + + /* Let's see if the resulting transaction still has + * unmergeable entries ... */ + } + + /* Seventh step: check whether we can actually apply this */ + if (mode == JOB_FAIL) + if ((r = transaction_is_destructive(m, mode)) < 0) + goto rollback; + + /* Eights step: apply changes */ + if ((r = transaction_apply(m, mode)) < 0) + goto rollback; + + assert(hashmap_isempty(m->transaction_jobs)); + assert(!m->transaction_anchor); return 0; + +rollback: + transaction_abort(m); + return r; } -static int load(Name *name) { +static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool force, bool *is_new) { + Job *j, *f; int r; - assert(name); + assert(m); + assert(unit); - if (name->meta.state != NAME_STUB) - return 0; + /* Looks for an axisting prospective job and returns that. If + * it doesn't exist it is created and added to the prospective + * jobs list. 
*/ - if ((r = detect_type(name)) < 0) - return r; + f = hashmap_get(m->transaction_jobs, unit); - if (name->meta.type == NAME_SERVICE) { + LIST_FOREACH(transaction, j, f) { + assert(j->unit == unit); - /* Load a .service file */ - if ((r = name_load_fragment(name)) == 0) - goto finish; + if (j->type == type) { + if (is_new) + *is_new = false; + return j; + } + } - /* Load a classic init script */ - if (r == -ENOENT) - if ((r = service_load_sysv(SERVICE(name))) == 0) - goto finish; + if (unit->meta.job && unit->meta.job->type == type) + j = unit->meta.job; + else if (!(j = job_new(m, type, unit))) + return NULL; + + j->generation = 0; + j->marker = NULL; + j->matters_to_anchor = false; + j->forced = force; + + LIST_PREPEND(Job, transaction, f, j); + + if ((r = hashmap_replace(m->transaction_jobs, unit, f)) < 0) { + job_free(j); + return NULL; + } + + if (is_new) + *is_new = true; + + return j; +} + +void manager_transaction_unlink_job(Manager *m, Job *j) { + assert(m); + assert(j); - } else if (name->meta.type == NAME_MOUNT || - name->meta.type == NAME_AUTOMOUNT) { + if (j->transaction_prev) + j->transaction_prev->transaction_next = j->transaction_next; + else if (j->transaction_next) + hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next); + else + hashmap_remove_value(m->transaction_jobs, j->unit, j); - if ((r = name_load_fstab(name)) == 0) - goto finish; + if (j->transaction_next) + j->transaction_next->transaction_prev = j->transaction_prev; - } else if (name->meta.type == NAME_SNAPSHOT) { + j->transaction_prev = j->transaction_next = NULL; - if ((r = snapshot_load(SNAPSHOT(name))) == 0) - goto finish; + while (j->subject_list) + job_dependency_free(j->subject_list); - } else { - if ((r = name_load_fragment(name)) == 0) - goto finish; + while (j->object_list) { + Job *other = j->object_list->matters ? j->object_list->subject : NULL; + + job_dependency_free(j->object_list); + + if (other) { + log_debug("Deleting job %s/%s as dependency of job %s/%s", + unit_id(other->unit), job_type_to_string(other->type), + unit_id(j->unit), job_type_to_string(j->type)); + transaction_delete_job(m, other); + } } +} + +static int transaction_add_job_and_dependencies(Manager *m, JobType type, Unit *unit, Job *by, bool matters, bool force, Job **_ret) { + Job *ret; + Iterator i; + Unit *dep; + int r; + bool is_new; - name->meta.state = NAME_FAILED; + assert(m); + assert(type < _JOB_TYPE_MAX); + assert(unit); + + if (unit->meta.load_state != UNIT_LOADED) + return -EINVAL; + + if (!unit_job_is_applicable(unit, type)) + return -EBADR; + + /* First add the job. */ + if (!(ret = transaction_add_one_job(m, type, unit, force, &is_new))) + return -ENOMEM; + + /* Then, add a link to the job. */ + if (!job_dependency_new(by, ret, matters)) + return -ENOMEM; + + if (is_new) { + /* Finally, recursively add in all dependencies. 
*/ + if (type == JOB_START || type == JOB_RELOAD_OR_START) { + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, force, NULL)) < 0 && r != -EBADR) + goto fail; + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_SOFT_REQUIRES], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !force, force, NULL)) < 0 && r != -EBADR) + goto fail; + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, force, NULL)) < 0 && r != -EBADR) + goto fail; + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, force, NULL)) < 0 && r != -EBADR) + goto fail; + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_SOFT_REQUISITE], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !force, force, NULL)) < 0 && r != -EBADR) + goto fail; + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i) + if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, force, NULL)) < 0 && r != -EBADR) + goto fail; + + } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) { + + SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i) + if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, force, NULL)) < 0 && r != -EBADR) + goto fail; + } + + /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */ + } + + return 0; + +fail: return r; +} + +int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool force, Job **_ret) { + int r; + Job *ret; + + assert(m); + assert(type < _JOB_TYPE_MAX); + assert(unit); + assert(mode < _JOB_MODE_MAX); -finish: - if ((r = name_load_dropin(name)) < 0) + if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, force, &ret))) { + transaction_abort(m); return r; + } + + if ((r = transaction_activate(m, mode)) < 0) + return r; + + if (_ret) + *_ret = ret; - name->meta.state = NAME_LOADED; return 0; } -static int dispatch_load_queue(Manager *m) { +Job *manager_get_job(Manager *m, uint32_t id) { + assert(m); + + return hashmap_get(m->jobs, UINT32_TO_PTR(id)); +} + +Unit *manager_get_unit(Manager *m, const char *name) { + assert(m); + assert(name); + + return hashmap_get(m->units, name); +} + +static void dispatch_load_queue(Manager *m) { Meta *meta; assert(m); /* Make sure we are not run recursively */ if (m->dispatching_load_queue) - return 0; + return; m->dispatching_load_queue = true; - /* Dispatches the load queue. Takes a name from the queue and + /* Dispatches the load queue. Takes a unit from the queue and * tries to load its data until the queue is empty */ while ((meta = m->load_queue)) { - load(NAME(meta)); - LIST_REMOVE(Meta, m->load_queue, meta); + assert(meta->in_load_queue); + + unit_load(UNIT(meta)); } m->dispatching_load_queue = false; - - return 0; } -int manager_load_name(Manager *m, const char *name, Name **_ret) { - Name *ret; - NameType t; +int manager_load_unit(Manager *m, const char *path, Unit **_ret) { + Unit *ret; int r; + const char *name; assert(m); - assert(name); + assert(path); assert(_ret); - if (!name_is_valid(name)) - return -EINVAL; - /* This will load the service information files, but not actually - * start any services or anything */ + * start any services or anything. 
*/ - if ((ret = manager_get_name(m, name))) - goto finish; + name = file_name_from_path(path); - if ((t = name_type_from_string(name)) == _NAME_TYPE_INVALID) - return -EINVAL; + if ((ret = manager_get_unit(m, name))) { + *_ret = ret; + return 0; + } - if (!(ret = name_new(m))) + if (!(ret = unit_new(m))) return -ENOMEM; - ret->meta.type = t; - - if (!(ret->meta.names = strv_new(name, NULL))) { - name_free(ret); - return -ENOMEM; + if (is_path(path)) { + if (!(ret->meta.load_path = strdup(path))) { + unit_free(ret); + return -ENOMEM; + } } - if ((r = name_link(ret)) < 0) { - name_free(ret); + if ((r = unit_add_name(ret, name)) < 0) { + unit_free(ret); return r; } - /* At this point the new entry is created and linked. However, - * not loaded. Now load this entry and all its dependencies - * recursively */ - + unit_add_to_load_queue(ret); dispatch_load_queue(m); -finish: - *_ret = ret; return 0; } + +void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) { + Iterator i; + Job *j; + + assert(s); + assert(f); + + HASHMAP_FOREACH(j, s->jobs, i) + job_dump(j, f, prefix); +} + +void manager_dump_units(Manager *s, FILE *f, const char *prefix) { + Iterator i; + Unit *u; + const char *t; + + assert(s); + assert(f); + + HASHMAP_FOREACH_KEY(u, t, s->units, i) + if (unit_id(u) == t) + unit_dump(u, f, prefix); +} + +void manager_clear_jobs(Manager *m) { + Job *j; + + assert(m); + + transaction_abort(m); + + while ((j = hashmap_first(m->jobs))) + job_free(j); +} + +void manager_dispatch_run_queue(Manager *m) { + Job *j; + + if (m->dispatching_run_queue) + return; + + m->dispatching_run_queue = true; + + while ((j = m->run_queue)) { + assert(j->installed); + assert(j->in_run_queue); + + job_run_and_invalidate(j); + } + + m->dispatching_run_queue = false; +} + +static int manager_dispatch_sigchld(Manager *m) { + assert(m); + + for (;;) { + siginfo_t si; + Unit *u; + + zero(si); + if (waitid(P_ALL, 0, &si, WNOHANG) < 0) + return -errno; + + if (si.si_pid == 0) + break; + + if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED) + continue; + + if (!(u = hashmap_remove(m->watch_pids, UINT32_TO_PTR(si.si_pid)))) + continue; + + UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status); + } + + return 0; +} + +static int manager_process_signal_fd(Manager *m) { + ssize_t n; + struct signalfd_siginfo sfsi; + bool sigchld = false; + + assert(m); + + for (;;) { + if ((n = read(m->signal_fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) { + + if (n >= 0) + return -EIO; + + if (errno == EAGAIN) + return 0; + + return -errno; + } + + if (sfsi.ssi_signo == SIGCHLD) + sigchld = true; + } + + if (sigchld) + return manager_dispatch_sigchld(m); + + return 0; +} + +static int process_event(Manager *m, struct epoll_event *ev) { + int r; + + assert(m); + assert(ev); + + switch (ev->data.u32) { + + case MANAGER_SIGNAL: + assert(ev->data.fd == m->signal_fd); + + /* An incoming signal? */ + if (ev->events != POLLIN) + return -EINVAL; + + if ((r = manager_process_signal_fd(m)) < 0) + return -r; + + break; + + case MANAGER_FD: { + Unit *u; + + /* Some fd event, to be dispatched to the units */ + assert_se(u = ev->data.ptr); + UNIT_VTABLE(u)->fd_event(u, ev->data.fd, ev->events); + break; + } + + case MANAGER_TIMER: { + Unit *u; + uint64_t v; + ssize_t k; + + /* Some timer event, to be dispatched to the units */ + if ((k = read(ev->data.fd, &v, sizeof(v))) != sizeof(v)) { + + if (k < 0 && (errno == EINTR || errno == EAGAIN)) + break; + + return k < 0 ? 
-errno : -EIO; + } + + assert_se(u = ev->data.ptr); + UNIT_VTABLE(u)->timer_event(u, ev->data.fd, v); + break; + } + + default: + assert_not_reached("Unknown epoll event type."); + } + + return 0; +} + +int manager_loop(Manager *m) { + int r; + + assert(m); + + for (;;) { + struct epoll_event events[32]; + int n, i; + + manager_dispatch_run_queue(m); + + if ((n = epoll_wait(m->epoll_fd, events, ELEMENTSOF(events), -1)) < 0) { + + if (errno == -EINTR) + continue; + + return -errno; + } + + for (i = 0; i < n; i++) + if ((r = process_event(m, events + i)) < 0) + return r; + } +}
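
For readers following the new event-loop core of this patch, here is a minimal, self-contained sketch (not part of the patch, names and error handling are illustrative) of the signalfd/epoll pattern that manager_new(), manager_process_signal_fd() and manager_dispatch_sigchld() are built around: SIGCHLD is blocked, turned into a readable file descriptor with signalfd(), multiplexed through epoll, and exited children are then reaped synchronously with waitid(..., WNOHANG) instead of inside an asynchronous signal handler.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
        sigset_t mask;
        struct epoll_event ev, events[8];
        struct signalfd_siginfo sfsi;
        int epoll_fd, signal_fd, n, i;

        /* Deliver SIGCHLD through a file descriptor instead of a handler */
        sigemptyset(&mask);
        sigaddset(&mask, SIGCHLD);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        if (epoll_fd < 0 || signal_fd < 0)
                return 1;

        memset(&ev, 0, sizeof(ev));
        ev.events = EPOLLIN;
        ev.data.fd = signal_fd;
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, signal_fd, &ev) < 0)
                return 1;

        if (fork() == 0)
                _exit(42);                /* a child for the demo to reap */

        for (;;) {
                n = epoll_wait(epoll_fd, events, 8, -1);
                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        return 1;
                }

                for (i = 0; i < n; i++) {
                        if (events[i].data.fd != signal_fd)
                                continue;

                        /* Drain the signal fd ... */
                        while (read(signal_fd, &sfsi, sizeof(sfsi)) == sizeof(sfsi)) {
                                if (sfsi.ssi_signo != SIGCHLD)
                                        continue;

                                /* ... then reap every child that has exited */
                                for (;;) {
                                        siginfo_t si;

                                        memset(&si, 0, sizeof(si));
                                        if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG) < 0 || si.si_pid == 0)
                                                break;

                                        printf("child %d exited, status %d\n",
                                               (int) si.si_pid, si.si_status);
                                        close(signal_fd);
                                        close(epoll_fd);
                                        return 0;
                                }
                        }
                }
        }
}

The ordering-cycle handling in transaction_verify_order_one() relies on two per-job fields, marker and generation: each sweep of the graph carries a fresh generation number, every visited job records which job it was reached from, and meeting a job already stamped with the current generation means the path closed a cycle, which can then be enumerated by walking the markers backwards. The standalone sketch below shows only that detection trick on a hypothetical Node graph; the types, the fixed edge array and the printed output are illustrative, and the real code additionally decides which job on the cycle is safe to delete.

#include <stdbool.h>
#include <stdio.h>

typedef struct Node Node;

struct Node {
        const char *name;
        Node *before[2];          /* ordering edges: this node runs before these */
        Node *marker;             /* node we were reached from in the current sweep */
        unsigned generation;      /* sweep that last visited this node */
};

static bool find_cycle(Node *n, Node *from, unsigned generation) {

        /* Visited again within the same sweep: we closed a cycle.
         * Walk the markers backwards from 'from' to list its members. */
        if (n->marker && n->generation == generation) {
                printf("ordering cycle:");
                for (Node *k = from; k; k = k->generation == generation ? k->marker : NULL) {
                        printf(" %s", k->name);
                        if (k == n)
                                break;
                }
                printf("\n");
                return true;
        }

        n->marker = from;
        n->generation = generation;

        for (unsigned i = 0; i < 2 && n->before[i]; i++)
                if (find_cycle(n->before[i], n, generation))
                        return true;

        return false;
}

int main(void) {
        Node a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

        a.before[0] = &b;
        b.before[0] = &c;
        c.before[0] = &b;         /* b -> c -> b closes a cycle */

        if (!find_cycle(&a, NULL, 1))
                printf("no ordering cycle\n");

        return 0;
}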