#include <assert.h>
#include <errno.h>
#include <string.h>
+#include <sys/epoll.h>
+#include <signal.h>
+#include <sys/signalfd.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/poll.h>
#include "manager.h"
#include "hashmap.h"
Manager* manager_new(void) {
Manager *m;
+ sigset_t mask;
+ struct epoll_event ev;
if (!(m = new0(Manager, 1)))
return NULL;
+ /* Mark both fds as "not open yet" so the fail path below can
+ * tell which ones actually need closing. */
+ m->signal_fd = m->epoll_fd = -1;
+
if (!(m->names = hashmap_new(string_hash_func, string_compare_func)))
goto fail;
if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
goto fail;
+ if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
+ goto fail;
+
+ if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
+ goto fail;
+
+ /* Block SIGCHLD for normal delivery so it is received via the
+ * signalfd instead.
+ * NOTE(review): SIG_SETMASK replaces the entire signal mask,
+ * clobbering anything the caller had blocked; SIG_BLOCK may be
+ * the safer intent — confirm. */
+ assert_se(sigemptyset(&mask) == 0);
+ assert_se(sigaddset(&mask, SIGCHLD) == 0);
+ assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
+
+ if ((m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
+ goto fail;
+
+ /* Watch the signalfd for readability in the manager's epoll loop */
+ zero(ev);
+ ev.events = EPOLLIN;
+ ev.data.fd = m->signal_fd;
+
+ if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_fd, &ev) < 0)
+ goto fail;
+
return m;
fail:
hashmap_free(m->names);
hashmap_free(m->jobs);
hashmap_free(m->transaction_jobs);
+ hashmap_free(m->watch_pids);
+
+ if (m->epoll_fd >= 0)
+ close_nointr(m->epoll_fd);
+ if (m->signal_fd >= 0)
+ close_nointr(m->signal_fd);
free(m);
+ /* NOTE(review): no `return NULL;` is visible between free(m) and the
+ * closing brace — if this is not elided hunk context, the function
+ * falls off the end on failure, which is undefined behavior. Confirm. */
}
j->type = t;
j->state = JOB_WAITING;
+ j->forced = j->forced || other->forced;
j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
transaction_delete_job(m, other);
}
-static int delete_one_unmergable_job(Manager *m, Job *j) {
+static int delete_one_unmergeable_job(Manager *m, Job *j) {
Job *k;
assert(j);
/* We rely here on the fact that if a merged with b does not
* merge with c, either a or b merge with c neither */
- for (; j; j = j->transaction_next)
- for (k = j->transaction_next; k; k = k->transaction_next) {
+ LIST_FOREACH(transaction, j, j)
+ LIST_FOREACH(transaction, k, j->transaction_next) {
Job *d;
/* Is this one mergeable? Then skip it */
- if (job_type_mergeable(j->type, k->type))
+ if (job_type_is_mergeable(j->type, k->type))
continue;
/* Ok, we found two that conflict, let's see if we can
return -ENOEXEC;
/* Ok, we can drop one, so let's do so. */
- log_debug("Try to fix job merging by deleting job %s", name_id(d->name));
+ log_debug("Try to fix job merging by deleting job %s/%s", name_id(d->name), job_type_to_string(d->type));
transaction_delete_job(m, d);
return 0;
}
static int transaction_merge_jobs(Manager *m) {
Job *j;
- void *state;
+ Iterator i;
int r;
assert(m);
/* First step, check whether any of the jobs for one specific
* task conflict. If so, try to drop one of them. */
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
JobType t;
Job *k;
t = j->type;
- for (k = j->transaction_next; k; k = k->transaction_next) {
+ LIST_FOREACH(transaction, k, j->transaction_next) {
if ((r = job_type_merge(&t, k->type)) >= 0)
continue;
* action. Let's see if we can get rid of one
* of them */
- if ((r = delete_one_unmergable_job(m, j)) >= 0)
+ if ((r = delete_one_unmergeable_job(m, j)) >= 0)
/* Ok, we managed to drop one, now
* let's ask our callers to call us
* again after garbage collecting */
}
/* Second step, merge the jobs. */
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
JobType t = j->type;
Job *k;
/* Merge all transactions */
- for (k = j->transaction_next; k; k = k->transaction_next)
+ LIST_FOREACH(transaction, k, j->transaction_next)
assert_se(job_type_merge(&t, k->type) == 0);
- /* If an active job is mergable, merge it too */
+ /* If an active job is mergeable, merge it too */
if (j->name->meta.job)
job_type_merge(&t, j->name->meta.job->type); /* Might fail. Which is OK */
/* Checks whether at least one of the jobs for this name
* matters to the anchor. */
- for (; j; j = j->transaction_next)
+ LIST_FOREACH(transaction, j, j)
if (j->matters_to_anchor)
return true;
}
static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation) {
- void *state;
+ Iterator i;
Name *n;
int r;
!name_matters_to_anchor(k->name, k)) {
/* Ok, we can drop this one, so let's
* do so. */
- log_debug("Breaking order cycle by deleting job %s", name_id(k->name));
+ log_debug("Breaking order cycle by deleting job %s/%s", name_id(k->name), job_type_to_string(k->type));
transaction_delete_name(m, k->name);
return -EAGAIN;
}
/* We assume that the the dependencies are bidirectional, and
* hence can ignore NAME_AFTER */
- SET_FOREACH(n, j->name->meta.dependencies[NAME_BEFORE], state) {
+ SET_FOREACH(n, j->name->meta.dependencies[NAME_BEFORE], i) {
Job *o;
/* Is there a job for this name? */
static int transaction_verify_order(Manager *m, unsigned *generation) {
Job *j;
int r;
- void *state;
+ Iterator i;
assert(m);
assert(generation);
/* Check if the ordering graph is cyclic. If it is, try to fix
* that up by dropping one of the jobs. */
- HASHMAP_FOREACH(j, m->transaction_jobs, state)
+ HASHMAP_FOREACH(j, m->transaction_jobs, i)
if ((r = transaction_verify_order_one(m, j, NULL, (*generation)++)) < 0)
return r;
/* Drop jobs that are not required by any other job */
do {
- void *state;
+ Iterator i;
Job *j;
again = false;
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
if (j->object_list)
continue;
- log_debug("Garbage collecting job %s", name_id(j->name));
-
+ log_debug("Garbage collecting job %s/%s", name_id(j->name), job_type_to_string(j->type));
transaction_delete_job(m, j);
again = true;
break;
}
static int transaction_is_destructive(Manager *m, JobMode mode) {
- void *state;
+ Iterator i;
Job *j;
assert(m);
/* Checks whether applying this transaction means that
* existing jobs would be replaced */
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
/* Assume merged */
assert(!j->transaction_prev);
do {
Job *j;
- void *state;
+ Iterator i;
again = false;
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
- for (; j; j = j->transaction_next) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
+ LIST_FOREACH(transaction, j, j) {
/* If it matters, we shouldn't drop it */
if (j->matters_to_anchor)
/* Would this stop a running service?
* Would this change an existing job?
* If so, let's drop this entry */
- if ((j->type != JOB_STOP || name_is_dead(j->name)) &&
+ if ((j->type != JOB_STOP || NAME_IS_INACTIVE_OR_DEACTIVATING(name_active_state(j->name))) &&
(!j->name->meta.job || job_type_is_conflicting(j->type, j->name->meta.job->state)))
continue;
/* Ok, let's get rid of this */
+ log_debug("Deleting %s/%s to minimize impact", name_id(j->name), job_type_to_string(j->type));
transaction_delete_job(m, j);
again = true;
break;
}
static int transaction_apply(Manager *m, JobMode mode) {
- void *state;
+ Iterator i;
Job *j;
int r;
/* Moves the transaction jobs to the set of active jobs */
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
/* Assume merged */
assert(!j->transaction_prev);
assert(!j->transaction_next);
rollback:
- HASHMAP_FOREACH(j, m->transaction_jobs, state) {
+ HASHMAP_FOREACH(j, m->transaction_jobs, i) {
if (j->linked)
continue;
}
for (;;) {
- /* Fifth step: let's drop unmergable entries if
+ /* Fifth step: let's drop unmergeable entries if
* necessary and possible, merge entries we can
* merge */
if ((r = transaction_merge_jobs(m)) >= 0)
transaction_collect_garbage(m);
/* Let's see if the resulting transaction still has
- * unmergable entries ... */
+ * unmergeable entries ... */
}
/* Seventh step: check whether we can actually apply this */
return r;
}
-static Job* transaction_add_one_job(Manager *m, JobType type, Name *name, bool *is_new) {
+static Job* transaction_add_one_job(Manager *m, JobType type, Name *name, bool force, bool *is_new) {
Job *j, *f;
int r;
f = hashmap_get(m->transaction_jobs, name);
- for (j = f; j; j = j->transaction_next) {
+ LIST_FOREACH(transaction, j, f) {
assert(j->name == name);
if (j->type == type) {
else if (!(j = job_new(m, type, name)))
return NULL;
- if ((r = hashmap_replace(m->transaction_jobs, name, j)) < 0) {
- job_free(j);
- return NULL;
- }
-
- j->transaction_next = f;
-
- if (f)
- f->transaction_prev = j;
-
j->generation = 0;
j->marker = NULL;
j->matters_to_anchor = false;
+ j->forced = force;
+
+ LIST_PREPEND(Job, transaction, f, j);
+
+ if ((r = hashmap_replace(m->transaction_jobs, name, f)) < 0) {
+ job_free(j);
+ return NULL;
+ }
if (is_new)
*is_new = true;
job_dependency_free(j->object_list);
if (other) {
- log_debug("Deleting job %s as dependency of job %s", name_id(other->name), name_id(j->name));
+ log_debug("Deleting job %s/%s as dependency of job %s/%s",
+ name_id(other->name), job_type_to_string(other->type),
+ name_id(j->name), job_type_to_string(j->type));
transaction_delete_job(m, other);
}
}
static int transaction_add_job_and_dependencies(Manager *m, JobType type, Name *name, Job *by, bool matters, bool force, Job **_ret) {
Job *ret;
- void *state;
+ Iterator i;
Name *dep;
int r;
bool is_new;
assert(type < _JOB_TYPE_MAX);
assert(name);
- if (name->meta.state != NAME_LOADED)
+ if (name->meta.load_state != NAME_LOADED)
return -EINVAL;
- if (!job_type_applicable(type, name->meta.type))
+ if (!name_job_is_applicable(name, type))
return -EBADR;
/* First add the job. */
- if (!(ret = transaction_add_one_job(m, type, name, &is_new)))
+ if (!(ret = transaction_add_one_job(m, type, name, force, &is_new)))
return -ENOMEM;
/* Then, add a link to the job. */
if (is_new) {
/* Finally, recursively add in all dependencies. */
if (type == JOB_START || type == JOB_RELOAD_OR_START) {
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUIRES], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUIRES], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, force, NULL)) < 0 && r != -EBADR)
goto fail;
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_SOFT_REQUIRES], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !force, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_SOFT_REQUIRES], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !force, force, NULL)) < 0 && r != -EBADR)
goto fail;
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_WANTS], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_WANTS], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, force, NULL)) < 0 && r != -EBADR)
goto fail;
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUISITE], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_STARTED, dep, ret, true, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUISITE], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, force, NULL)) < 0 && r != -EBADR)
goto fail;
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_SOFT_REQUISITE], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_STARTED, dep, ret, !force, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_SOFT_REQUISITE], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !force, force, NULL)) < 0 && r != -EBADR)
goto fail;
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_CONFLICTS], state)
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_CONFLICTS], i)
+ if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, force, NULL)) < 0 && r != -EBADR)
goto fail;
} else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
- SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUIRED_BY], state)
- if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, force, NULL)) != -EBADR)
+ SET_FOREACH(dep, ret->name->meta.dependencies[NAME_REQUIRED_BY], i)
+ if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, force, NULL)) < 0 && r != -EBADR)
goto fail;
}
return hashmap_get(m->names, name);
}
-static int dispatch_load_queue(Manager *m) {
+static void dispatch_load_queue(Manager *m) {
Meta *meta;
assert(m);
/* Make sure we are not run recursively */
if (m->dispatching_load_queue)
- return 0;
+ return;
m->dispatching_load_queue = true;
* tries to load its data until the queue is empty */
while ((meta = m->load_queue)) {
+ assert(meta->linked);
+ assert(meta->in_load_queue);
+
name_load(NAME(meta));
- LIST_REMOVE(Meta, m->load_queue, meta);
}
m->dispatching_load_queue = false;
-
- return 0;
}
int manager_load_name(Manager *m, const char *name, Name **_ret) {
Name *ret;
- NameType t;
int r;
- char *n;
assert(m);
assert(name);
assert(_ret);
- if (!name_is_valid(name))
- return -EINVAL;
-
/* This will load the service information files, but not actually
* start any services or anything */
- if ((ret = manager_get_name(m, name)))
- goto finish;
-
- if ((t = name_type_from_string(name)) == _NAME_TYPE_INVALID)
- return -EINVAL;
+ if ((ret = manager_get_name(m, name))) {
+ *_ret = ret;
+ return 0;
+ }
if (!(ret = name_new(m)))
return -ENOMEM;
- ret->meta.type = t;
-
- if (!(n = strdup(name))) {
- name_free(ret);
- return -ENOMEM;
- }
-
- if ((r = set_put(ret->meta.names, n)) < 0) {
+ if ((r = name_add_name(ret, name)) < 0) {
name_free(ret);
- free(n);
return r;
}
return r;
}
- /* At this point the new entry is created and linked. However,
+ /* At this point the new entry is created and linked. However
* not loaded. Now load this entry and all its dependencies
* recursively */
dispatch_load_queue(m);
-finish:
-
*_ret = ret;
return 0;
}
void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
- void *state;
+ Iterator i;
Job *j;
assert(s);
assert(f);
- HASHMAP_FOREACH(j, s->jobs, state)
+ HASHMAP_FOREACH(j, s->jobs, i)
job_dump(j, f, prefix);
}
void manager_dump_names(Manager *s, FILE *f, const char *prefix) {
- void *state;
+ Iterator i;
Name *n;
const char *t;
assert(s);
assert(f);
- HASHMAP_FOREACH_KEY(n, t, s->names, state)
+ HASHMAP_FOREACH_KEY(n, t, s->names, i)
if (name_id(n) == t)
name_dump(n, f, prefix);
}
while ((j = hashmap_first(m->jobs)))
job_free(j);
}
+
+/* Run every job currently queued in m->run_queue. Re-entrancy safe:
+ * a recursive call (e.g. from a job handler) returns immediately. */
+void manager_dispatch_run_queue(Manager *m) {
+ Job *j;
+
+ assert(m);
+
+ if (m->dispatching_run_queue)
+ return;
+
+ m->dispatching_run_queue = true;
+
+ /* NOTE(review): assumes job_run_and_invalidate() removes j from
+ * m->run_queue (directly or via freeing the job); otherwise this
+ * loop never terminates — confirm. */
+ while ((j = m->run_queue)) {
+ assert(j->linked);
+ assert(j->in_run_queue);
+
+ job_run_and_invalidate(j);
+ }
+
+ m->dispatching_run_queue = false;
+}
+
+/* Reap terminated child processes and dispatch each exit to the Name
+ * registered for that PID in m->watch_pids. Returns 0 on success,
+ * a negative errno on waitid() failure. */
+static int manager_dispatch_sigchld(Manager *m) {
+ assert(m);
+
+ for (;;) {
+ siginfo_t si;
+ Name *n;
+
+ /* WNOHANG: never block; si.si_pid stays 0 when no child
+ * has changed state. */
+ zero(si);
+ if (waitid(P_ALL, 0, &si, WNOHANG) < 0)
+ /* NOTE(review): when no children exist at all, waitid()
+ * fails with ECHILD and this returns -ECHILD as a hard
+ * error — consider treating ECHILD as "nothing to do". */
+ return -errno;
+
+ if (si.si_pid == 0)
+ break;
+
+ /* Only genuine terminations matter; skip stop/continue events */
+ if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
+ continue;
+
+ /* Not a PID we are watching? Then ignore it. The entry is
+ * removed from the map here, so each exit fires at most once. */
+ if (!(n = hashmap_remove(m->watch_pids, UINT32_TO_PTR(si.si_pid))))
+ continue;
+
+ NAME_VTABLE(n)->sigchld_event(n, si.si_pid, si.si_code, si.si_status);
+ }
+
+ return 0;
+}
+
+/* Drain all queued signals from the signalfd, remembering whether a
+ * SIGCHLD was among them, and reap children afterwards if so.
+ * Returns 0 on success, -EIO on a short read, -errno on read failure. */
+static int manager_process_signal_fd(Manager *m) {
+ ssize_t n;
+ struct signalfd_siginfo sfsi;
+ bool sigchld = false;
+
+ assert(m);
+
+ for (;;) {
+ if ((n = read(m->signal_fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
+
+ /* signalfd reads are all-or-nothing; a short positive
+ * read should never happen */
+ if (n >= 0)
+ return -EIO;
+
+ /* Queue drained (fd is SFD_NONBLOCK). Break instead of
+ * returning so a collected SIGCHLD is still dispatched
+ * below; `return 0` here made that code unreachable. */
+ if (errno == EAGAIN)
+ break;
+
+ return -errno;
+ }
+
+ if (sfsi.ssi_signo == SIGCHLD)
+ sigchld = true;
+ }
+
+ if (sigchld)
+ return manager_dispatch_sigchld(m);
+
+ return 0;
+}
+
+/* Dispatch a single epoll event to the appropriate handler.
+ * Returns 0 on success, a negative errno-style code on failure. */
+static int process_event(Manager *m, struct epoll_event *ev) {
+ int r;
+
+ assert(m);
+ assert(ev);
+
+ /* NOTE(review): epoll_data is a union — data.u32, data.fd and
+ * data.ptr all overlap. Switching on data.u32 while also reading
+ * data.fd/data.ptr in the cases below only works if registration
+ * encodes tag and payload consistently; manager_new() stores the
+ * raw fd in ev.data.fd, which does not obviously match the
+ * MANAGER_* tags. Confirm how watches are registered. */
+ switch (ev->data.u32) {
+
+ case MANAGER_SIGNAL:
+ assert(ev->data.fd == m->signal_fd);
+
+ /* An incoming signal? */
+ if (ev->events != POLLIN)
+ return -EINVAL;
+
+ /* Propagate the negative error code unchanged; negating it
+ * (`return -r`) yielded a positive value that callers
+ * checking `< 0` mistook for success. */
+ if ((r = manager_process_signal_fd(m)) < 0)
+ return r;
+
+ break;
+
+ case MANAGER_FD: {
+ Name *n;
+
+ /* Some fd event, to be dispatched to the names */
+ assert_se(n = ev->data.ptr);
+ NAME_VTABLE(n)->fd_event(n, ev->data.fd, ev->events);
+ break;
+ }
+
+ case MANAGER_TIMER: {
+ Name *n;
+ uint64_t u;
+ ssize_t k;
+
+ /* Some timer event, to be dispatched to the names; u is the
+ * timerfd expiration count */
+ if ((k = read(ev->data.fd, &u, sizeof(u))) != sizeof(u)) {
+
+ if (k < 0 && (errno == EINTR || errno == EAGAIN))
+ break;
+
+ return k < 0 ? -errno : -EIO;
+ }
+
+ assert_se(n = ev->data.ptr);
+ NAME_VTABLE(n)->timer_event(n, ev->data.fd, u);
+ break;
+ }
+
+ default:
+ assert_not_reached("Unknown epoll event type.");
+ }
+
+ return 0;
+}
+
+/* Main event loop: flush the job run queue, then block in epoll_wait()
+ * and dispatch each event. Only returns on error (negative errno). */
+int manager_loop(Manager *m) {
+ int r;
+
+ assert(m);
+
+ for (;;) {
+ struct epoll_event events[32];
+ int n, i;
+
+ manager_dispatch_run_queue(m);
+
+ if ((n = epoll_wait(m->epoll_fd, events, ELEMENTSOF(events), -1)) < 0) {
+
+ /* errno is positive; the old comparison against -EINTR
+ * was always false, so an interrupted wait aborted the
+ * whole loop instead of retrying. */
+ if (errno == EINTR)
+ continue;
+
+ return -errno;
+ }
+
+ for (i = 0; i < n; i++)
+ if ((r = process_event(m, events + i)) < 0)
+ return r;
+ }
+}