#include <fcntl.h>
#include <fnmatch.h>
+#include "process-util.h"
#include "path-util.h"
-#include "special.h"
+// #include "special.h"
#include "cgroup-util.h"
#include "cgroup.h"
+#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
+
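+/* Worked example of the conversion done below in cgroup_context_apply()
+ * (illustration only): CPUQuota=25% stores cpu_quota_per_sec_usec =
+ * 250000 (i.e. 250ms per second); with the fixed 100ms period above,
+ * this writes 250000 * 100000 / 1000000 = 25000 to cpu.cfs_quota_us,
+ * i.e. 25ms of CPU time per 100ms period. */
+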
+/// UNNEEDED by elogind
+#if 0
void cgroup_context_init(CGroupContext *c) {
assert(c);
c->blockio_weight = (unsigned long) -1;
c->startup_blockio_weight = (unsigned long) -1;
- c->cpu_quota_per_sec_usec = (usec_t) -1;
- c->cpu_quota_usec = (usec_t) -1;
- c->cpu_quota_period_usec = 100*USEC_PER_MSEC;
+ c->cpu_quota_per_sec_usec = USEC_INFINITY;
}
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
cgroup_context_free_device_allow(c, c->device_allow);
}
-usec_t cgroup_context_get_cpu_quota_usec(CGroupContext *c) {
- assert(c);
-
- /* Returns the absolute CPU quota */
-
- if (c->cpu_quota_usec != (usec_t) -1)
- return c->cpu_quota_usec;
- else if (c->cpu_quota_per_sec_usec != (usec_t) -1)
- return c->cpu_quota_per_sec_usec*c->cpu_quota_period_usec/USEC_PER_SEC;
- else
- return (usec_t) -1;
-}
-
-usec_t cgroup_context_get_cpu_quota_per_sec_usec(CGroupContext *c) {
- assert(c);
-
- /* Returns the CPU quota relative to 1s */
-
- if (c->cpu_quota_usec != (usec_t) -1)
- return c->cpu_quota_usec*USEC_PER_SEC/c->cpu_quota_period_usec;
- else if (c->cpu_quota_per_sec_usec != (usec_t) -1)
- return c->cpu_quota_per_sec_usec;
- else
- return (usec_t) -1;
-}
-
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
CGroupBlockIODeviceBandwidth *b;
CGroupBlockIODeviceWeight *w;
CGroupDeviceAllow *a;
- char t[FORMAT_TIMESPAN_MAX], s[FORMAT_TIMESPAN_MAX], u[FORMAT_TIMESPAN_MAX];
+ char u[FORMAT_TIMESPAN_MAX];
assert(c);
assert(f);
"%sMemoryAccounting=%s\n"
"%sCPUShares=%lu\n"
"%sStartupCPUShares=%lu\n"
- "%sCPUQuota=%s\n"
"%sCPUQuotaPerSecSec=%s\n"
- "%sCPUQuotaPeriodSec=%s\n"
"%sBlockIOWeight=%lu\n"
"%sStartupBlockIOWeight=%lu\n"
"%sMemoryLimit=%" PRIu64 "\n"
- "%sDevicePolicy=%s\n",
+ "%sDevicePolicy=%s\n"
+ "%sDelegate=%s\n",
prefix, yes_no(c->cpu_accounting),
prefix, yes_no(c->blockio_accounting),
prefix, yes_no(c->memory_accounting),
prefix, c->cpu_shares,
prefix, c->startup_cpu_shares,
- prefix, strna(format_timespan(u, sizeof(u), cgroup_context_get_cpu_quota_usec(c), 1)),
- prefix, strna(format_timespan(t, sizeof(t), cgroup_context_get_cpu_quota_per_sec_usec(c), 1)),
- prefix, strna(format_timespan(s, sizeof(s), c->cpu_quota_period_usec, 1)),
+ prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
prefix, c->blockio_weight,
prefix, c->startup_blockio_weight,
prefix, c->memory_limit,
- prefix, cgroup_device_policy_to_string(c->device_policy));
+ prefix, cgroup_device_policy_to_string(c->device_policy),
+ prefix, yes_no(c->delegate));
LIST_FOREACH(device_allow, a, c->device_allow)
fprintf(f,
assert(dev);
r = stat(p, &st);
- if (r < 0) {
- log_warning("Couldn't stat device %s: %m", p);
- return -errno;
- }
+ if (r < 0)
+ return log_warning_errno(errno, "Couldn't stat device %s: %m", p);
if (S_ISBLK(st.st_mode))
*dev = st.st_rdev;
r = cg_set_attribute("devices", path, "devices.allow", buf);
if (r < 0)
- log_warning("Failed to set devices.allow on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set devices.allow on %s: %m", path);
return r;
}
assert(type == 'b' || type == 'c');
f = fopen("/proc/devices", "re");
- if (!f) {
- log_warning("Cannot open /proc/devices to resolve %s (%c): %m", name, type);
- return -errno;
- }
+ if (!f)
+ return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
FOREACH_LINE(line, f, goto fail) {
char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
r = cg_set_attribute("devices", path, "devices.allow", buf);
if (r < 0)
- log_warning("Failed to set devices.allow on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set devices.allow on %s: %m", path);
}
return 0;
fail:
- log_warning("Failed to read /proc/devices: %m");
+ log_warning_errno(errno, "Failed to read /proc/devices: %m");
return -errno;
}
-void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const char *path, ManagerState state) {
+void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, ManagerState state) {
bool is_root;
int r;
if (mask == 0)
return;
- /* Some cgroup attributes are not support on the root cgroup,
+ /* Some cgroup attributes are not supported on the root cgroup,
* hence silently ignore */
is_root = isempty(path) || path_equal(path, "/");
+ if (is_root)
+ /* Make sure we don't try to display messages with an empty path. */
+ path = "/";
+
+ /* We generally ignore errors caused by read-only mounted
+ * cgroup trees (assuming we are running in a container then),
+ * and missing cgroups, i.e. EROFS and ENOENT. */
- if ((mask & CGROUP_CPU) && !is_root) {
+ if ((mask & CGROUP_MASK_CPU) && !is_root) {
char buf[MAX(DECIMAL_STR_MAX(unsigned long), DECIMAL_STR_MAX(usec_t)) + 1];
- usec_t q;
sprintf(buf, "%lu\n",
- state == MANAGER_STARTING && c->startup_cpu_shares != (unsigned long) -1 ? c->startup_cpu_shares :
+ IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != (unsigned long) -1 ? c->startup_cpu_shares :
c->cpu_shares != (unsigned long) -1 ? c->cpu_shares : 1024);
r = cg_set_attribute("cpu", path, "cpu.shares", buf);
if (r < 0)
- log_warning("Failed to set cpu.shares on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set cpu.shares on %s: %m", path);
- sprintf(buf, USEC_FMT "\n", c->cpu_quota_period_usec);
+ sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
if (r < 0)
- log_warning("Failed to set cpu.cfs_period_us on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set cpu.cfs_period_us on %s: %m", path);
- q = cgroup_context_get_cpu_quota_usec(c);
- if (q != (usec_t) -1) {
- sprintf(buf, USEC_FMT "\n", q);
+ if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
+ sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
} else
r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
if (r < 0)
- log_warning("Failed to set cpu.cfs_quota_us on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set cpu.cfs_quota_us on %s: %m", path);
}
- if (mask & CGROUP_BLKIO) {
+ if (mask & CGROUP_MASK_BLKIO) {
char buf[MAX3(DECIMAL_STR_MAX(unsigned long)+1,
DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(unsigned long)*1,
DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
CGroupBlockIODeviceBandwidth *b;
if (!is_root) {
- sprintf(buf, "%lu\n", state == MANAGER_STARTING && c->startup_blockio_weight != (unsigned long) -1 ? c->startup_blockio_weight :
+ sprintf(buf, "%lu\n", IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != (unsigned long) -1 ? c->startup_blockio_weight :
c->blockio_weight != (unsigned long) -1 ? c->blockio_weight : 1000);
r = cg_set_attribute("blkio", path, "blkio.weight", buf);
if (r < 0)
- log_warning("Failed to set blkio.weight on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set blkio.weight on %s: %m", path);
/* FIXME: no way to reset this list */
LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
sprintf(buf, "%u:%u %lu", major(dev), minor(dev), w->weight);
r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
if (r < 0)
- log_error("Failed to set blkio.weight_device on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set blkio.weight_device on %s: %m", path);
}
}
sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
r = cg_set_attribute("blkio", path, a, buf);
if (r < 0)
- log_error("Failed to set %s on %s: %s", a, path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set %s on %s: %m", a, path);
}
}
- if (mask & CGROUP_MEMORY) {
+ if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
if (c->memory_limit != (uint64_t) -1) {
char buf[DECIMAL_STR_MAX(uint64_t) + 1];
sprintf(buf, "%" PRIu64 "\n", c->memory_limit);
- r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
- } else
- r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
+
+ if (cg_unified() <= 0)
+ r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
+ else
+ r = cg_set_attribute("memory", path, "memory.max", buf);
+
+ } else {
+ if (cg_unified() <= 0)
+ r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
+ else
+ r = cg_set_attribute("memory", path, "memory.max", "max");
+ }
if (r < 0)
- log_error("Failed to set memory.limit_in_bytes on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
}
- if ((mask & CGROUP_DEVICE) && !is_root) {
+ if ((mask & CGROUP_MASK_DEVICE) && !is_root) {
CGroupDeviceAllow *a;
+ /* Changing the devices list of a populated cgroup
+ * might result in EINVAL, hence ignore EINVAL
+ * here. */
+
if (c->device_allow || c->device_policy != CGROUP_AUTO)
r = cg_set_attribute("devices", path, "devices.deny", "a");
else
r = cg_set_attribute("devices", path, "devices.allow", "a");
if (r < 0)
- log_warning("Failed to reset devices.list on %s: %s", path, strerror(-r));
+ log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
+ "Failed to reset devices.list on %s: %m", path);
if (c->device_policy == CGROUP_CLOSED ||
(c->device_policy == CGROUP_AUTO && c->device_allow)) {
}
}
-CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) {
- CGroupControllerMask mask = 0;
+CGroupMask cgroup_context_get_mask(CGroupContext *c) {
+ CGroupMask mask = 0;
/* Figure out which controllers we need */
if (c->cpu_accounting ||
c->cpu_shares != (unsigned long) -1 ||
c->startup_cpu_shares != (unsigned long) -1 ||
- c->cpu_quota_usec != (usec_t) -1 ||
- c->cpu_quota_per_sec_usec != (usec_t) -1)
- mask |= CGROUP_CPUACCT | CGROUP_CPU;
+ c->cpu_quota_per_sec_usec != USEC_INFINITY)
+ mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
if (c->blockio_accounting ||
c->blockio_weight != (unsigned long) -1 ||
c->startup_blockio_weight != (unsigned long) -1 ||
c->blockio_device_weights ||
c->blockio_device_bandwidths)
- mask |= CGROUP_BLKIO;
+ mask |= CGROUP_MASK_BLKIO;
if (c->memory_accounting ||
c->memory_limit != (uint64_t) -1)
- mask |= CGROUP_MEMORY;
+ mask |= CGROUP_MASK_MEMORY;
- if (c->device_allow || c->device_policy != CGROUP_AUTO)
- mask |= CGROUP_DEVICE;
+ if (c->device_allow ||
+ c->device_policy != CGROUP_AUTO)
+ mask |= CGROUP_MASK_DEVICE;
return mask;
}
-CGroupControllerMask unit_get_cgroup_mask(Unit *u) {
+CGroupMask unit_get_own_mask(Unit *u) {
CGroupContext *c;
+ /* Returns the mask of controllers the unit needs for itself */
+
c = unit_get_cgroup_context(u);
if (!c)
return 0;
+ /* If delegation is turned on, then turn on all cgroups,
+ * unless we are on the legacy hierarchy and the process we
+ * fork into it is known to drop privileges, and hence
+ * shouldn't get access to the controllers.
+ *
+ * Note that on the unified hierarchy it is safe to delegate
+ * controllers to unprivileged services. */
+
+ if (c->delegate) {
+ ExecContext *e;
+
+ e = unit_get_exec_context(u);
+ if (!e ||
+ exec_context_maintains_privileges(e) ||
+ cg_unified() > 0)
+ return _CGROUP_MASK_ALL;
+ }
+
return cgroup_context_get_mask(c);
}
-CGroupControllerMask unit_get_members_mask(Unit *u) {
+CGroupMask unit_get_members_mask(Unit *u) {
assert(u);
+ /* Returns the mask of controllers all of the unit's children
+ * require, merged */
+
if (u->cgroup_members_mask_valid)
return u->cgroup_members_mask;
continue;
u->cgroup_members_mask |=
- unit_get_cgroup_mask(member) |
+ unit_get_own_mask(member) |
unit_get_members_mask(member);
}
}
return u->cgroup_members_mask;
}
-CGroupControllerMask unit_get_siblings_mask(Unit *u) {
+CGroupMask unit_get_siblings_mask(Unit *u) {
assert(u);
+ /* Returns the mask of controllers all of the unit's siblings
+ * require, i.e. the members mask of the unit's parent slice
+ * if there is one. */
+
if (UNIT_ISSET(u->slice))
return unit_get_members_mask(UNIT_DEREF(u->slice));
- return unit_get_cgroup_mask(u) | unit_get_members_mask(u);
+ return unit_get_own_mask(u) | unit_get_members_mask(u);
+}
+
+CGroupMask unit_get_subtree_mask(Unit *u) {
+
+ /* Returns the mask of this subtree, meaning of the group
+ * itself and its children. */
+
+ return unit_get_own_mask(u) | unit_get_members_mask(u);
+}
+
+CGroupMask unit_get_target_mask(Unit *u) {
+ CGroupMask mask;
+
+ /* This returns the cgroup mask of all controllers to enable
+ * for a specific cgroup, i.e. everything it needs itself,
+ * plus all that its children need, plus all that its siblings
+ * need. This is primarily useful on the legacy cgroup
+ * hierarchy, where we need to duplicate each cgroup in each
+ * hierarchy that shall be enabled for it. */
+
+ mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
+ mask &= u->manager->cgroup_supported;
+
+ return mask;
}
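+
+/* Illustration of the mask family above (a sketch with hypothetical
+ * units): given a.slice containing b.service (which sets CPUShares=)
+ * and c.service (which sets MemoryLimit=):
+ *
+ * own mask of b.service = CGROUP_MASK_CPUACCT|CGROUP_MASK_CPU
+ * members mask of a.slice = CPUACCT|CPU|MEMORY (children, merged)
+ * siblings mask of b.service = members mask of a.slice
+ * target mask of b.service = own|members|siblings, limited to the
+ * controllers the manager supports */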
-CGroupControllerMask unit_get_target_mask(Unit *u) {
- CGroupControllerMask mask;
+CGroupMask unit_get_enable_mask(Unit *u) {
+ CGroupMask mask;
- mask = unit_get_cgroup_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
+ /* This returns the cgroup mask of all controllers to enable
+ * for the children of a specific cgroup. This is primarily
+ * useful for the unified cgroup hierarchy, where each cgroup
+ * controls which controllers are enabled for its children. */
+
+ mask = unit_get_members_mask(u);
mask &= u->manager->cgroup_supported;
return mask;
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
- CGroupControllerMask m;
+ CGroupMask m;
bool more;
assert(u);
/* Calculate subtree mask */
- m = unit_get_cgroup_mask(u) | unit_get_members_mask(u);
+ m = unit_get_subtree_mask(u);
/* See if anything changed from the previous invocation. If
* not, we're done. */
}
}
-static const char *migrate_callback(CGroupControllerMask mask, void *userdata) {
+static const char *migrate_callback(CGroupMask mask, void *userdata) {
Unit *u = userdata;
assert(mask != 0);
return NULL;
}
-static int unit_create_cgroups(Unit *u, CGroupControllerMask mask) {
- _cleanup_free_ char *path = NULL;
+char *unit_default_cgroup_path(Unit *u) {
+ _cleanup_free_ char *escaped = NULL, *slice = NULL;
+ int r;
+
+ assert(u);
+
+ if (unit_has_name(u, SPECIAL_ROOT_SLICE))
+ return strdup(u->manager->cgroup_root);
+
+ if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
+ r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
+ if (r < 0)
+ return NULL;
+ }
+
+ escaped = cg_escape(u->id);
+ if (!escaped)
+ return NULL;
+
+ if (slice)
+ return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
+ else
+ return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
+}
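+
+/* Example (with hypothetical unit names): for "foo.service" in
+ * "bar.slice" this returns "<cgroup_root>/bar.slice/foo.service".
+ * cg_slice_to_path() expands nested slices, so "a-b.slice" becomes
+ * "a.slice/a-b.slice", and cg_escape() only rewrites unit names that
+ * would collide with kernel-reserved cgroup file names. */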
+
+int unit_set_cgroup_path(Unit *u, const char *path) {
+ _cleanup_free_ char *p = NULL;
int r;
assert(u);
- path = unit_default_cgroup_path(u);
- if (!path)
+ if (path) {
+ p = strdup(path);
+ if (!p)
+ return -ENOMEM;
+ } else
+ p = NULL;
+
+ if (streq_ptr(u->cgroup_path, p))
+ return 0;
+
+ if (p) {
+ r = hashmap_put(u->manager->cgroup_unit, p, u);
+ if (r < 0)
+ return r;
+ }
+
+ unit_release_cgroup(u);
+
+ u->cgroup_path = p;
+ p = NULL;
+
+ return 1;
+}
+
+int unit_watch_cgroup(Unit *u) {
+ _cleanup_free_ char *populated = NULL;
+ int r;
+
+ assert(u);
+
+ if (!u->cgroup_path)
+ return 0;
+
+ if (u->cgroup_inotify_wd >= 0)
+ return 0;
+
+ /* Only applies to the unified hierarchy */
+ r = cg_unified();
+ if (r < 0)
+ return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
+ if (r == 0)
+ return 0;
+
+ /* Don't watch the root slice, it's pointless. */
+ if (unit_has_name(u, SPECIAL_ROOT_SLICE))
+ return 0;
+
+ r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
+ if (r < 0)
return log_oom();
- r = hashmap_put(u->manager->cgroup_unit, path, u);
- if (r < 0) {
- log_error(r == -EEXIST ? "cgroup %s exists already: %s" : "hashmap_put failed for %s: %s", path, strerror(-r));
- return r;
+ r = cg_get_path(ELOGIND_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.populated", &populated);
+ if (r < 0)
+ return log_oom();
+
+ u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, populated, IN_MODIFY);
+ if (u->cgroup_inotify_wd < 0) {
+
+ if (errno == ENOENT) /* If the directory is already
+ * gone we don't need to track
+ * it, so this is not an error */
+ return 0;
+
+ return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
}
- if (r > 0) {
- u->cgroup_path = path;
- path = NULL;
+
+ r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
+ if (r < 0)
+ return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
+
+ return 0;
+}
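+
+/* Background for the watch set up above (as assumed by this code): on
+ * the early unified hierarchy each cgroup exposes a "cgroup.populated"
+ * file whose content flips between "0" and "1"; every flip triggers
+ * IN_MODIFY, which is how on_cgroup_inotify_event() further down
+ * learns that a cgroup ran empty. */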
+
+static int unit_create_cgroup(
+ Unit *u,
+ CGroupMask target_mask,
+ CGroupMask enable_mask) {
+
+ CGroupContext *c;
+ int r;
+
+ assert(u);
+
+ c = unit_get_cgroup_context(u);
+ if (!c)
+ return 0;
+
+ if (!u->cgroup_path) {
+ _cleanup_free_ char *path = NULL;
+
+ path = unit_default_cgroup_path(u);
+ if (!path)
+ return log_oom();
+
+ r = unit_set_cgroup_path(u, path);
+ if (r == -EEXIST)
+ return log_unit_error_errno(u, r, "Control group %s exists already.", path);
+ if (r < 0)
+ return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
}
/* First, create our own group */
- r = cg_create_everywhere(u->manager->cgroup_supported, mask, u->cgroup_path);
- if (r < 0) {
- log_error("Failed to create cgroup %s: %s", u->cgroup_path, strerror(-r));
- return r;
- }
+ r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
+ if (r < 0)
+ return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
+
+ /* Start watching it */
+ (void) unit_watch_cgroup(u);
+
+ /* Enable all controllers we need */
+ r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
+ if (r < 0)
+ log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);
/* Keep track that this is now realized */
u->cgroup_realized = true;
- u->cgroup_realized_mask = mask;
+ u->cgroup_realized_mask = target_mask;
+
+ if (u->type != UNIT_SLICE && !c->delegate) {
+
+ /* Then, possibly move things over, but not if
+ * subgroups may contain processes, which is the case
+ * for slice and delegation units. */
+ r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
+ if (r < 0)
+ log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
+ }
+
+ return 0;
+}
+
+int unit_attach_pids_to_cgroup(Unit *u) {
+ int r;
+ assert(u);
+
+ r = unit_realize_cgroup(u);
+ if (r < 0)
+ return r;
- /* Then, possibly move things over */
- r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
+ r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
if (r < 0)
- log_warning("Failed to migrate cgroup from to %s: %s", u->cgroup_path, strerror(-r));
+ return r;
return 0;
}
-static bool unit_has_mask_realized(Unit *u, CGroupControllerMask mask) {
+static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask) {
assert(u);
- return u->cgroup_realized && u->cgroup_realized_mask == mask;
+ return u->cgroup_realized && u->cgroup_realized_mask == target_mask;
}
/* Check if necessary controllers and attributes for a unit are in place.
*
* Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
- CGroupControllerMask mask;
+ CGroupMask target_mask, enable_mask;
int r;
assert(u);
u->in_cgroup_queue = false;
}
- mask = unit_get_target_mask(u);
-
- if (unit_has_mask_realized(u, mask))
+ target_mask = unit_get_target_mask(u);
+ if (unit_has_mask_realized(u, target_mask))
return 0;
/* First, realize parents */
}
/* And then do the real work */
- r = unit_create_cgroups(u, mask);
+ enable_mask = unit_get_enable_mask(u);
+ r = unit_create_cgroup(u, target_mask, enable_mask);
if (r < 0)
return r;
/* Finally, apply the necessary attributes. */
- cgroup_context_apply(unit_get_cgroup_context(u), mask, u->cgroup_path, state);
+ cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, state);
return 0;
}
r = unit_realize_cgroup_now(i, state);
if (r < 0)
- log_warning("Failed to realize cgroups for queued unit %s: %s", i->id, strerror(-r));
+ log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
n++;
}
}
int unit_realize_cgroup(Unit *u) {
- CGroupContext *c;
-
assert(u);
- c = unit_get_cgroup_context(u);
- if (!c)
+ if (!UNIT_HAS_CGROUP_CONTEXT(u))
return 0;
/* So, here's the deal: when realizing the cgroups for this
return unit_realize_cgroup_now(u, manager_state(u->manager));
}
-void unit_destroy_cgroup(Unit *u) {
+void unit_release_cgroup(Unit *u) {
+ assert(u);
+
+ /* Forgets all cgroup details for this cgroup */
+
+ if (u->cgroup_path) {
+ (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
+ u->cgroup_path = mfree(u->cgroup_path);
+ }
+
+ if (u->cgroup_inotify_wd >= 0) {
+ if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
+ log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);
+
+ (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
+ u->cgroup_inotify_wd = -1;
+ }
+}
+
+void unit_prune_cgroup(Unit *u) {
int r;
+ bool is_root_slice;
assert(u);
+ /* Removes the cgroup, if empty and possible, and stops watching it. */
+
if (!u->cgroup_path)
return;
- r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !unit_has_name(u, SPECIAL_ROOT_SLICE));
- if (r < 0)
- log_debug("Failed to destroy cgroup %s: %s", u->cgroup_path, strerror(-r));
+ is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
- hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
+ r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
+ return;
+ }
+
+ if (is_root_slice)
+ return;
+
+ unit_release_cgroup(u);
- free(u->cgroup_path);
- u->cgroup_path = NULL;
u->cgroup_realized = false;
u->cgroup_realized_mask = 0;
-
}
-pid_t unit_search_main_pid(Unit *u) {
+int unit_search_main_pid(Unit *u, pid_t *ret) {
_cleanup_fclose_ FILE *f = NULL;
pid_t pid = 0, npid, mypid;
+ int r;
assert(u);
+ assert(ret);
if (!u->cgroup_path)
- return 0;
+ return -ENXIO;
- if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f) < 0)
- return 0;
+ r = cg_enumerate_processes(ELOGIND_CGROUP_CONTROLLER, u->cgroup_path, &f);
+ if (r < 0)
+ return r;
mypid = getpid();
while (cg_read_pid(f, &npid) > 0) {
if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)
continue;
- if (pid != 0) {
+ if (pid != 0)
/* Dang, there's more than one daemonized PID
in this group, so we don't know what process
is the main process. */
- pid = 0;
- break;
- }
+ return -ENODATA;
pid = npid;
}
- return pid;
+ *ret = pid;
+ return 0;
}
-int manager_setup_cgroup(Manager *m) {
- _cleanup_free_ char *path = NULL;
- char *e;
- int r;
+static int unit_watch_pids_in_path(Unit *u, const char *path) {
+ _cleanup_closedir_ DIR *d = NULL;
+ _cleanup_fclose_ FILE *f = NULL;
+ int ret = 0, r;
- assert(m);
+ assert(u);
+ assert(path);
- /* 1. Determine hierarchy */
- free(m->cgroup_root);
- m->cgroup_root = NULL;
+ r = cg_enumerate_processes(ELOGIND_CGROUP_CONTROLLER, path, &f);
+ if (r < 0)
+ ret = r;
+ else {
+ pid_t pid;
+
+ while ((r = cg_read_pid(f, &pid)) > 0) {
+ r = unit_watch_pid(u, pid);
+ if (r < 0 && ret >= 0)
+ ret = r;
+ }
+
+ if (r < 0 && ret >= 0)
+ ret = r;
+ }
- r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
+ r = cg_enumerate_subgroups(ELOGIND_CGROUP_CONTROLLER, path, &d);
if (r < 0) {
- log_error("Cannot determine cgroup we are running in: %s", strerror(-r));
+ if (ret >= 0)
+ ret = r;
+ } else {
+ char *fn;
+
+ while ((r = cg_read_subgroup(d, &fn)) > 0) {
+ _cleanup_free_ char *p = NULL;
+
+ p = strjoin(path, "/", fn, NULL);
+ free(fn);
+
+ if (!p)
+ return -ENOMEM;
+
+ r = unit_watch_pids_in_path(u, p);
+ if (r < 0 && ret >= 0)
+ ret = r;
+ }
+
+ if (r < 0 && ret >= 0)
+ ret = r;
+ }
+
+ return ret;
+}
+
+int unit_watch_all_pids(Unit *u) {
+ assert(u);
+
+ /* Adds all PIDs from our cgroup to the set of PIDs we
+ * watch. This is a fallback logic for cases where we do not
+ * get reliable cgroup empty notifications: we try to use
+ * SIGCHLD as replacement. */
+
+ if (!u->cgroup_path)
+ return -ENOENT;
+
+ if (cg_unified() > 0) /* On unified we can use proper notifications */
+ return 0;
+
+ return unit_watch_pids_in_path(u, u->cgroup_path);
+}
+
+int unit_notify_cgroup_empty(Unit *u) {
+ int r;
+
+ assert(u);
+
+ if (!u->cgroup_path)
+ return 0;
+
+ r = cg_is_empty_recursive(ELOGIND_CGROUP_CONTROLLER, u->cgroup_path);
+ if (r <= 0)
return r;
+
+ unit_add_to_gc_queue(u);
+
+ if (UNIT_VTABLE(u)->notify_cgroup_empty)
+ UNIT_VTABLE(u)->notify_cgroup_empty(u);
+
+ return 0;
+}
+
+static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
+ Manager *m = userdata;
+
+ assert(s);
+ assert(fd >= 0);
+ assert(m);
+
+ for (;;) {
+ union inotify_event_buffer buffer;
+ struct inotify_event *e;
+ ssize_t l;
+
+ l = read(fd, &buffer, sizeof(buffer));
+ if (l < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ return 0;
+
+ return log_error_errno(errno, "Failed to read control group inotify events: %m");
+ }
+
+ FOREACH_INOTIFY_EVENT(e, buffer, l) {
+ Unit *u;
+
+ if (e->wd < 0)
+ /* Queue overflow has no watch descriptor */
+ continue;
+
+ if (e->mask & IN_IGNORED)
+ /* The watch was just removed */
+ continue;
+
+ u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
+ if (!u) /* Note that inotify might deliver
+ * events for a watch even after it
+ * was removed, because it was queued
+ * before the removal. Let's ignore
+ * this here safely. */
+ continue;
+
+ (void) unit_notify_cgroup_empty(u);
+ }
}
+}
+#endif // 0
- /* LEGACY: Already in /system.slice? If so, let's cut this
- * off. This is to support live upgrades from older systemd
- * versions where PID 1 was moved there. */
- if (m->running_as == SYSTEMD_SYSTEM) {
+int manager_setup_cgroup(Manager *m) {
+ _cleanup_free_ char *path = NULL;
+ CGroupController c;
+ int r, unified;
+ char *e;
+
+ assert(m);
+
+ /* 1. Determine hierarchy */
+ m->cgroup_root = mfree(m->cgroup_root);
+ r = cg_pid_get_path(ELOGIND_CGROUP_CONTROLLER, 0, &m->cgroup_root);
+ if (r < 0)
+ return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
+/// elogind does not support systemd scopes and slices
+#if 0
+ /* Chop off the init scope, if we are already located in it */
+ e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
+
+ /* LEGACY: Also chop off the system slice if we are in
+ * it. This is to support live upgrades from older systemd
+ * versions where PID 1 was moved there. Also see
+ * cg_get_root_path(). */
+ if (!e) {
e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
if (!e)
- e = endswith(m->cgroup_root, "/system");
- if (e)
- *e = 0;
+ e = endswith(m->cgroup_root, "/system"); /* even more legacy */
}
+ if (e)
+ *e = 0;
+#endif // 0
/* And make sure to store away the root value without trailing
* slash, even for the root dir, so that we can easily prepend
* it everywhere. */
- if (streq(m->cgroup_root, "/"))
- m->cgroup_root[0] = 0;
+ while ((e = endswith(m->cgroup_root, "/")))
+ *e = 0;
+ log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
+ ELOGIND_CGROUP_CONTROLLER, m->cgroup_root);
/* 2. Show data */
- r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
- if (r < 0) {
- log_error("Cannot find cgroup mount point: %s", strerror(-r));
- return r;
- }
+ r = cg_get_path(ELOGIND_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
+ if (r < 0)
+ return log_error_errno(r, "Cannot find cgroup mount point: %m");
- log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
+ unified = cg_unified();
+ if (unified < 0)
+ return log_error_errno(unified, "Couldn't determine if we are running in the unified hierarchy: %m");
+ if (unified > 0)
+ log_debug("Unified cgroup hierarchy is located at %s.", path);
+ else
+ log_debug("Using cgroup controller " ELOGIND_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
- /* 3. Install agent */
- if (m->running_as == SYSTEMD_SYSTEM) {
- r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
- if (r < 0)
- log_warning("Failed to install release agent, ignoring: %s", strerror(-r));
- else if (r > 0)
- log_debug("Installed release agent.");
+ if (!m->test_run) {
+ const char *scope_path;
+
+ /* 3. Install agent */
+ if (unified) {
+
+ /* In the unified hierarchy we can get
+ * cgroup empty notifications via inotify. */
+/// elogind does not support the unified hierarchy, yet.
+#if 0
+ m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
+ safe_close(m->cgroup_inotify_fd);
+
+ m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (m->cgroup_inotify_fd < 0)
+ return log_error_errno(errno, "Failed to create control group inotify object: %m");
+
+ r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
+ if (r < 0)
+ return log_error_errno(r, "Failed to watch control group inotify object: %m");
+
+ r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_IDLE - 5);
+ if (r < 0)
+ return log_error_errno(r, "Failed to set priority of inotify event source: %m");
+
+ (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
+
+#else
+ return log_error_errno(EOPNOTSUPP, "Unified cgroup hierarchy not supported: %m");
+#endif // elogind
+ } else if (m->running_as == MANAGER_SYSTEM) {
+ /* On the legacy hierarchy we only get
+ * notifications via cgroup agents. (Which
+ * isn't really reliable, since it does not
+ * generate events when control groups with
+ * children run empty.) */
+
+ r = cg_install_release_agent(ELOGIND_CGROUP_CONTROLLER, ELOGIND_CGROUP_AGENT_PATH);
+ if (r < 0)
+ log_warning_errno(r, "Failed to install release agent, ignoring: %m");
+ else if (r > 0)
+ log_debug("Installed release agent.");
+ else if (r == 0)
+ log_debug("Release agent already installed.");
+ }
+
+/// elogind is not meant to run in systemd init scope
+#if 0
+ /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
+ scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
+ r = cg_create_and_attach(ELOGIND_CGROUP_CONTROLLER, scope_path, 0);
+#else
+ if (streq(m->cgroup_root, "/elogind"))
+ // root already is our cgroup
+ scope_path = strjoina(m->cgroup_root);
else
- log_debug("Release agent already installed.");
- }
+ // we have to create our own group
+ scope_path = strjoina(m->cgroup_root, "/elogind");
+ r = cg_create_and_attach(ELOGIND_CGROUP_CONTROLLER, scope_path, 0);
+#endif // 0
+ if (r < 0)
+ return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
+ log_debug_elogind("Created control group \"%s\"", scope_path);
- /* 4. Make sure we are in the root cgroup */
- r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, 0);
- if (r < 0) {
- log_error("Failed to create root cgroup hierarchy: %s", strerror(-r));
- return r;
- }
+ /* also, move all other userspace processes remaining
+ * in the root cgroup into that scope. */
+ if (!streq(m->cgroup_root, scope_path)) {
+ r = cg_migrate(ELOGIND_CGROUP_CONTROLLER, m->cgroup_root, ELOGIND_CGROUP_CONTROLLER, scope_path, false);
+ if (r < 0)
+ log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
+ }
- /* 5. And pin it, so that it cannot be unmounted */
- safe_close(m->pin_cgroupfs_fd);
+ /* 5. And pin it, so that it cannot be unmounted */
+ safe_close(m->pin_cgroupfs_fd);
+ m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
+ if (m->pin_cgroupfs_fd < 0)
+ return log_error_errno(errno, "Failed to open pin file: %m");
- m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
- if (m->pin_cgroupfs_fd < 0) {
- log_error("Failed to open pin file: %m");
- return -errno;
+ /* 6. Always enable hierarchical support if it exists... */
+ if (!unified)
+ (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
}
- /* 6. Figure out which controllers are supported */
- m->cgroup_supported = cg_mask_supported();
+ /* 7. Figure out which controllers are supported */
+ r = cg_mask_supported(&m->cgroup_supported);
+ if (r < 0)
+ return log_error_errno(r, "Failed to determine supported controllers: %m");
- /* 7. Always enable hierarchial support if it exists... */
- cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
+ for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
+ log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
return 0;
}
/* We can't really delete the group, since we are in it. But
* let's trim it. */
if (delete && m->cgroup_root)
- cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
+ (void) cg_trim(ELOGIND_CGROUP_CONTROLLER, m->cgroup_root, false);
+
+/// elogind does not support the unified hierarchy, yet.
+#if 0
+ m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
+
+ m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
+ m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
+#endif // 0
m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
- free(m->cgroup_root);
- m->cgroup_root = NULL;
+ m->cgroup_root = mfree(m->cgroup_root);
}
+/// UNNEEDED by elogind
+#if 0
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
char *p;
Unit *u;
char *e;
e = strrchr(p, '/');
- if (e == p || !e)
- return NULL;
+ if (!e || e == p)
+ return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
*e = 0;
}
}
-Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
+Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
_cleanup_free_ char *cgroup = NULL;
int r;
assert(m);
- if (pid <= 1)
+ if (pid <= 0)
return NULL;
- r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
+ r = cg_pid_get_path(ELOGIND_CGROUP_CONTROLLER, pid, &cgroup);
if (r < 0)
return NULL;
return manager_get_unit_by_cgroup(m, cgroup);
}
+Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
+ Unit *u;
+
+ assert(m);
+
+ if (pid <= 0)
+ return NULL;
+
+ if (pid == 1)
+ return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
+
+ u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
+ if (u)
+ return u;
+
+ u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
+ if (u)
+ return u;
+
+ return manager_get_unit_by_pid_cgroup(m, pid);
+}
+
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
Unit *u;
- int r;
assert(m);
assert(cgroup);
u = manager_get_unit_by_cgroup(m, cgroup);
- if (u) {
- r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, true);
- if (r > 0) {
- if (UNIT_VTABLE(u)->notify_cgroup_empty)
- UNIT_VTABLE(u)->notify_cgroup_empty(u);
+ if (!u)
+ return 0;
- unit_add_to_gc_queue(u);
- }
+ return unit_notify_cgroup_empty(u);
+}
+
+int unit_get_memory_current(Unit *u, uint64_t *ret) {
+ _cleanup_free_ char *v = NULL;
+ int r;
+
+ assert(u);
+ assert(ret);
+
+ if (!u->cgroup_path)
+ return -ENODATA;
+
+ if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
+ return -ENODATA;
+
+ if (cg_unified() <= 0)
+ r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
+ else
+ r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
+ if (r == -ENOENT)
+ return -ENODATA;
+ if (r < 0)
+ return r;
+
+ return safe_atou64(v, ret);
+}
+
+static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
+ _cleanup_free_ char *v = NULL;
+ uint64_t ns;
+ int r;
+
+ assert(u);
+ assert(ret);
+
+ if (!u->cgroup_path)
+ return -ENODATA;
+
+ if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
+ return -ENODATA;
+
+ r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
+ if (r == -ENOENT)
+ return -ENODATA;
+ if (r < 0)
+ return r;
+
+ r = safe_atou64(v, &ns);
+ if (r < 0)
+ return r;
+
+ *ret = ns;
+ return 0;
+}
+
+int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
+ nsec_t ns;
+ int r;
+
+ r = unit_get_cpu_usage_raw(u, &ns);
+ if (r < 0)
+ return r;
+
+ if (ns > u->cpuacct_usage_base)
+ ns -= u->cpuacct_usage_base;
+ else
+ ns = 0;
+
+ *ret = ns;
+ return 0;
+}
+
+int unit_reset_cpu_usage(Unit *u) {
+ nsec_t ns;
+ int r;
+
+ assert(u);
+
+ r = unit_get_cpu_usage_raw(u, &ns);
+ if (r < 0) {
+ u->cpuacct_usage_base = 0;
+ return r;
}
+ u->cpuacct_usage_base = ns;
return 0;
}
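+
+/* Usage sketch (hypothetical caller code): reset the counter at the
+ * start of a measurement interval, then read the consumed CPU time at
+ * the end:
+ *
+ *     nsec_t ns;
+ *     (void) unit_reset_cpu_usage(u);
+ *     ...
+ *     if (unit_get_cpu_usage(u, &ns) >= 0)
+ *             log_unit_debug(u, "Consumed %" PRIu64 " ns of CPU time.", (uint64_t) ns);
+ */
+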
+bool unit_cgroup_delegate(Unit *u) {
+ CGroupContext *c;
+
+ assert(u);
+
+ c = unit_get_cgroup_context(u);
+ if (!c)
+ return false;
+
+ return c->delegate;
+}
+
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
[CGROUP_AUTO] = "auto",
[CGROUP_CLOSED] = "closed",
};
DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
+#endif // 0