X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?p=elogind.git;a=blobdiff_plain;f=src%2Fcore%2Fcgroup.c;h=b53b35dad0ac5767205fcc7ee08d81a5a6340423;hp=70fc925b4dfedde9bcfde694acf600f545af1b76;hb=e916c1378b697b1f267d021a823bbfcb5ac8376c;hpb=4a62c710b62a5a3c7a8a278b810b9d5b5a0c8f4f diff --git a/src/core/cgroup.c b/src/core/cgroup.c index 70fc925b4..b53b35dad 100644 --- a/src/core/cgroup.c +++ b/src/core/cgroup.c @@ -1,5 +1,3 @@ -/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ - /*** This file is part of systemd. @@ -22,26 +20,63 @@ #include #include -#include "path-util.h" -#include "special.h" +#include "alloc-util.h" #include "cgroup-util.h" #include "cgroup.h" +#include "fd-util.h" +#include "fileio.h" +#include "fs-util.h" +#include "parse-util.h" +#include "path-util.h" +#include "process-util.h" +//#include "special.h" +#include "string-table.h" +#include "string-util.h" +#include "stdio-util.h" #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC) +#if 0 /// UNNEEDED by elogind +static void cgroup_compat_warn(void) { + static bool cgroup_compat_warned = false; + + if (cgroup_compat_warned) + return; + + log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details."); + cgroup_compat_warned = true; +} + +#define log_cgroup_compat(unit, fmt, ...) do { \ + cgroup_compat_warn(); \ + log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__); \ + } while (false) + void cgroup_context_init(CGroupContext *c) { assert(c); /* Initialize everything to the kernel defaults, assuming the * structure is preinitialized to 0 */ - c->cpu_shares = (unsigned long) -1; - c->startup_cpu_shares = (unsigned long) -1; - c->memory_limit = (uint64_t) -1; - c->blockio_weight = (unsigned long) -1; - c->startup_blockio_weight = (unsigned long) -1; - + c->cpu_weight = CGROUP_WEIGHT_INVALID; + c->startup_cpu_weight = CGROUP_WEIGHT_INVALID; c->cpu_quota_per_sec_usec = USEC_INFINITY; + + c->cpu_shares = CGROUP_CPU_SHARES_INVALID; + c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID; + + c->memory_high = CGROUP_LIMIT_MAX; + c->memory_max = CGROUP_LIMIT_MAX; + + c->memory_limit = CGROUP_LIMIT_MAX; + + c->io_weight = CGROUP_WEIGHT_INVALID; + c->startup_io_weight = CGROUP_WEIGHT_INVALID; + + c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID; + c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID; + + c->tasks_max = (uint64_t) -1; } void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) { @@ -53,6 +88,24 @@ void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) { free(a); } +void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) { + assert(c); + assert(w); + + LIST_REMOVE(device_weights, c->io_device_weights, w); + free(w->path); + free(w); +} + +void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) { + assert(c); + assert(l); + + LIST_REMOVE(device_limits, c->io_device_limits, l); + free(l->path); + free(l); +} + void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) { assert(c); assert(w); @@ -74,6 +127,12 @@ void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockI void cgroup_context_done(CGroupContext *c) { assert(c); + while (c->io_device_weights) + cgroup_context_free_io_device_weight(c, c->io_device_weights); + + while (c->io_device_limits) + cgroup_context_free_io_device_limit(c, c->io_device_limits); + while 
(c->blockio_device_weights) cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights); @@ -85,6 +144,8 @@ void cgroup_context_done(CGroupContext *c) { } void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) { + CGroupIODeviceLimit *il; + CGroupIODeviceWeight *iw; CGroupBlockIODeviceBandwidth *b; CGroupBlockIODeviceWeight *w; CGroupDeviceAllow *a; @@ -97,25 +158,45 @@ void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) { fprintf(f, "%sCPUAccounting=%s\n" + "%sIOAccounting=%s\n" "%sBlockIOAccounting=%s\n" "%sMemoryAccounting=%s\n" - "%sCPUShares=%lu\n" - "%sStartupCPUShares=%lu\n" + "%sTasksAccounting=%s\n" + "%sCPUWeight=%" PRIu64 "\n" + "%sStartupCPUWeight=%" PRIu64 "\n" + "%sCPUShares=%" PRIu64 "\n" + "%sStartupCPUShares=%" PRIu64 "\n" "%sCPUQuotaPerSecSec=%s\n" - "%sBlockIOWeight=%lu\n" - "%sStartupBlockIOWeight=%lu\n" + "%sIOWeight=%" PRIu64 "\n" + "%sStartupIOWeight=%" PRIu64 "\n" + "%sBlockIOWeight=%" PRIu64 "\n" + "%sStartupBlockIOWeight=%" PRIu64 "\n" + "%sMemoryLow=%" PRIu64 "\n" + "%sMemoryHigh=%" PRIu64 "\n" + "%sMemoryMax=%" PRIu64 "\n" "%sMemoryLimit=%" PRIu64 "\n" + "%sTasksMax=%" PRIu64 "\n" "%sDevicePolicy=%s\n" "%sDelegate=%s\n", prefix, yes_no(c->cpu_accounting), + prefix, yes_no(c->io_accounting), prefix, yes_no(c->blockio_accounting), prefix, yes_no(c->memory_accounting), + prefix, yes_no(c->tasks_accounting), + prefix, c->cpu_weight, + prefix, c->startup_cpu_weight, prefix, c->cpu_shares, prefix, c->startup_cpu_shares, prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1), + prefix, c->io_weight, + prefix, c->startup_io_weight, prefix, c->blockio_weight, prefix, c->startup_blockio_weight, + prefix, c->memory_low, + prefix, c->memory_high, + prefix, c->memory_max, prefix, c->memory_limit, + prefix, c->tasks_max, prefix, cgroup_device_policy_to_string(c->device_policy), prefix, yes_no(c->delegate)); @@ -126,9 +207,30 @@ void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) { a->path, a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : ""); + LIST_FOREACH(device_weights, iw, c->io_device_weights) + fprintf(f, + "%sIODeviceWeight=%s %" PRIu64, + prefix, + iw->path, + iw->weight); + + LIST_FOREACH(device_limits, il, c->io_device_limits) { + char buf[FORMAT_BYTES_MAX]; + CGroupIOLimitType type; + + for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) + if (il->limits[type] != cgroup_io_limit_defaults[type]) + fprintf(f, + "%s%s=%s %s\n", + prefix, + cgroup_io_limit_type_to_string(type), + il->path, + format_bytes(buf, sizeof(buf), il->limits[type])); + } + LIST_FOREACH(device_weights, w, c->blockio_device_weights) fprintf(f, - "%sBlockIODeviceWeight=%s %lu", + "%sBlockIODeviceWeight=%s %" PRIu64, prefix, w->path, w->weight); @@ -136,16 +238,22 @@ void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) { LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) { char buf[FORMAT_BYTES_MAX]; - fprintf(f, - "%s%s=%s %s\n", - prefix, - b->read ? 
"BlockIOReadBandwidth" : "BlockIOWriteBandwidth", - b->path, - format_bytes(buf, sizeof(buf), b->bandwidth)); + if (b->rbps != CGROUP_LIMIT_MAX) + fprintf(f, + "%sBlockIOReadBandwidth=%s %s\n", + prefix, + b->path, + format_bytes(buf, sizeof(buf), b->rbps)); + if (b->wbps != CGROUP_LIMIT_MAX) + fprintf(f, + "%sBlockIOWriteBandwidth=%s %s\n", + prefix, + b->path, + format_bytes(buf, sizeof(buf), b->wbps)); } } -static int lookup_blkio_device(const char *p, dev_t *dev) { +static int lookup_block_device(const char *p, dev_t *dev) { struct stat st; int r; @@ -200,7 +308,8 @@ static int whitelist_device(const char *path, const char *node, const char *acc) r = cg_set_attribute("devices", path, "devices.allow", buf); if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set devices.allow on %s: %s", path, strerror(-r)); + log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set devices.allow on %s: %m", path); return r; } @@ -270,7 +379,8 @@ static int whitelist_major(const char *path, const char *name, char type, const r = cg_set_attribute("devices", path, "devices.allow", buf); if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set devices.allow on %s: %s", path, strerror(-r)); + log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set devices.allow on %s: %m", path); } return 0; @@ -280,113 +390,515 @@ fail: return -errno; } -void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const char *path, ManagerState state) { +static bool cgroup_context_has_cpu_weight(CGroupContext *c) { + return c->cpu_weight != CGROUP_WEIGHT_INVALID || + c->startup_cpu_weight != CGROUP_WEIGHT_INVALID; +} + +static bool cgroup_context_has_cpu_shares(CGroupContext *c) { + return c->cpu_shares != CGROUP_CPU_SHARES_INVALID || + c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID; +} + +static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) { + if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && + c->startup_cpu_weight != CGROUP_WEIGHT_INVALID) + return c->startup_cpu_weight; + else if (c->cpu_weight != CGROUP_WEIGHT_INVALID) + return c->cpu_weight; + else + return CGROUP_WEIGHT_DEFAULT; +} + +static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) { + if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && + c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID) + return c->startup_cpu_shares; + else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID) + return c->cpu_shares; + else + return CGROUP_CPU_SHARES_DEFAULT; +} + +static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) { + char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)]; + int r; + + xsprintf(buf, "%" PRIu64 "\n", weight); + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set cpu.weight: %m"); + + if (quota != USEC_INFINITY) + xsprintf(buf, USEC_FMT " " USEC_FMT "\n", + quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC); + else + xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC); + + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf); + + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? 
LOG_DEBUG : LOG_WARNING, r, + "Failed to set cpu.max: %m"); +} + +static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) { + char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1]; + int r; + + xsprintf(buf, "%" PRIu64 "\n", shares); + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set cpu.shares: %m"); + + xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC); + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set cpu.cfs_period_us: %m"); + + if (quota != USEC_INFINITY) { + xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC); + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf); + } else + r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1"); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set cpu.cfs_quota_us: %m"); +} + +static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) { + return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT, + CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); +} + +static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) { + return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT, + CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX); +} + +static bool cgroup_context_has_io_config(CGroupContext *c) { + return c->io_accounting || + c->io_weight != CGROUP_WEIGHT_INVALID || + c->startup_io_weight != CGROUP_WEIGHT_INVALID || + c->io_device_weights || + c->io_device_limits; +} + +static bool cgroup_context_has_blockio_config(CGroupContext *c) { + return c->blockio_accounting || + c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID || + c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID || + c->blockio_device_weights || + c->blockio_device_bandwidths; +} + +static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) { + if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && + c->startup_io_weight != CGROUP_WEIGHT_INVALID) + return c->startup_io_weight; + else if (c->io_weight != CGROUP_WEIGHT_INVALID) + return c->io_weight; + else + return CGROUP_WEIGHT_DEFAULT; +} + +static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) { + if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && + c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID) + return c->startup_blockio_weight; + else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID) + return c->blockio_weight; + else + return CGROUP_BLKIO_WEIGHT_DEFAULT; +} + +static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) { + return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT, + CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); +} + +static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) { + return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT, + CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX); +} + +static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) { + char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1]; + dev_t dev; + int r; + + r = lookup_block_device(dev_path, &dev); + if (r < 0) + return; + + xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight); + r = 
cg_set_attribute("io", u->cgroup_path, "io.weight", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set io.weight: %m"); +} + +static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) { + char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1]; + dev_t dev; + int r; + + r = lookup_block_device(dev_path, &dev); + if (r < 0) + return; + + xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight); + r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set blkio.weight_device: %m"); +} + +static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) { + char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)]; + char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4]; + CGroupIOLimitType type; + dev_t dev; + unsigned n = 0; + int r; + + r = lookup_block_device(dev_path, &dev); + if (r < 0) + return 0; + + for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) { + if (limits[type] != cgroup_io_limit_defaults[type]) { + xsprintf(limit_bufs[type], "%" PRIu64, limits[type]); + n++; + } else { + xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0"); + } + } + + xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev), + limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX], + limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]); + r = cg_set_attribute("io", u->cgroup_path, "io.max", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set io.max: %m"); + return n; +} + +static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) { + char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1]; + dev_t dev; + unsigned n = 0; + int r; + + r = lookup_block_device(dev_path, &dev); + if (r < 0) + return 0; + + if (rbps != CGROUP_LIMIT_MAX) + n++; + sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps); + r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set blkio.throttle.read_bps_device: %m"); + + if (wbps != CGROUP_LIMIT_MAX) + n++; + sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps); + r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set blkio.throttle.write_bps_device: %m"); + + return n; +} + +static bool cgroup_context_has_unified_memory_config(CGroupContext *c) { + return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX; +} + +static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) { + char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max"; + int r; + + if (v != CGROUP_LIMIT_MAX) + xsprintf(buf, "%" PRIu64 "\n", v); + + r = cg_set_attribute("memory", u->cgroup_path, file, buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? 
LOG_DEBUG : LOG_WARNING, r, + "Failed to set %s: %m", file); +} + +static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) { + const char *path; + CGroupContext *c; bool is_root; int r; + assert(u); + + c = unit_get_cgroup_context(u); + path = u->cgroup_path; + assert(c); assert(path); if (mask == 0) return; - /* Some cgroup attributes are not support on the root cgroup, + /* Some cgroup attributes are not supported on the root cgroup, * hence silently ignore */ is_root = isempty(path) || path_equal(path, "/"); + if (is_root) + /* Make sure we don't try to display messages with an empty path. */ + path = "/"; - if ((mask & CGROUP_CPU) && !is_root) { - char buf[MAX(DECIMAL_STR_MAX(unsigned long), DECIMAL_STR_MAX(usec_t)) + 1]; + /* We generally ignore errors caused by read-only mounted + * cgroup trees (assuming we are running in a container then), + * and missing cgroups, i.e. EROFS and ENOENT. */ - sprintf(buf, "%lu\n", - IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != (unsigned long) -1 ? c->startup_cpu_shares : - c->cpu_shares != (unsigned long) -1 ? c->cpu_shares : 1024); - r = cg_set_attribute("cpu", path, "cpu.shares", buf); - if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set cpu.shares on %s: %s", path, strerror(-r)); + if ((mask & CGROUP_MASK_CPU) && !is_root) { + bool has_weight = cgroup_context_has_cpu_weight(c); + bool has_shares = cgroup_context_has_cpu_shares(c); - sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC); - r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf); - if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set cpu.cfs_period_us on %s: %s", path, strerror(-r)); + if (cg_unified() > 0) { + uint64_t weight; - if (c->cpu_quota_per_sec_usec != USEC_INFINITY) { - sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC); - r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf); - } else - r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1"); - if (r < 0) - log_full(r == -ENOENT ? 
LOG_DEBUG : LOG_WARNING, "Failed to set cpu.cfs_quota_us on %s: %s", path, strerror(-r)); + if (has_weight) + weight = cgroup_context_cpu_weight(c, state); + else if (has_shares) { + uint64_t shares = cgroup_context_cpu_shares(c, state); + + weight = cgroup_cpu_shares_to_weight(shares); + + log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s", + shares, weight, path); + } else + weight = CGROUP_WEIGHT_DEFAULT; + + cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec); + } else { + uint64_t shares; + + if (has_shares) + shares = cgroup_context_cpu_shares(c, state); + else if (has_weight) { + uint64_t weight = cgroup_context_cpu_weight(c, state); + + shares = cgroup_cpu_weight_to_shares(weight); + + log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s", + weight, shares, path); + } else + shares = CGROUP_CPU_SHARES_DEFAULT; + + cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec); + } } - if (mask & CGROUP_BLKIO) { - char buf[MAX3(DECIMAL_STR_MAX(unsigned long)+1, - DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(unsigned long)*1, - DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)]; - CGroupBlockIODeviceWeight *w; - CGroupBlockIODeviceBandwidth *b; + if (mask & CGROUP_MASK_IO) { + bool has_io = cgroup_context_has_io_config(c); + bool has_blockio = cgroup_context_has_blockio_config(c); if (!is_root) { - sprintf(buf, "%lu\n", IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != (unsigned long) -1 ? c->startup_blockio_weight : - c->blockio_weight != (unsigned long) -1 ? c->blockio_weight : 1000); - r = cg_set_attribute("blkio", path, "blkio.weight", buf); + char buf[8+DECIMAL_STR_MAX(uint64_t)+1]; + uint64_t weight; + + if (has_io) + weight = cgroup_context_io_weight(c, state); + else if (has_blockio) { + uint64_t blkio_weight = cgroup_context_blkio_weight(c, state); + + weight = cgroup_weight_blkio_to_io(blkio_weight); + + log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64, + blkio_weight, weight); + } else + weight = CGROUP_WEIGHT_DEFAULT; + + xsprintf(buf, "default %" PRIu64 "\n", weight); + r = cg_set_attribute("io", path, "io.weight", buf); if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set blkio.weight on %s: %s", path, strerror(-r)); + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set io.weight: %m"); - /* FIXME: no way to reset this list */ - LIST_FOREACH(device_weights, w, c->blockio_device_weights) { - dev_t dev; + if (has_io) { + CGroupIODeviceWeight *w; - r = lookup_blkio_device(w->path, &dev); - if (r < 0) - continue; + /* FIXME: no way to reset this list */ + LIST_FOREACH(device_weights, w, c->io_device_weights) + cgroup_apply_io_device_weight(u, w->path, w->weight); + } else if (has_blockio) { + CGroupBlockIODeviceWeight *w; - sprintf(buf, "%u:%u %lu", major(dev), minor(dev), w->weight); - r = cg_set_attribute("blkio", path, "blkio.weight_device", buf); - if (r < 0) - log_full(r == -ENOENT ? 
LOG_DEBUG : LOG_WARNING, "Failed to set blkio.weight_device on %s: %s", path, strerror(-r)); + /* FIXME: no way to reset this list */ + LIST_FOREACH(device_weights, w, c->blockio_device_weights) { + weight = cgroup_weight_blkio_to_io(w->weight); + + log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s", + w->weight, weight, w->path); + + cgroup_apply_io_device_weight(u, w->path, weight); + } } } - /* FIXME: no way to reset this list */ - LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) { - const char *a; - dev_t dev; + /* Apply limits and free ones without config. */ + if (has_io) { + CGroupIODeviceLimit *l, *next; - r = lookup_blkio_device(b->path, &dev); - if (r < 0) - continue; + LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) { + if (!cgroup_apply_io_device_limit(u, l->path, l->limits)) + cgroup_context_free_io_device_limit(c, l); + } + } else if (has_blockio) { + CGroupBlockIODeviceBandwidth *b, *next; + + LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) { + uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX]; + CGroupIOLimitType type; + + for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) + limits[type] = cgroup_io_limit_defaults[type]; + + limits[CGROUP_IO_RBPS_MAX] = b->rbps; + limits[CGROUP_IO_WBPS_MAX] = b->wbps; - a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device"; + log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s", + b->rbps, b->wbps, b->path); - sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth); - r = cg_set_attribute("blkio", path, a, buf); + if (!cgroup_apply_io_device_limit(u, b->path, limits)) + cgroup_context_free_blockio_device_bandwidth(c, b); + } + } + } + + if (mask & CGROUP_MASK_BLKIO) { + bool has_io = cgroup_context_has_io_config(c); + bool has_blockio = cgroup_context_has_blockio_config(c); + + if (!is_root) { + char buf[DECIMAL_STR_MAX(uint64_t)+1]; + uint64_t weight; + + if (has_blockio) + weight = cgroup_context_blkio_weight(c, state); + else if (has_io) { + uint64_t io_weight = cgroup_context_io_weight(c, state); + + weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state)); + + log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64, + io_weight, weight); + } else + weight = CGROUP_BLKIO_WEIGHT_DEFAULT; + + xsprintf(buf, "%" PRIu64 "\n", weight); + r = cg_set_attribute("blkio", path, "blkio.weight", buf); if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set %s on %s: %s", a, path, strerror(-r)); + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set blkio.weight: %m"); + + if (has_blockio) { + CGroupBlockIODeviceWeight *w; + + /* FIXME: no way to reset this list */ + LIST_FOREACH(device_weights, w, c->blockio_device_weights) + cgroup_apply_blkio_device_weight(u, w->path, w->weight); + } else if (has_io) { + CGroupIODeviceWeight *w; + + /* FIXME: no way to reset this list */ + LIST_FOREACH(device_weights, w, c->io_device_weights) { + weight = cgroup_weight_io_to_blkio(w->weight); + + log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s", + w->weight, weight, w->path); + + cgroup_apply_blkio_device_weight(u, w->path, weight); + } + } + } + + /* Apply limits and free ones without config. 
*/ + if (has_blockio) { + CGroupBlockIODeviceBandwidth *b, *next; + + LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) { + if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps)) + cgroup_context_free_blockio_device_bandwidth(c, b); + } + } else if (has_io) { + CGroupIODeviceLimit *l, *next; + + LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) { + log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s", + l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path); + + if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX])) + cgroup_context_free_io_device_limit(c, l); + } } } - if (mask & CGROUP_MEMORY) { - if (c->memory_limit != (uint64_t) -1) { + if ((mask & CGROUP_MASK_MEMORY) && !is_root) { + if (cg_unified() > 0) { + uint64_t max = c->memory_max; + + if (cgroup_context_has_unified_memory_config(c)) + max = c->memory_max; + else { + max = c->memory_limit; + + if (max != CGROUP_LIMIT_MAX) + log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max); + } + + cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low); + cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high); + cgroup_apply_unified_memory_limit(u, "memory.max", max); + } else { char buf[DECIMAL_STR_MAX(uint64_t) + 1]; + uint64_t val = c->memory_limit; - sprintf(buf, "%" PRIu64 "\n", c->memory_limit); - r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf); - } else - r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1"); + if (val == CGROUP_LIMIT_MAX) { + val = c->memory_max; - if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set memory.limit_in_bytes on %s: %s", path, strerror(-r)); + if (val != CGROUP_LIMIT_MAX) + log_cgroup_compat(u, "Applying MemoryMax %" PRIi64 " as MemoryLimit", c->memory_max); + } + + if (val == CGROUP_LIMIT_MAX) + strncpy(buf, "-1\n", sizeof(buf)); + else + xsprintf(buf, "%" PRIu64 "\n", val); + + r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf); + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set memory.limit_in_bytes: %m"); + } } - if ((mask & CGROUP_DEVICE) && !is_root) { + if ((mask & CGROUP_MASK_DEVICES) && !is_root) { CGroupDeviceAllow *a; + /* Changing the devices list of a populated cgroup + * might result in EINVAL, hence ignore EINVAL + * here. */ + if (c->device_allow || c->device_policy != CGROUP_AUTO) r = cg_set_attribute("devices", path, "devices.deny", "a"); else r = cg_set_attribute("devices", path, "devices.allow", "a"); if (r < 0) - log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to reset devices.list on %s: %s", path, strerror(-r)); + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? 
LOG_DEBUG : LOG_WARNING, r, + "Failed to reset devices.list: %m"); if (c->device_policy == CGROUP_CLOSED || (c->device_policy == CGROUP_AUTO && c->device_allow)) { @@ -397,7 +909,11 @@ void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const cha "/dev/random\0" "rwm\0" "/dev/urandom\0" "rwm\0" "/dev/tty\0" "rwm\0" - "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */ + "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */ + /* Allow /run/elogind/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */ + /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */ + "/run/systemd/inaccessible/chr\0" "rwm\0" + "/run/systemd/inaccessible/blk\0" "rwm\0"; const char *x, *y; @@ -432,66 +948,92 @@ void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const cha else if (startswith(a->path, "char-")) whitelist_major(path, a->path + 5, 'c', acc); else - log_debug("Ignoring device %s while writing cgroup attribute.", a->path); + log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path); } } + + if ((mask & CGROUP_MASK_PIDS) && !is_root) { + + if (c->tasks_max != (uint64_t) -1) { + char buf[DECIMAL_STR_MAX(uint64_t) + 2]; + + sprintf(buf, "%" PRIu64 "\n", c->tasks_max); + r = cg_set_attribute("pids", path, "pids.max", buf); + } else + r = cg_set_attribute("pids", path, "pids.max", "max"); + + if (r < 0) + log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r, + "Failed to set pids.max: %m"); + } } -CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) { - CGroupControllerMask mask = 0; +CGroupMask cgroup_context_get_mask(CGroupContext *c) { + CGroupMask mask = 0; /* Figure out which controllers we need */ if (c->cpu_accounting || - c->cpu_shares != (unsigned long) -1 || - c->startup_cpu_shares != (unsigned long) -1 || + cgroup_context_has_cpu_weight(c) || + cgroup_context_has_cpu_shares(c) || c->cpu_quota_per_sec_usec != USEC_INFINITY) - mask |= CGROUP_CPUACCT | CGROUP_CPU; + mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU; - if (c->blockio_accounting || - c->blockio_weight != (unsigned long) -1 || - c->startup_blockio_weight != (unsigned long) -1 || - c->blockio_device_weights || - c->blockio_device_bandwidths) - mask |= CGROUP_BLKIO; + if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c)) + mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO; if (c->memory_accounting || - c->memory_limit != (uint64_t) -1) - mask |= CGROUP_MEMORY; + c->memory_limit != CGROUP_LIMIT_MAX || + cgroup_context_has_unified_memory_config(c)) + mask |= CGROUP_MASK_MEMORY; if (c->device_allow || c->device_policy != CGROUP_AUTO) - mask |= CGROUP_DEVICE; + mask |= CGROUP_MASK_DEVICES; + + if (c->tasks_accounting || + c->tasks_max != (uint64_t) -1) + mask |= CGROUP_MASK_PIDS; return mask; } -CGroupControllerMask unit_get_cgroup_mask(Unit *u) { +CGroupMask unit_get_own_mask(Unit *u) { CGroupContext *c; + /* Returns the mask of controllers the unit needs for itself */ + c = unit_get_cgroup_context(u); if (!c) return 0; /* If delegation is turned on, then turn on all cgroups, - * unless the process we fork into it is known to drop - * privileges anyway, and shouldn't get access to the - * controllers anyway. */ + * unless we are on the legacy hierarchy and the process we + * fork into it is known to drop privileges, and hence + * shouldn't get access to the controllers. 
+ * + * Note that on the unified hierarchy it is safe to delegate + * controllers to unprivileged services. */ if (c->delegate) { ExecContext *e; e = unit_get_exec_context(u); - if (!e || exec_context_maintains_privileges(e)) - return _CGROUP_CONTROLLER_MASK_ALL; + if (!e || + exec_context_maintains_privileges(e) || + cg_unified() > 0) + return _CGROUP_MASK_ALL; } return cgroup_context_get_mask(c); } -CGroupControllerMask unit_get_members_mask(Unit *u) { +CGroupMask unit_get_members_mask(Unit *u) { assert(u); + /* Returns the mask of controllers all of the unit's children + * require, merged */ + if (u->cgroup_members_mask_valid) return u->cgroup_members_mask; @@ -510,7 +1052,7 @@ CGroupControllerMask unit_get_members_mask(Unit *u) { continue; u->cgroup_members_mask |= - unit_get_cgroup_mask(member) | + unit_get_own_mask(member) | unit_get_members_mask(member); } } @@ -519,19 +1061,52 @@ CGroupControllerMask unit_get_members_mask(Unit *u) { return u->cgroup_members_mask; } -CGroupControllerMask unit_get_siblings_mask(Unit *u) { +CGroupMask unit_get_siblings_mask(Unit *u) { assert(u); + /* Returns the mask of controllers all of the unit's siblings + * require, i.e. the members mask of the unit's parent slice + * if there is one. */ + if (UNIT_ISSET(u->slice)) return unit_get_members_mask(UNIT_DEREF(u->slice)); - return unit_get_cgroup_mask(u) | unit_get_members_mask(u); + return unit_get_own_mask(u) | unit_get_members_mask(u); +} + +CGroupMask unit_get_subtree_mask(Unit *u) { + + /* Returns the mask of this subtree, meaning of the group + * itself and its children. */ + + return unit_get_own_mask(u) | unit_get_members_mask(u); +} + +CGroupMask unit_get_target_mask(Unit *u) { + CGroupMask mask; + + /* This returns the cgroup mask of all controllers to enable + * for a specific cgroup, i.e. everything it needs itself, + * plus all that its children need, plus all that its siblings + * need. This is primarily useful on the legacy cgroup + * hierarchy, where we need to duplicate each cgroup in each + * hierarchy that shall be enabled for it. */ + + mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u); + mask &= u->manager->cgroup_supported; + + return mask; } -CGroupControllerMask unit_get_target_mask(Unit *u) { - CGroupControllerMask mask; +CGroupMask unit_get_enable_mask(Unit *u) { + CGroupMask mask; + + /* This returns the cgroup mask of all controllers to enable + * for the children of a specific cgroup. This is primarily + * useful for the unified cgroup hierarchy, where each cgroup + * controls which controllers are enabled for its children. */ - mask = unit_get_cgroup_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u); + mask = unit_get_members_mask(u); mask &= u->manager->cgroup_supported; return mask; @@ -540,13 +1115,13 @@ CGroupControllerMask unit_get_target_mask(Unit *u) { /* Recurse from a unit up through its containing slices, propagating * mask bits upward. A unit is also member of itself. */ void unit_update_cgroup_members_masks(Unit *u) { - CGroupControllerMask m; + CGroupMask m; bool more; assert(u); /* Calculate subtree mask */ - m = unit_get_cgroup_mask(u) | unit_get_members_mask(u); + m = unit_get_subtree_mask(u); /* See if anything changed from the previous invocation. If * not, we're done. 
*/ @@ -586,7 +1161,7 @@ void unit_update_cgroup_members_masks(Unit *u) { } } -static const char *migrate_callback(CGroupControllerMask mask, void *userdata) { +static const char *migrate_callback(CGroupMask mask, void *userdata) { Unit *u = userdata; assert(mask != 0); @@ -604,57 +1179,198 @@ static const char *migrate_callback(CGroupControllerMask mask, void *userdata) { return NULL; } -static int unit_create_cgroups(Unit *u, CGroupControllerMask mask) { - _cleanup_free_ char *path = NULL; +char *unit_default_cgroup_path(Unit *u) { + _cleanup_free_ char *escaped = NULL, *slice = NULL; int r; assert(u); - path = unit_default_cgroup_path(u); - if (!path) - return log_oom(); + if (unit_has_name(u, SPECIAL_ROOT_SLICE)) + return strdup(u->manager->cgroup_root); - r = hashmap_put(u->manager->cgroup_unit, path, u); - if (r < 0) { - log_error(r == -EEXIST ? "cgroup %s exists already: %s" : "hashmap_put failed for %s: %s", path, strerror(-r)); - return r; - } - if (r > 0) { - u->cgroup_path = path; - path = NULL; + if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) { + r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice); + if (r < 0) + return NULL; } - /* First, create our own group */ - r = cg_create_everywhere(u->manager->cgroup_supported, mask, u->cgroup_path); - if (r < 0) - return log_error_errno(r, "Failed to create cgroup %s: %m", u->cgroup_path); - - /* Keep track that this is now realized */ - u->cgroup_realized = true; - u->cgroup_realized_mask = mask; - - /* Then, possibly move things over */ - r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u); - if (r < 0) - log_warning_errno(r, "Failed to migrate cgroup from to %s: %m", u->cgroup_path); + escaped = cg_escape(u->id); + if (!escaped) + return NULL; - return 0; + if (slice) + return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL); + else + return strjoin(u->manager->cgroup_root, "/", escaped, NULL); } -static bool unit_has_mask_realized(Unit *u, CGroupControllerMask mask) { +int unit_set_cgroup_path(Unit *u, const char *path) { + _cleanup_free_ char *p = NULL; + int r; + assert(u); - return u->cgroup_realized && u->cgroup_realized_mask == mask; + if (path) { + p = strdup(path); + if (!p) + return -ENOMEM; + } else + p = NULL; + + if (streq_ptr(u->cgroup_path, p)) + return 0; + + if (p) { + r = hashmap_put(u->manager->cgroup_unit, p, u); + if (r < 0) + return r; + } + + unit_release_cgroup(u); + + u->cgroup_path = p; + p = NULL; + + return 1; } -/* Check if necessary controllers and attributes for a unit are in place. - * - * If so, do nothing. - * If not, create paths, move processes over, and set attributes. - * - * Returns 0 on success and < 0 on failure. */ -static int unit_realize_cgroup_now(Unit *u, ManagerState state) { - CGroupControllerMask mask; +int unit_watch_cgroup(Unit *u) { + _cleanup_free_ char *events = NULL; + int r; + + assert(u); + + if (!u->cgroup_path) + return 0; + + if (u->cgroup_inotify_wd >= 0) + return 0; + + /* Only applies to the unified hierarchy */ + r = cg_unified(); + if (r < 0) + return log_unit_error_errno(u, r, "Failed detect whether the unified hierarchy is used: %m"); + if (r == 0) + return 0; + + /* Don't watch the root slice, it's pointless. 
*/ + if (unit_has_name(u, SPECIAL_ROOT_SLICE)) + return 0; + + r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops); + if (r < 0) + return log_oom(); + + r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events); + if (r < 0) + return log_oom(); + + u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY); + if (u->cgroup_inotify_wd < 0) { + + if (errno == ENOENT) /* If the directory is already + * gone we don't need to track + * it, so this is not an error */ + return 0; + + return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path); + } + + r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u); + if (r < 0) + return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m"); + + return 0; +} + +static int unit_create_cgroup( + Unit *u, + CGroupMask target_mask, + CGroupMask enable_mask) { + + CGroupContext *c; + int r; + + assert(u); + + c = unit_get_cgroup_context(u); + if (!c) + return 0; + + if (!u->cgroup_path) { + _cleanup_free_ char *path = NULL; + + path = unit_default_cgroup_path(u); + if (!path) + return log_oom(); + + r = unit_set_cgroup_path(u, path); + if (r == -EEXIST) + return log_unit_error_errno(u, r, "Control group %s exists already.", path); + if (r < 0) + return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path); + } + + /* First, create our own group */ + r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path); + if (r < 0) + return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path); + + /* Start watching it */ + (void) unit_watch_cgroup(u); + + /* Enable all controllers we need */ + r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path); + if (r < 0) + log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path); + + /* Keep track that this is now realized */ + u->cgroup_realized = true; + u->cgroup_realized_mask = target_mask; + u->cgroup_enabled_mask = enable_mask; + + if (u->type != UNIT_SLICE && !c->delegate) { + + /* Then, possibly move things over, but not if + * subgroups may contain processes, which is the case + * for slice and delegation units. */ + r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u); + if (r < 0) + log_unit_warning_errno(u, r, "Failed to migrate cgroup from to %s, ignoring: %m", u->cgroup_path); + } + + return 0; +} + +int unit_attach_pids_to_cgroup(Unit *u) { + int r; + assert(u); + + r = unit_realize_cgroup(u); + if (r < 0) + return r; + + r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u); + if (r < 0) + return r; + + return 0; +} + +static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask enable_mask) { + assert(u); + + return u->cgroup_realized && u->cgroup_realized_mask == target_mask && u->cgroup_enabled_mask == enable_mask; +} + +/* Check if necessary controllers and attributes for a unit are in place. + * + * If so, do nothing. + * If not, create paths, move processes over, and set attributes. + * + * Returns 0 on success and < 0 on failure. 
*/ +static int unit_realize_cgroup_now(Unit *u, ManagerState state) { + CGroupMask target_mask, enable_mask; int r; assert(u); @@ -664,9 +1380,10 @@ static int unit_realize_cgroup_now(Unit *u, ManagerState state) { u->in_cgroup_queue = false; } - mask = unit_get_target_mask(u); + target_mask = unit_get_target_mask(u); + enable_mask = unit_get_enable_mask(u); - if (unit_has_mask_realized(u, mask)) + if (unit_has_mask_realized(u, target_mask, enable_mask)) return 0; /* First, realize parents */ @@ -677,12 +1394,12 @@ static int unit_realize_cgroup_now(Unit *u, ManagerState state) { } /* And then do the real work */ - r = unit_create_cgroups(u, mask); + r = unit_create_cgroup(u, target_mask, enable_mask); if (r < 0) return r; /* Finally, apply the necessary attributes. */ - cgroup_context_apply(unit_get_cgroup_context(u), mask, u->cgroup_path, state); + cgroup_context_apply(u, target_mask, state); return 0; } @@ -709,7 +1426,7 @@ unsigned manager_dispatch_cgroup_queue(Manager *m) { r = unit_realize_cgroup_now(i, state); if (r < 0) - log_warning_errno(r, "Failed to realize cgroups for queued unit %s: %m", i->id); + log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id); n++; } @@ -745,7 +1462,7 @@ static void unit_queue_siblings(Unit *u) { /* If the unit doesn't need any new controllers * and has current ones realized, it doesn't need * any changes. */ - if (unit_has_mask_realized(m, unit_get_target_mask(m))) + if (unit_has_mask_realized(m, unit_get_target_mask(m), unit_get_enable_mask(m))) continue; unit_add_to_cgroup_queue(m); @@ -756,12 +1473,9 @@ static void unit_queue_siblings(Unit *u) { } int unit_realize_cgroup(Unit *u) { - CGroupContext *c; - assert(u); - c = unit_get_cgroup_context(u); - if (!c) + if (!UNIT_HAS_CGROUP_CONTEXT(u)) return 0; /* So, here's the deal: when realizing the cgroups for this @@ -782,38 +1496,68 @@ int unit_realize_cgroup(Unit *u) { return unit_realize_cgroup_now(u, manager_state(u->manager)); } -void unit_destroy_cgroup(Unit *u) { +void unit_release_cgroup(Unit *u) { + assert(u); + + /* Forgets all cgroup details for this cgroup */ + + if (u->cgroup_path) { + (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path); + u->cgroup_path = mfree(u->cgroup_path); + } + + if (u->cgroup_inotify_wd >= 0) { + if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0) + log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id); + + (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd)); + u->cgroup_inotify_wd = -1; + } +} + +void unit_prune_cgroup(Unit *u) { int r; + bool is_root_slice; assert(u); + /* Removes the cgroup, if empty and possible, and stops watching it. 
*/ + if (!u->cgroup_path) return; - r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !unit_has_name(u, SPECIAL_ROOT_SLICE)); - if (r < 0) - log_debug_errno(r, "Failed to destroy cgroup %s: %m", u->cgroup_path); + is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE); - hashmap_remove(u->manager->cgroup_unit, u->cgroup_path); + r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice); + if (r < 0) { + log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path); + return; + } + + if (is_root_slice) + return; + + unit_release_cgroup(u); - free(u->cgroup_path); - u->cgroup_path = NULL; u->cgroup_realized = false; u->cgroup_realized_mask = 0; - + u->cgroup_enabled_mask = 0; } -pid_t unit_search_main_pid(Unit *u) { +int unit_search_main_pid(Unit *u, pid_t *ret) { _cleanup_fclose_ FILE *f = NULL; pid_t pid = 0, npid, mypid; + int r; assert(u); + assert(ret); if (!u->cgroup_path) - return 0; + return -ENXIO; - if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f) < 0) - return 0; + r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f); + if (r < 0) + return r; mypid = getpid(); while (cg_read_pid(f, &npid) > 0) { @@ -823,93 +1567,302 @@ pid_t unit_search_main_pid(Unit *u) { continue; /* Ignore processes that aren't our kids */ - if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid) + if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid) continue; - if (pid != 0) { + if (pid != 0) /* Dang, there's more than one daemonized PID in this group, so we don't know what process is the main process. */ - pid = 0; - break; - } + + return -ENODATA; pid = npid; } - return pid; + *ret = pid; + return 0; +} + +static int unit_watch_pids_in_path(Unit *u, const char *path) { + _cleanup_closedir_ DIR *d = NULL; + _cleanup_fclose_ FILE *f = NULL; + int ret = 0, r; + + assert(u); + assert(path); + + r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f); + if (r < 0) + ret = r; + else { + pid_t pid; + + while ((r = cg_read_pid(f, &pid)) > 0) { + r = unit_watch_pid(u, pid); + if (r < 0 && ret >= 0) + ret = r; + } + + if (r < 0 && ret >= 0) + ret = r; + } + + r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d); + if (r < 0) { + if (ret >= 0) + ret = r; + } else { + char *fn; + + while ((r = cg_read_subgroup(d, &fn)) > 0) { + _cleanup_free_ char *p = NULL; + + p = strjoin(path, "/", fn, NULL); + free(fn); + + if (!p) + return -ENOMEM; + + r = unit_watch_pids_in_path(u, p); + if (r < 0 && ret >= 0) + ret = r; + } + + if (r < 0 && ret >= 0) + ret = r; + } + + return ret; +} + +int unit_watch_all_pids(Unit *u) { + assert(u); + + /* Adds all PIDs from our cgroup to the set of PIDs we + * watch. This is a fallback logic for cases where we do not + * get reliable cgroup empty notifications: we try to use + * SIGCHLD as replacement. 
*/ + + if (!u->cgroup_path) + return -ENOENT; + + if (cg_unified() > 0) /* On unified we can use proper notifications */ + return 0; + + return unit_watch_pids_in_path(u, u->cgroup_path); +} + +int unit_notify_cgroup_empty(Unit *u) { + int r; + + assert(u); + + if (!u->cgroup_path) + return 0; + + r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path); + if (r <= 0) + return r; + + unit_add_to_gc_queue(u); + + if (UNIT_VTABLE(u)->notify_cgroup_empty) + UNIT_VTABLE(u)->notify_cgroup_empty(u); + + return 0; +} + +static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) { + Manager *m = userdata; + + assert(s); + assert(fd >= 0); + assert(m); + + for (;;) { + union inotify_event_buffer buffer; + struct inotify_event *e; + ssize_t l; + + l = read(fd, &buffer, sizeof(buffer)); + if (l < 0) { + if (errno == EINTR || errno == EAGAIN) + return 0; + + return log_error_errno(errno, "Failed to read control group inotify events: %m"); + } + + FOREACH_INOTIFY_EVENT(e, buffer, l) { + Unit *u; + + if (e->wd < 0) + /* Queue overflow has no watch descriptor */ + continue; + + if (e->mask & IN_IGNORED) + /* The watch was just removed */ + continue; + + u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd)); + if (!u) /* Not that inotify might deliver + * events for a watch even after it + * was removed, because it was queued + * before the removal. Let's ignore + * this here safely. */ + continue; + + (void) unit_notify_cgroup_empty(u); + } + } } +#endif // 0 int manager_setup_cgroup(Manager *m) { _cleanup_free_ char *path = NULL; - int r; + CGroupController c; + int r, unified; + char *e; assert(m); /* 1. Determine hierarchy */ - free(m->cgroup_root); - m->cgroup_root = NULL; - + m->cgroup_root = mfree(m->cgroup_root); r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root); if (r < 0) return log_error_errno(r, "Cannot determine cgroup we are running in: %m"); - /* LEGACY: Already in /system.slice? If so, let's cut this - * off. This is to support live upgrades from older systemd - * versions where PID 1 was moved there. */ - if (m->running_as == SYSTEMD_SYSTEM) { - char *e; +#if 0 /// elogind does not support systemd scopes and slices + /* Chop off the init scope, if we are already located in it */ + e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE); + /* LEGACY: Also chop off the system slice if we are in + * it. This is to support live upgrades from older systemd + * versions where PID 1 was moved there. Also see + * cg_get_root_path(). */ + if (!e && MANAGER_IS_SYSTEM(m)) { e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE); if (!e) - e = endswith(m->cgroup_root, "/system"); - if (e) - *e = 0; + e = endswith(m->cgroup_root, "/system"); /* even more legacy */ } + if (e) + *e = 0; +#endif // 0 /* And make sure to store away the root value without trailing * slash, even for the root dir, so that we can easily prepend * it everywhere. */ - if (streq(m->cgroup_root, "/")) - m->cgroup_root[0] = 0; + while ((e = endswith(m->cgroup_root, "/"))) + *e = 0; + log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"", + SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root); /* 2. Show data */ r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path); if (r < 0) return log_error_errno(r, "Cannot find cgroup mount point: %m"); - log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". 
File system hierarchy is at %s.", path); + unified = cg_unified(); + if (unified < 0) + return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m"); + if (unified > 0) + log_debug("Unified cgroup hierarchy is located at %s.", path); + else + log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path); + if (!m->test_run) { + const char *scope_path; /* 3. Install agent */ - if (m->running_as == SYSTEMD_SYSTEM) { + if (unified) { + + /* In the unified hierarchy we can get + * cgroup empty notifications via inotify. */ + +#if 0 /// elogind does not support the unified hierarchy, yet. + m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source); + safe_close(m->cgroup_inotify_fd); + + m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC); + if (m->cgroup_inotify_fd < 0) + return log_error_errno(errno, "Failed to create control group inotify object: %m"); + + r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m); + if (r < 0) + return log_error_errno(r, "Failed to watch control group inotify object: %m"); + + /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also + * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */ + r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-5); + if (r < 0) + return log_error_errno(r, "Failed to set priority of inotify event source: %m"); + + (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify"); + +#else + return log_error_errno(EOPNOTSUPP, "Unified cgroup hierarchy not supported: %m"); +#endif // 0 + } else if (MANAGER_IS_SYSTEM(m)) { + + /* On the legacy hierarchy we only get + * notifications via cgroup agents. (Which + * isn't really reliable, since it does not + * generate events when control groups with + * children run empty. */ + r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH); if (r < 0) log_warning_errno(r, "Failed to install release agent, ignoring: %m"); else if (r > 0) log_debug("Installed release agent."); - else + else if (r == 0) log_debug("Release agent already installed."); } - /* 4. Make sure we are in the root cgroup */ - r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, 0); +#if 0 /// elogind is not meant to run in systemd init scope + /* 4. Make sure we are in the special "init.scope" unit in the root slice. */ + scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE); + r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0); +#else + if (streq(SYSTEMD_CGROUP_CONTROLLER, "name=elogind")) + // we are our own cgroup controller + scope_path = strjoina(""); + else if (streq(m->cgroup_root, "/elogind")) + // root already is our cgroup + scope_path = strjoina(m->cgroup_root); + else + // we have to create our own group + scope_path = strjoina(m->cgroup_root, "/elogind"); + r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0); +#endif // 0 + if (r < 0) + return log_error_errno(r, "Failed to create %s control group: %m", scope_path); + log_debug_elogind("Created control group \"%s\"", scope_path); + + /* also, move all other userspace processes remaining + * in the root cgroup into that scope. 
*/ + r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0); if (r < 0) - return log_error_errno(r, "Failed to create root cgroup hierarchy: %m"); + log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m"); /* 5. And pin it, so that it cannot be unmounted */ safe_close(m->pin_cgroupfs_fd); - m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK); if (m->pin_cgroupfs_fd < 0) return log_error_errno(errno, "Failed to open pin file: %m"); - /* 6. Always enable hierarchial support if it exists... */ - cg_set_attribute("memory", "/", "memory.use_hierarchy", "1"); + /* 6. Always enable hierarchical support if it exists... */ + if (!unified) + (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1"); } /* 7. Figure out which controllers are supported */ - m->cgroup_supported = cg_mask_supported(); + r = cg_mask_supported(&m->cgroup_supported); + if (r < 0) + return log_error_errno(r, "Failed to determine supported controllers: %m"); + + for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) + log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c))); return 0; } @@ -920,14 +1873,21 @@ void manager_shutdown_cgroup(Manager *m, bool delete) { /* We can't really delete the group, since we are in it. But * let's trim it. */ if (delete && m->cgroup_root) - cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false); + (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false); + +#if 0 /// elogind does not support the unified hierarchy, yet. + m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit); + + m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source); + m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd); +#endif // 0 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd); - free(m->cgroup_root); - m->cgroup_root = NULL; + m->cgroup_root = mfree(m->cgroup_root); } +#if 0 /// UNNEEDED by elogind Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) { char *p; Unit *u; @@ -944,8 +1904,8 @@ Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) { char *e; e = strrchr(p, '/'); - if (e == p || !e) - return NULL; + if (!e || e == p) + return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE); *e = 0; @@ -955,13 +1915,13 @@ Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) { } } -Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) { +Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) { _cleanup_free_ char *cgroup = NULL; int r; assert(m); - if (pid <= 1) + if (pid <= 0) return NULL; r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup); @@ -971,27 +1931,235 @@ Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) { return manager_get_unit_by_cgroup(m, cgroup); } +Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) { + Unit *u; + + assert(m); + + if (pid <= 0) + return NULL; + + if (pid == 1) + return hashmap_get(m->units, SPECIAL_INIT_SCOPE); + + u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid)); + if (u) + return u; + + u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid)); + if (u) + return u; + + return manager_get_unit_by_pid_cgroup(m, pid); +} +#endif // 0 + +#if 0 /// elogind must substitute this with its own variant int manager_notify_cgroup_empty(Manager *m, const char *cgroup) { Unit *u; - int r; assert(m); assert(cgroup); + log_debug("Got cgroup empty notification for: %s", cgroup); + u = manager_get_unit_by_cgroup(m, cgroup); - 
if (u) { - r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, true); - if (r > 0) { - if (UNIT_VTABLE(u)->notify_cgroup_empty) - UNIT_VTABLE(u)->notify_cgroup_empty(u); + if (!u) + return 0; - unit_add_to_gc_queue(u); - } + return unit_notify_cgroup_empty(u); +} +#else +int manager_notify_cgroup_empty(Manager *m, const char *cgroup) { + Session *s; + + assert(m); + assert(cgroup); + + log_debug("Got cgroup empty notification for: %s", cgroup); + + s = hashmap_get(m->sessions, cgroup); + + if (s) { + session_finalize(s); + session_free(s); + } else + log_warning("Session not found: %s", cgroup); + + return 0; +} +#endif // 0 + +#if 0 /// UNNEEDED by elogind +int unit_get_memory_current(Unit *u, uint64_t *ret) { + _cleanup_free_ char *v = NULL; + int r; + + assert(u); + assert(ret); + + if (!u->cgroup_path) + return -ENODATA; + + if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0) + return -ENODATA; + + if (cg_unified() <= 0) + r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v); + else + r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v); + if (r == -ENOENT) + return -ENODATA; + if (r < 0) + return r; + + return safe_atou64(v, ret); +} + +int unit_get_tasks_current(Unit *u, uint64_t *ret) { + _cleanup_free_ char *v = NULL; + int r; + + assert(u); + assert(ret); + + if (!u->cgroup_path) + return -ENODATA; + + if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0) + return -ENODATA; + + r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v); + if (r == -ENOENT) + return -ENODATA; + if (r < 0) + return r; + + return safe_atou64(v, ret); +} + +static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) { + _cleanup_free_ char *v = NULL; + uint64_t ns; + int r; + + assert(u); + assert(ret); + + if (!u->cgroup_path) + return -ENODATA; + + if (cg_unified() > 0) { + const char *keys[] = { "usage_usec", NULL }; + _cleanup_free_ char *val = NULL; + uint64_t us; + + if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0) + return -ENODATA; + + r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val); + if (r < 0) + return r; + + r = safe_atou64(val, &us); + if (r < 0) + return r; + + ns = us * NSEC_PER_USEC; + } else { + if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0) + return -ENODATA; + + r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v); + if (r == -ENOENT) + return -ENODATA; + if (r < 0) + return r; + + r = safe_atou64(v, &ns); + if (r < 0) + return r; + } + + *ret = ns; + return 0; +} + +int unit_get_cpu_usage(Unit *u, nsec_t *ret) { + nsec_t ns; + int r; + + r = unit_get_cpu_usage_raw(u, &ns); + if (r < 0) + return r; + + if (ns > u->cpu_usage_base) + ns -= u->cpu_usage_base; + else + ns = 0; + + *ret = ns; + return 0; +} + +int unit_reset_cpu_usage(Unit *u) { + nsec_t ns; + int r; + + assert(u); + + r = unit_get_cpu_usage_raw(u, &ns); + if (r < 0) { + u->cpu_usage_base = 0; + return r; } + u->cpu_usage_base = ns; return 0; } +bool unit_cgroup_delegate(Unit *u) { + CGroupContext *c; + + assert(u); + + c = unit_get_cgroup_context(u); + if (!c) + return false; + + return c->delegate; +} + +void unit_invalidate_cgroup(Unit *u, CGroupMask m) { + assert(u); + + if (!UNIT_HAS_CGROUP_CONTEXT(u)) + return; + + if (m == 0) + return; + + /* always invalidate compat pairs together */ + if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO)) + m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO; + + if ((u->cgroup_realized_mask & m) == 0) + return; + + u->cgroup_realized_mask &= ~m; + unit_add_to_cgroup_queue(u); +} 
+
+void manager_invalidate_startup_units(Manager *m) {
+        Iterator i;
+        Unit *u;
+
+        assert(m);
+
+        SET_FOREACH(u, m->startup_units, i)
+                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
+}
+
 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
         [CGROUP_AUTO] = "auto",
         [CGROUP_CLOSED] = "closed",
@@ -999,3 +2167,4 @@
 };
 
 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
+#endif // 0
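For reference, the legacy-to-unified compatibility translation introduced in this patch maps CPUShares= onto cpu.weight by rescaling around the two defaults and clamping to the valid range, exactly as cgroup_cpu_shares_to_weight() does above. Below is a minimal standalone sketch of that mapping, assuming the usual constant values from systemd's cgroup headers (weight range 1..10000 with default 100, shares range 2..262144 with default 1024); it is illustrative only and not part of the patch itself.

/* Standalone sketch of the CPUShares -> cpu.weight compat mapping.
 * The constants mirror the usual definitions in the systemd/elogind
 * cgroup headers; they are assumptions here, check the tree you build
 * against. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CGROUP_WEIGHT_MIN          UINT64_C(1)
#define CGROUP_WEIGHT_MAX          UINT64_C(10000)
#define CGROUP_WEIGHT_DEFAULT      UINT64_C(100)

#define CGROUP_CPU_SHARES_MIN      UINT64_C(2)
#define CGROUP_CPU_SHARES_MAX      UINT64_C(262144)
#define CGROUP_CPU_SHARES_DEFAULT  UINT64_C(1024)

#define CLAMP(x, low, high) ((x) < (low) ? (low) : (x) > (high) ? (high) : (x))

static uint64_t shares_to_weight(uint64_t shares) {
        /* Rescale relative to the two defaults, then clamp into the
         * range accepted by the unified hierarchy's cpu.weight. */
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

int main(void) {
        uint64_t shares[] = { 2, 512, 1024, 4096, 262144 };

        for (size_t i = 0; i < sizeof(shares) / sizeof(shares[0]); i++)
                printf("CPUShares=%" PRIu64 " -> cpu.weight %" PRIu64 "\n",
                       shares[i], shares_to_weight(shares[i]));
        return 0;
}

With these assumed constants, the default 1024 shares map to the default weight of 100, and out-of-range results are clamped (for example, 2 shares map to weight 1 and 262144 shares map to weight 10000), which is why the patch can translate legacy settings without rejecting any previously valid value.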