X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~ianmdlvl/git?a=blobdiff_plain;f=src%2Fcore%2Fcgroup.c;h=1c05c23449b06818ddec1a315ce3f9f7d4ef8c5a;hb=4ab72d6fb499c2b4d8baced9fa94a8bbfa5a4b3d;hp=1327486509a6eaaf54af54119087f077f8429f42;hpb=01efdf13a6ee9a14fd6d8b41a5d522d5917e1fbc;p=elogind.git

diff --git a/src/core/cgroup.c b/src/core/cgroup.c
index 132748650..1c05c2344 100644
--- a/src/core/cgroup.c
+++ b/src/core/cgroup.c
@@ -20,6 +20,7 @@
 ***/
 
 #include <fcntl.h>
+#include <fnmatch.h>
 
 #include "path-util.h"
 #include "special.h"
@@ -35,6 +36,10 @@ void cgroup_context_init(CGroupContext *c) {
         c->cpu_shares = 1024;
         c->memory_limit = (uint64_t) -1;
         c->blockio_weight = 1000;
+
+        c->cpu_quota_per_sec_usec = (usec_t) -1;
+        c->cpu_quota_usec = (usec_t) -1;
+        c->cpu_quota_period_usec = 100*USEC_PER_MSEC;
 }
 
 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
@@ -77,10 +82,37 @@ void cgroup_context_done(CGroupContext *c) {
                 cgroup_context_free_device_allow(c, c->device_allow);
 }
 
+usec_t cgroup_context_get_cpu_quota_usec(CGroupContext *c) {
+        assert(c);
+
+        /* Returns the absolute CPU quota */
+
+        if (c->cpu_quota_usec != (usec_t) -1)
+                return c->cpu_quota_usec;
+        else if (c->cpu_quota_per_sec_usec != (usec_t) -1)
+                return c->cpu_quota_per_sec_usec*c->cpu_quota_period_usec/USEC_PER_SEC;
+        else
+                return (usec_t) -1;
+}
+
+usec_t cgroup_context_get_cpu_quota_per_sec_usec(CGroupContext *c) {
+        assert(c);
+
+        /* Returns the CPU quota relative to 1s */
+
+        if (c->cpu_quota_usec != (usec_t) -1)
+                return c->cpu_quota_usec*USEC_PER_SEC/c->cpu_quota_period_usec;
+        else if (c->cpu_quota_per_sec_usec != (usec_t) -1)
+                return c->cpu_quota_per_sec_usec;
+        else
+                return (usec_t) -1;
+}
+
 void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
         CGroupBlockIODeviceBandwidth *b;
         CGroupBlockIODeviceWeight *w;
         CGroupDeviceAllow *a;
+        char t[FORMAT_TIMESPAN_MAX], s[FORMAT_TIMESPAN_MAX], u[FORMAT_TIMESPAN_MAX];
 
         assert(c);
         assert(f);
@@ -92,6 +124,9 @@ void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
                 "%sBlockIOAccounting=%s\n"
                 "%sMemoryAccounting=%s\n"
                 "%sCPUShares=%lu\n"
+                "%sCPUQuota=%s\n"
+                "%sCPUQuotaPerSecSec=%s\n"
+                "%sCPUQuotaPeriodSec=%s\n"
                 "%sBlockIOWeight=%lu\n"
                 "%sMemoryLimit=%" PRIu64 "\n"
                 "%sDevicePolicy=%s\n",
@@ -99,6 +134,9 @@ void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
                 prefix, yes_no(c->blockio_accounting),
                 prefix, yes_no(c->memory_accounting),
                 prefix, c->cpu_shares,
+                prefix, strna(format_timespan(u, sizeof(u), cgroup_context_get_cpu_quota_usec(c), 1)),
+                prefix, strna(format_timespan(t, sizeof(t), cgroup_context_get_cpu_quota_per_sec_usec(c), 1)),
+                prefix, strna(format_timespan(s, sizeof(s), c->cpu_quota_period_usec, 1)),
                 prefix, c->blockio_weight,
                 prefix, c->memory_limit,
                 prefix, cgroup_device_policy_to_string(c->device_policy));
@@ -246,7 +284,8 @@ static int whitelist_major(const char *path, const char *name, char type, const
                 w++;
                 w += strspn(w, WHITESPACE);
 
-                if (!streq(w, name))
+
+                if (fnmatch(name, w, 0) != 0)
                         continue;
 
                 sprintf(buf,
@@ -282,12 +321,27 @@ void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const cha
         is_root = isempty(path) || path_equal(path, "/");
 
         if ((mask & CGROUP_CPU) && !is_root) {
-                char buf[DECIMAL_STR_MAX(unsigned long) + 1];
+                char buf[MAX(DECIMAL_STR_MAX(unsigned long), DECIMAL_STR_MAX(usec_t)) + 1];
+                usec_t q;
 
                 sprintf(buf, "%lu\n", c->cpu_shares);
                 r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                 if (r < 0)
                         log_warning("Failed to set cpu.shares on %s: %s", path, strerror(-r));
+
+                sprintf(buf, USEC_FMT "\n", c->cpu_quota_period_usec);
+                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
+                if (r < 0)
+                        log_warning("Failed to set cpu.cfs_period_us on %s: %s", path, strerror(-r));
+
+                q = cgroup_context_get_cpu_quota_usec(c);
+                if (q != (usec_t) -1) {
+                        sprintf(buf, USEC_FMT "\n", q);
+                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
+                } else
+                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
+                if (r < 0)
+                        log_warning("Failed to set cpu.cfs_quota_us on %s: %s", path, strerror(-r));
         }
 
         if (mask & CGROUP_BLKIO) {
@@ -362,16 +416,22 @@ void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const cha
                 if (c->device_policy == CGROUP_CLOSED ||
                     (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                         static const char auto_devices[] =
-                                "/dev/null\0" "rw\0"
-                                "/dev/zero\0" "rw\0"
-                                "/dev/full\0" "rw\0"
-                                "/dev/random\0" "rw\0"
-                                "/dev/urandom\0" "rw\0";
+                                "/dev/null\0" "rwm\0"
+                                "/dev/zero\0" "rwm\0"
+                                "/dev/full\0" "rwm\0"
+                                "/dev/random\0" "rwm\0"
+                                "/dev/urandom\0" "rwm\0"
+                                "/dev/tty\0" "rwm\0"
+                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */
 
                         const char *x, *y;
 
                         NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                 whitelist_device(path, x, y);
+
+                        whitelist_major(path, "pts", 'c', "rw");
+                        whitelist_major(path, "kdbus", 'c', "rw");
+                        whitelist_major(path, "kdbus/*", 'c', "rw");
                 }
 
                 LIST_FOREACH(device_allow, a, c->device_allow) {
@@ -407,7 +467,10 @@ CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) {
 
         /* Figure out which controllers we need */
 
-        if (c->cpu_accounting || c->cpu_shares != 1024)
+        if (c->cpu_accounting ||
+            c->cpu_shares != 1024 ||
+            c->cpu_quota_usec != (usec_t) -1 ||
+            c->cpu_quota_per_sec_usec != (usec_t) -1)
                 mask |= CGROUP_CPUACCT | CGROUP_CPU;
 
         if (c->blockio_accounting ||
@@ -860,8 +923,7 @@ int manager_setup_cgroup(Manager *m) {
         }
 
         /* 5. And pin it, so that it cannot be unmounted */
-        if (m->pin_cgroupfs_fd >= 0)
-                close_nointr_nofail(m->pin_cgroupfs_fd);
+        safe_close(m->pin_cgroupfs_fd);
 
         m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
         if (r < 0) {
@@ -886,10 +948,7 @@ void manager_shutdown_cgroup(Manager *m, bool delete) {
         if (delete && m->cgroup_root)
                 cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
 
-        if (m->pin_cgroupfs_fd >= 0) {
-                close_nointr_nofail(m->pin_cgroupfs_fd);
-                m->pin_cgroupfs_fd = -1;
-        }
+        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
 
         free(m->cgroup_root);
         m->cgroup_root = NULL;
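
Not part of the patch above: a minimal, self-contained sketch of the arithmetic the new cgroup_context_get_cpu_quota_usec() helper performs before the result is written to cpu.cfs_quota_us. The usec_t typedef and USEC_* constants below are stand-ins for the definitions the real code gets from systemd's headers, and quota_abs() is a hypothetical name used only for illustration; it is not the project's code.

/* Sketch of the quota conversion introduced by the patch: an explicit
 * absolute quota wins; otherwise a per-second quota is scaled by the
 * CFS period. Stand-in definitions, not systemd's time-util.h. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t usec_t;
#define USEC_INFINITY ((usec_t) -1)       /* plays the role of "(usec_t) -1" above */
#define USEC_PER_SEC  ((usec_t) 1000000)
#define USEC_PER_MSEC ((usec_t) 1000)

static usec_t quota_abs(usec_t quota_usec, usec_t quota_per_sec_usec, usec_t period_usec) {
        if (quota_usec != USEC_INFINITY)
                return quota_usec;                                      /* absolute quota set explicitly */
        if (quota_per_sec_usec != USEC_INFINITY)
                return quota_per_sec_usec * period_usec / USEC_PER_SEC; /* scale to one period */
        return USEC_INFINITY;                                           /* no limit */
}

int main(void) {
        usec_t period = 100 * USEC_PER_MSEC;  /* default period set in cgroup_context_init() */
        usec_t per_sec = 200 * USEC_PER_MSEC; /* e.g. a 20% quota: 200ms of CPU time per second */

        usec_t q = quota_abs(USEC_INFINITY, per_sec, period);
        if (q != USEC_INFINITY)
                printf("cpu.cfs_period_us=%llu cpu.cfs_quota_us=%llu\n",
                       (unsigned long long) period, (unsigned long long) q);
        else
                printf("cpu.cfs_quota_us=-1 (no limit)\n");
        return 0;
}

With the values shown this prints cpu.cfs_period_us=100000 cpu.cfs_quota_us=20000, i.e. 20ms of CPU time per 100ms period, which is what a 20% per-second quota amounts to once converted.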