2 This file is part of systemd.
4 Copyright 2013 Lennart Poettering
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
23 #include "alloc-util.h"
24 #include "cgroup-util.h"
29 #include "parse-util.h"
30 #include "path-util.h"
31 #include "process-util.h"
32 //#include "special.h"
33 #include "string-table.h"
34 #include "string-util.h"
35 #include "stdio-util.h"
37 #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
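/* The fixed CPU scheduling period used below for both the unified "cpu.max" and the legacy
 * "cpu.cfs_period_us" attributes: 100 * USEC_PER_MSEC = 100ms. Per-second quota values are
 * rescaled onto this period before being written out. */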
39 #if 0 /// UNNEEDED by elogind
40 static void cgroup_compat_warn(void) {
41 static bool cgroup_compat_warned = false;
43 if (cgroup_compat_warned)
46 log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
47 cgroup_compat_warned = true;
50 #define log_cgroup_compat(unit, fmt, ...) do { \
51 cgroup_compat_warn(); \
52 log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__); \
55 void cgroup_context_init(CGroupContext *c) {
58 /* Initialize everything to the kernel defaults, assuming the
59 * structure is preinitialized to 0 */
61 c->cpu_weight = CGROUP_WEIGHT_INVALID;
62 c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
63 c->cpu_quota_per_sec_usec = USEC_INFINITY;
65 c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
66 c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
68 c->memory_high = CGROUP_LIMIT_MAX;
69 c->memory_max = CGROUP_LIMIT_MAX;
71 c->memory_limit = CGROUP_LIMIT_MAX;
73 c->io_weight = CGROUP_WEIGHT_INVALID;
74 c->startup_io_weight = CGROUP_WEIGHT_INVALID;
76 c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
77 c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
79 c->tasks_max = (uint64_t) -1;
82 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
86 LIST_REMOVE(device_allow, c->device_allow, a);
91 void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
95 LIST_REMOVE(device_weights, c->io_device_weights, w);
100 void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
104 LIST_REMOVE(device_limits, c->io_device_limits, l);
109 void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
113 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
118 void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
122 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
127 void cgroup_context_done(CGroupContext *c) {
130 while (c->io_device_weights)
131 cgroup_context_free_io_device_weight(c, c->io_device_weights);
133 while (c->io_device_limits)
134 cgroup_context_free_io_device_limit(c, c->io_device_limits);
136 while (c->blockio_device_weights)
137 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
139 while (c->blockio_device_bandwidths)
140 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
142 while (c->device_allow)
143 cgroup_context_free_device_allow(c, c->device_allow);
146 void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
147 CGroupIODeviceLimit *il;
148 CGroupIODeviceWeight *iw;
149 CGroupBlockIODeviceBandwidth *b;
150 CGroupBlockIODeviceWeight *w;
151 CGroupDeviceAllow *a;
152 char u[FORMAT_TIMESPAN_MAX];
157 prefix = strempty(prefix);
160 "%sCPUAccounting=%s\n"
161 "%sIOAccounting=%s\n"
162 "%sBlockIOAccounting=%s\n"
163 "%sMemoryAccounting=%s\n"
164 "%sTasksAccounting=%s\n"
165 "%sCPUWeight=%" PRIu64 "\n"
166 "%sStartupCPUWeight=%" PRIu64 "\n"
167 "%sCPUShares=%" PRIu64 "\n"
168 "%sStartupCPUShares=%" PRIu64 "\n"
169 "%sCPUQuotaPerSecSec=%s\n"
170 "%sIOWeight=%" PRIu64 "\n"
171 "%sStartupIOWeight=%" PRIu64 "\n"
172 "%sBlockIOWeight=%" PRIu64 "\n"
173 "%sStartupBlockIOWeight=%" PRIu64 "\n"
174 "%sMemoryLow=%" PRIu64 "\n"
175 "%sMemoryHigh=%" PRIu64 "\n"
176 "%sMemoryMax=%" PRIu64 "\n"
177 "%sMemoryLimit=%" PRIu64 "\n"
178 "%sTasksMax=%" PRIu64 "\n"
179 "%sDevicePolicy=%s\n"
181 prefix, yes_no(c->cpu_accounting),
182 prefix, yes_no(c->io_accounting),
183 prefix, yes_no(c->blockio_accounting),
184 prefix, yes_no(c->memory_accounting),
185 prefix, yes_no(c->tasks_accounting),
186 prefix, c->cpu_weight,
187 prefix, c->startup_cpu_weight,
188 prefix, c->cpu_shares,
189 prefix, c->startup_cpu_shares,
190 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
191 prefix, c->io_weight,
192 prefix, c->startup_io_weight,
193 prefix, c->blockio_weight,
194 prefix, c->startup_blockio_weight,
195 prefix, c->memory_low,
196 prefix, c->memory_high,
197 prefix, c->memory_max,
198 prefix, c->memory_limit,
199 prefix, c->tasks_max,
200 prefix, cgroup_device_policy_to_string(c->device_policy),
201 prefix, yes_no(c->delegate));
203 LIST_FOREACH(device_allow, a, c->device_allow)
205 "%sDeviceAllow=%s %s%s%s\n",
208 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
210 LIST_FOREACH(device_weights, iw, c->io_device_weights)
212 "%sIODeviceWeight=%s %" PRIu64,
217 LIST_FOREACH(device_limits, il, c->io_device_limits) {
218 char buf[FORMAT_BYTES_MAX];
219 CGroupIOLimitType type;
221 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
222 if (il->limits[type] != cgroup_io_limit_defaults[type])
226 cgroup_io_limit_type_to_string(type),
228 format_bytes(buf, sizeof(buf), il->limits[type]));
231 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
233 "%sBlockIODeviceWeight=%s %" PRIu64,
238 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
239 char buf[FORMAT_BYTES_MAX];
241 if (b->rbps != CGROUP_LIMIT_MAX)
243 "%sBlockIOReadBandwidth=%s %s\n",
246 format_bytes(buf, sizeof(buf), b->rbps));
247 if (b->wbps != CGROUP_LIMIT_MAX)
249 "%sBlockIOWriteBandwidth=%s %s\n",
252 format_bytes(buf, sizeof(buf), b->wbps));
256 static int lookup_block_device(const char *p, dev_t *dev) {
265 return log_warning_errno(errno, "Couldn't stat device %s: %m", p);
267 if (S_ISBLK(st.st_mode))
269 else if (major(st.st_dev) != 0) {
270 /* If this is not a device node then find the block
271 * device this file is stored on */
274 /* If this is a partition, try to get the originating
276 block_get_whole_disk(*dev, dev);
278 log_warning("%s is not a block device, and the block device backing its file system cannot be determined or is not local.", p);
285 static int whitelist_device(const char *path, const char *node, const char *acc) {
286 char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
293 if (stat(node, &st) < 0) {
294 log_warning("Couldn't stat device %s", node);
298 if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
299 log_warning("%s is not a device.", node);
305 S_ISCHR(st.st_mode) ? 'c' : 'b',
306 major(st.st_rdev), minor(st.st_rdev),
309 r = cg_set_attribute("devices", path, "devices.allow", buf);
311 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
312 "Failed to set devices.allow on %s: %m", path);
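/* Illustration (hypothetical device): whitelisting the character device /dev/null (1:3) with
 * acc="rwm" results in the entry "c 1:3 rwm" being written to devices.allow, which is the
 * format the kernel devices controller expects. */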
317 static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
318 _cleanup_fclose_ FILE *f = NULL;
325 assert(type == 'b' || type == 'c');
327 f = fopen("/proc/devices", "re");
329 return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
331 FOREACH_LINE(line, f, goto fail) {
332 char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
337 if (type == 'c' && streq(line, "Character devices:")) {
342 if (type == 'b' && streq(line, "Block devices:")) {
357 w = strpbrk(p, WHITESPACE);
362 r = safe_atou(p, &maj);
369 w += strspn(w, WHITESPACE);
371 if (fnmatch(name, w, 0) != 0)
380 r = cg_set_attribute("devices", path, "devices.allow", buf);
382 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
383 "Failed to set devices.allow on %s: %m", path);
389 log_warning_errno(errno, "Failed to read /proc/devices: %m");
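/* For reference, the /proc/devices lines matched above look like
 *
 *   Character devices:
 *     1 mem
 *   136 pts
 *
 *   Block devices:
 *     8 sd
 *
 * i.e. a major number followed by a driver name, which fnmatch() compares against "name". */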
393 static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
394 return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
395 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
398 static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
399 return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
400 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
403 static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
404 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
405 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
406 return c->startup_cpu_weight;
407 else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
408 return c->cpu_weight;
410 return CGROUP_WEIGHT_DEFAULT;
413 static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
414 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
415 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
416 return c->startup_cpu_shares;
417 else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
418 return c->cpu_shares;
420 return CGROUP_CPU_SHARES_DEFAULT;
423 static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
424 char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
427 xsprintf(buf, "%" PRIu64 "\n", weight);
428 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
430 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
431 "Failed to set cpu.weight: %m");
433 if (quota != USEC_INFINITY)
434 xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
435 quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
437 xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
439 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
442 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
443 "Failed to set cpu.max: %m");
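/* Worked example (hypothetical value): a per-second quota of 200ms (i.e. 20%) yields
 * buf == "20000 100000\n", meaning 20ms of runtime per 100ms period; with no quota configured,
 * "max 100000\n" is written instead. */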
446 static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
447 char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
450 xsprintf(buf, "%" PRIu64 "\n", shares);
451 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
453 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
454 "Failed to set cpu.shares: %m");
456 xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
457 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
459 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
460 "Failed to set cpu.cfs_period_us: %m");
462 if (quota != USEC_INFINITY) {
463 xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
464 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
466 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
468 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
469 "Failed to set cpu.cfs_quota_us: %m");
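/* Worked example (same hypothetical 200ms/s quota): cpu.cfs_period_us is set to "100000" and
 * cpu.cfs_quota_us to "20000"; without a quota, cpu.cfs_quota_us is reset to "-1". */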
472 static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
473 return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
474 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
477 static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
478 return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
479 CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
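/* The two conversions above map between the legacy cpu.shares range and the unified cpu.weight
 * range by proportional scaling around the defaults. Assuming the usual defaults of 1024 shares
 * and weight 100: shares of 2048 become a weight of 2048 * 100 / 1024 = 200, and a weight of 50
 * becomes shares of 50 * 1024 / 100 = 512, clamped to the respective valid ranges. */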
482 static bool cgroup_context_has_io_config(CGroupContext *c) {
483 return c->io_accounting ||
484 c->io_weight != CGROUP_WEIGHT_INVALID ||
485 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
486 c->io_device_weights ||
490 static bool cgroup_context_has_blockio_config(CGroupContext *c) {
491 return c->blockio_accounting ||
492 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
493 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
494 c->blockio_device_weights ||
495 c->blockio_device_bandwidths;
498 static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
499 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
500 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
501 return c->startup_io_weight;
502 else if (c->io_weight != CGROUP_WEIGHT_INVALID)
505 return CGROUP_WEIGHT_DEFAULT;
508 static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
509 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
510 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
511 return c->startup_blockio_weight;
512 else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
513 return c->blockio_weight;
515 return CGROUP_BLKIO_WEIGHT_DEFAULT;
518 static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
519 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
520 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
523 static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
524 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
525 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
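/* Same proportional mapping as the CPU case, here between the legacy blkio.weight range and the
 * unified io.weight range. Assuming the usual defaults of 500 (blkio) and 100 (io), a
 * BlockIOWeight of 1000 becomes an IOWeight of 1000 * 100 / 500 = 200, clamped to the valid
 * io.weight range. */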
528 static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
529 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
533 r = lookup_block_device(dev_path, &dev);
537 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
538 r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
540 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
541 "Failed to set io.weight: %m");
544 static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
545 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
549 r = lookup_block_device(dev_path, &dev);
553 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
554 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
556 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
557 "Failed to set blkio.weight_device: %m");
560 static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
561 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
562 char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
563 CGroupIOLimitType type;
568 r = lookup_block_device(dev_path, &dev);
572 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
573 if (limits[type] != cgroup_io_limit_defaults[type]) {
574 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
577 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
581 xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
582 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
583 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
584 r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
586 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
587 "Failed to set io.max: %m");
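/* Illustrative io.max entry: for a device 8:0 with only a read-bandwidth limit of 1M configured
 * (all other limits left at their "no limit" defaults), the line written would be
 * "8:0 rbps=1048576 wbps=max riops=max wiops=max\n". */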
591 static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
592 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
597 r = lookup_block_device(dev_path, &dev);
601 if (rbps != CGROUP_LIMIT_MAX)
603 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
604 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
606 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
607 "Failed to set blkio.throttle.read_bps_device: %m");
609 if (wbps != CGROUP_LIMIT_MAX)
611 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
612 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
614 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
615 "Failed to set blkio.throttle.write_bps_device: %m");
620 static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
621 return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX;
624 static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
625 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
628 if (v != CGROUP_LIMIT_MAX)
629 xsprintf(buf, "%" PRIu64 "\n", v);
631 r = cg_set_attribute("memory", u->cgroup_path, file, buf);
633 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
634 "Failed to set %s: %m", file);
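/* For illustration: a limit of 1G writes "1073741824\n" to the given attribute, while
 * CGROUP_LIMIT_MAX keeps the preinitialized "max" string. */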
637 static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
645 c = unit_get_cgroup_context(u);
646 path = u->cgroup_path;
654 /* Some cgroup attributes are not supported on the root cgroup,
655 * hence silently ignore */
656 is_root = isempty(path) || path_equal(path, "/");
658 /* Make sure we don't try to display messages with an empty path. */
661 /* We generally ignore errors caused by read-only mounted
662 * cgroup trees (assuming, in that case, that we are running inside a container),
663 * and missing cgroups, i.e. EROFS and ENOENT. */
665 if ((mask & CGROUP_MASK_CPU) && !is_root) {
666 bool has_weight = cgroup_context_has_cpu_weight(c);
667 bool has_shares = cgroup_context_has_cpu_shares(c);
669 if (cg_all_unified() > 0) {
673 weight = cgroup_context_cpu_weight(c, state);
674 else if (has_shares) {
675 uint64_t shares = cgroup_context_cpu_shares(c, state);
677 weight = cgroup_cpu_shares_to_weight(shares);
679 log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
680 shares, weight, path);
682 weight = CGROUP_WEIGHT_DEFAULT;
684 cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
689 shares = cgroup_context_cpu_shares(c, state);
690 else if (has_weight) {
691 uint64_t weight = cgroup_context_cpu_weight(c, state);
693 shares = cgroup_cpu_weight_to_shares(weight);
695 log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
696 weight, shares, path);
698 shares = CGROUP_CPU_SHARES_DEFAULT;
700 cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
704 if (mask & CGROUP_MASK_IO) {
705 bool has_io = cgroup_context_has_io_config(c);
706 bool has_blockio = cgroup_context_has_blockio_config(c);
709 char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
713 weight = cgroup_context_io_weight(c, state);
714 else if (has_blockio) {
715 uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);
717 weight = cgroup_weight_blkio_to_io(blkio_weight);
719 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
720 blkio_weight, weight);
722 weight = CGROUP_WEIGHT_DEFAULT;
724 xsprintf(buf, "default %" PRIu64 "\n", weight);
725 r = cg_set_attribute("io", path, "io.weight", buf);
727 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
728 "Failed to set io.weight: %m");
731 CGroupIODeviceWeight *w;
733 /* FIXME: no way to reset this list */
734 LIST_FOREACH(device_weights, w, c->io_device_weights)
735 cgroup_apply_io_device_weight(u, w->path, w->weight);
736 } else if (has_blockio) {
737 CGroupBlockIODeviceWeight *w;
739 /* FIXME: no way to reset this list */
740 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
741 weight = cgroup_weight_blkio_to_io(w->weight);
743 log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
744 w->weight, weight, w->path);
746 cgroup_apply_io_device_weight(u, w->path, weight);
751 /* Apply limits and free ones without config. */
753 CGroupIODeviceLimit *l, *next;
755 LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
756 if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
757 cgroup_context_free_io_device_limit(c, l);
759 } else if (has_blockio) {
760 CGroupBlockIODeviceBandwidth *b, *next;
762 LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
763 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
764 CGroupIOLimitType type;
766 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
767 limits[type] = cgroup_io_limit_defaults[type];
769 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
770 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
772 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
773 b->rbps, b->wbps, b->path);
775 if (!cgroup_apply_io_device_limit(u, b->path, limits))
776 cgroup_context_free_blockio_device_bandwidth(c, b);
781 if (mask & CGROUP_MASK_BLKIO) {
782 bool has_io = cgroup_context_has_io_config(c);
783 bool has_blockio = cgroup_context_has_blockio_config(c);
786 char buf[DECIMAL_STR_MAX(uint64_t)+1];
790 weight = cgroup_context_blkio_weight(c, state);
792 uint64_t io_weight = cgroup_context_io_weight(c, state);
794 weight = cgroup_weight_io_to_blkio(io_weight);
796 log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
799 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
801 xsprintf(buf, "%" PRIu64 "\n", weight);
802 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
804 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
805 "Failed to set blkio.weight: %m");
808 CGroupBlockIODeviceWeight *w;
810 /* FIXME: no way to reset this list */
811 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
812 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
814 CGroupIODeviceWeight *w;
816 /* FIXME: no way to reset this list */
817 LIST_FOREACH(device_weights, w, c->io_device_weights) {
818 weight = cgroup_weight_io_to_blkio(w->weight);
820 log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
821 w->weight, weight, w->path);
823 cgroup_apply_blkio_device_weight(u, w->path, weight);
828 /* Apply limits and free ones without config. */
830 CGroupBlockIODeviceBandwidth *b, *next;
832 LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
833 if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
834 cgroup_context_free_blockio_device_bandwidth(c, b);
837 CGroupIODeviceLimit *l, *next;
839 LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
840 log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
841 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
843 if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
844 cgroup_context_free_io_device_limit(c, l);
849 if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
850 if (cg_all_unified() > 0) {
851 uint64_t max = c->memory_max;
853 if (cgroup_context_has_unified_memory_config(c))
856 max = c->memory_limit;
858 if (max != CGROUP_LIMIT_MAX)
859 log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
862 cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
863 cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
864 cgroup_apply_unified_memory_limit(u, "memory.max", max);
866 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
867 uint64_t val = c->memory_limit;
869 if (val == CGROUP_LIMIT_MAX) {
872 if (val != CGROUP_LIMIT_MAX)
873 log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", c->memory_max);
876 if (val == CGROUP_LIMIT_MAX)
877 strncpy(buf, "-1\n", sizeof(buf));
879 xsprintf(buf, "%" PRIu64 "\n", val);
881 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
883 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
884 "Failed to set memory.limit_in_bytes: %m");
888 if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
889 CGroupDeviceAllow *a;
891 /* Changing the devices list of a populated cgroup
892 * might result in EINVAL, hence ignore EINVAL
895 if (c->device_allow || c->device_policy != CGROUP_AUTO)
896 r = cg_set_attribute("devices", path, "devices.deny", "a");
898 r = cg_set_attribute("devices", path, "devices.allow", "a");
900 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
901 "Failed to reset devices.list: %m");
903 if (c->device_policy == CGROUP_CLOSED ||
904 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
905 static const char auto_devices[] =
906 "/dev/null\0" "rwm\0"
907 "/dev/zero\0" "rwm\0"
908 "/dev/full\0" "rwm\0"
909 "/dev/random\0" "rwm\0"
910 "/dev/urandom\0" "rwm\0"
912 "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
913 /* Allow /run/elogind/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
914 /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
915 "/run/systemd/inaccessible/chr\0" "rwm\0"
916 "/run/systemd/inaccessible/blk\0" "rwm\0";
920 NULSTR_FOREACH_PAIR(x, y, auto_devices)
921 whitelist_device(path, x, y);
923 whitelist_major(path, "pts", 'c', "rw");
924 whitelist_major(path, "kdbus", 'c', "rw");
925 whitelist_major(path, "kdbus/*", 'c', "rw");
928 LIST_FOREACH(device_allow, a, c->device_allow) {
944 if (startswith(a->path, "/dev/"))
945 whitelist_device(path, a->path, acc);
946 else if (startswith(a->path, "block-"))
947 whitelist_major(path, a->path + 6, 'b', acc);
948 else if (startswith(a->path, "char-"))
949 whitelist_major(path, a->path + 5, 'c', acc);
951 log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
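/* For illustration: DeviceAllow= values handled here come in three forms -- a device node path
 * ("/dev/sda rwm"), a "block-" prefixed major group ("block-sd rwm"), or a "char-" prefixed one
 * ("char-pts rw"); the prefix is stripped before the /proc/devices lookup in whitelist_major().
 * Anything else is skipped with the debug message above. */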
955 if ((mask & CGROUP_MASK_PIDS) && !is_root) {
957 if (c->tasks_max != (uint64_t) -1) {
958 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
960 sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
961 r = cg_set_attribute("pids", path, "pids.max", buf);
963 r = cg_set_attribute("pids", path, "pids.max", "max");
966 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
967 "Failed to set pids.max: %m");
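/* For illustration: a TasksMax= setting of 4096 ends up as "4096\n" in pids.max, while the
 * unlimited default writes the literal string "max". */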
971 CGroupMask cgroup_context_get_mask(CGroupContext *c) {
974 /* Figure out which controllers we need */
976 if (c->cpu_accounting ||
977 cgroup_context_has_cpu_weight(c) ||
978 cgroup_context_has_cpu_shares(c) ||
979 c->cpu_quota_per_sec_usec != USEC_INFINITY)
980 mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
982 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
983 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
985 if (c->memory_accounting ||
986 c->memory_limit != CGROUP_LIMIT_MAX ||
987 cgroup_context_has_unified_memory_config(c))
988 mask |= CGROUP_MASK_MEMORY;
990 if (c->device_allow ||
991 c->device_policy != CGROUP_AUTO)
992 mask |= CGROUP_MASK_DEVICES;
994 if (c->tasks_accounting ||
995 c->tasks_max != (uint64_t) -1)
996 mask |= CGROUP_MASK_PIDS;
1001 CGroupMask unit_get_own_mask(Unit *u) {
1004 /* Returns the mask of controllers the unit needs for itself */
1006 c = unit_get_cgroup_context(u);
1010 /* If delegation is turned on, then turn on all cgroups,
1011 * unless we are on the legacy hierarchy and the process we
1012 * fork into it is known to drop privileges, and hence
1013 * shouldn't get access to the controllers.
1015 * Note that on the unified hierarchy it is safe to delegate
1016 * controllers to unprivileged services. */
1021 e = unit_get_exec_context(u);
1023 exec_context_maintains_privileges(e) ||
1024 cg_all_unified() > 0)
1025 return _CGROUP_MASK_ALL;
1028 return cgroup_context_get_mask(c);
1031 CGroupMask unit_get_members_mask(Unit *u) {
1034 /* Returns the mask of controllers all of the unit's children
1035 * require, merged */
1037 if (u->cgroup_members_mask_valid)
1038 return u->cgroup_members_mask;
1040 u->cgroup_members_mask = 0;
1042 if (u->type == UNIT_SLICE) {
1046 SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
1051 if (UNIT_DEREF(member->slice) != u)
1054 u->cgroup_members_mask |=
1055 unit_get_own_mask(member) |
1056 unit_get_members_mask(member);
1060 u->cgroup_members_mask_valid = true;
1061 return u->cgroup_members_mask;
1064 CGroupMask unit_get_siblings_mask(Unit *u) {
1067 /* Returns the mask of controllers all of the unit's siblings
1068 * require, i.e. the members mask of the unit's parent slice
1069 * if there is one. */
1071 if (UNIT_ISSET(u->slice))
1072 return unit_get_members_mask(UNIT_DEREF(u->slice));
1074 return unit_get_own_mask(u) | unit_get_members_mask(u);
1077 CGroupMask unit_get_subtree_mask(Unit *u) {
1079 /* Returns the mask of this subtree, meaning of the group
1080 * itself and its children. */
1082 return unit_get_own_mask(u) | unit_get_members_mask(u);
1085 CGroupMask unit_get_target_mask(Unit *u) {
1088 /* This returns the cgroup mask of all controllers to enable
1089 * for a specific cgroup, i.e. everything it needs itself,
1090 * plus all that its children need, plus all that its siblings
1091 * need. This is primarily useful on the legacy cgroup
1092 * hierarchy, where we need to duplicate each cgroup in each
1093 * hierarchy that shall be enabled for it. */
1095 mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
1096 mask &= u->manager->cgroup_supported;
1101 CGroupMask unit_get_enable_mask(Unit *u) {
1104 /* This returns the cgroup mask of all controllers to enable
1105 * for the children of a specific cgroup. This is primarily
1106 * useful for the unified cgroup hierarchy, where each cgroup
1107 * controls which controllers are enabled for its children. */
1109 mask = unit_get_members_mask(u);
1110 mask &= u->manager->cgroup_supported;
1115 /* Recurse from a unit up through its containing slices, propagating
1116 * mask bits upward. A unit is also a member of itself. */
1117 void unit_update_cgroup_members_masks(Unit *u) {
1123 /* Calculate subtree mask */
1124 m = unit_get_subtree_mask(u);
1126 /* See if anything changed from the previous invocation. If
1127 * not, we're done. */
1128 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
1132 u->cgroup_subtree_mask_valid &&
1133 ((m & ~u->cgroup_subtree_mask) != 0) &&
1134 ((~m & u->cgroup_subtree_mask) == 0);
1136 u->cgroup_subtree_mask = m;
1137 u->cgroup_subtree_mask_valid = true;
1139 if (UNIT_ISSET(u->slice)) {
1140 Unit *s = UNIT_DEREF(u->slice);
1143 /* There's more set now than before. We
1144 * propagate the new mask to the parent's mask
1145 * (not caring if it actually was valid or
1148 s->cgroup_members_mask |= m;
1151 /* There's less set now than before (or we
1152 * don't know), we need to recalculate
1153 * everything, so let's invalidate the
1154 * parent's members mask */
1156 s->cgroup_members_mask_valid = false;
1158 /* And now make sure that this change also hits our
1160 unit_update_cgroup_members_masks(s);
1164 static const char *migrate_callback(CGroupMask mask, void *userdata) {
1171 if (u->cgroup_path &&
1172 u->cgroup_realized &&
1173 (u->cgroup_realized_mask & mask) == mask)
1174 return u->cgroup_path;
1176 u = UNIT_DEREF(u->slice);
1182 char *unit_default_cgroup_path(Unit *u) {
1183 _cleanup_free_ char *escaped = NULL, *slice = NULL;
1188 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1189 return strdup(u->manager->cgroup_root);
1191 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
1192 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
1197 escaped = cg_escape(u->id);
1202 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
1204 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
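/* Illustration with hypothetical names: a unit "foo.service" in "bar.slice" yields
 * "<cgroup_root>/bar.slice/foo.service"; for a nested slice such as "bar-baz.slice",
 * cg_slice_to_path() expands the slice component to "bar.slice/bar-baz.slice" first. */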
1207 int unit_set_cgroup_path(Unit *u, const char *path) {
1208 _cleanup_free_ char *p = NULL;
1220 if (streq_ptr(u->cgroup_path, p))
1224 r = hashmap_put(u->manager->cgroup_unit, p, u);
1229 unit_release_cgroup(u);
1237 int unit_watch_cgroup(Unit *u) {
1238 _cleanup_free_ char *events = NULL;
1243 if (!u->cgroup_path)
1246 if (u->cgroup_inotify_wd >= 0)
1249 /* Only applies to the unified hierarchy */
1250 r = cg_all_unified();
1252 return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
1256 /* Don't watch the root slice, it's pointless. */
1257 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1260 r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
1264 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
1268 u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
1269 if (u->cgroup_inotify_wd < 0) {
1271 if (errno == ENOENT) /* If the directory is already
1272 * gone we don't need to track
1273 * it, so this is not an error */
1276 return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
1279 r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
1281 return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
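/* Background: on the unified hierarchy "cgroup.events" is a keyed file whose "populated 0|1"
 * entry flips when the last process leaves the subtree; watching it with IN_MODIFY is what
 * replaces the legacy release-agent notifications set up in manager_setup_cgroup() below. */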
1286 static int unit_create_cgroup(
1288 CGroupMask target_mask,
1289 CGroupMask enable_mask) {
1296 c = unit_get_cgroup_context(u);
1300 if (!u->cgroup_path) {
1301 _cleanup_free_ char *path = NULL;
1303 path = unit_default_cgroup_path(u);
1307 r = unit_set_cgroup_path(u, path);
1309 return log_unit_error_errno(u, r, "Control group %s exists already.", path);
1311 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
1314 /* First, create our own group */
1315 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
1317 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
1319 /* Start watching it */
1320 (void) unit_watch_cgroup(u);
1322 /* Enable all controllers we need */
1323 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
1325 log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);
1327 /* Keep track that this is now realized */
1328 u->cgroup_realized = true;
1329 u->cgroup_realized_mask = target_mask;
1330 u->cgroup_enabled_mask = enable_mask;
1332 if (u->type != UNIT_SLICE && !c->delegate) {
1334 /* Then, possibly move things over, but not if
1335 * subgroups may contain processes, which is the case
1336 * for slice and delegation units. */
1337 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
1339 log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
1345 int unit_attach_pids_to_cgroup(Unit *u) {
1349 r = unit_realize_cgroup(u);
1353 r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
1360 static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask enable_mask) {
1363 return u->cgroup_realized && u->cgroup_realized_mask == target_mask && u->cgroup_enabled_mask == enable_mask;
1366 /* Check if necessary controllers and attributes for a unit are in place.
1368 * If so, do nothing.
1369 * If not, create paths, move processes over, and set attributes.
1371 * Returns 0 on success and < 0 on failure. */
1372 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
1373 CGroupMask target_mask, enable_mask;
1378 if (u->in_cgroup_queue) {
1379 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
1380 u->in_cgroup_queue = false;
1383 target_mask = unit_get_target_mask(u);
1384 enable_mask = unit_get_enable_mask(u);
1386 if (unit_has_mask_realized(u, target_mask, enable_mask))
1389 /* First, realize parents */
1390 if (UNIT_ISSET(u->slice)) {
1391 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
1396 /* And then do the real work */
1397 r = unit_create_cgroup(u, target_mask, enable_mask);
1401 /* Finally, apply the necessary attributes. */
1402 cgroup_context_apply(u, target_mask, state);
1407 static void unit_add_to_cgroup_queue(Unit *u) {
1409 if (u->in_cgroup_queue)
1412 LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
1413 u->in_cgroup_queue = true;
1416 unsigned manager_dispatch_cgroup_queue(Manager *m) {
1422 state = manager_state(m);
1424 while ((i = m->cgroup_queue)) {
1425 assert(i->in_cgroup_queue);
1427 r = unit_realize_cgroup_now(i, state);
1429 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
1437 static void unit_queue_siblings(Unit *u) {
1440 /* This adds the siblings of the specified unit and the
1441 * siblings of all parent units to the cgroup queue. (But
1442 * neither the specified unit itself nor the parents.) */
1444 while ((slice = UNIT_DEREF(u->slice))) {
1448 SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
1452 /* Skip units that have a dependency on the slice
1453 * but aren't actually in it. */
1454 if (UNIT_DEREF(m->slice) != slice)
1457 /* No point in doing cgroup application for units
1458 * without active processes. */
1459 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
1462 /* If the unit doesn't need any new controllers
1463 * and has current ones realized, it doesn't need
1465 if (unit_has_mask_realized(m, unit_get_target_mask(m), unit_get_enable_mask(m)))
1468 unit_add_to_cgroup_queue(m);
1475 int unit_realize_cgroup(Unit *u) {
1478 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1481 /* So, here's the deal: when realizing the cgroups for this
1482 * unit, we need to first create all parents, but there's more
1483 * actually: for the weight-based controllers we also need to
1484 * make sure that all our siblings (i.e. units that are in the
1485 * same slice as we are) have cgroups, too. Otherwise, things
1486 * would become very uneven as each of their processes would
1487 * get as many resources as our whole group together. This call
1488 * will synchronously create the parent cgroups, but will
1489 * defer work on the siblings to the next event loop
1492 /* Add all sibling slices to the cgroup queue. */
1493 unit_queue_siblings(u);
1495 /* And realize this one now (and apply the values) */
1496 return unit_realize_cgroup_now(u, manager_state(u->manager));
1499 void unit_release_cgroup(Unit *u) {
1502 /* Forgets all cgroup details for this cgroup */
1504 if (u->cgroup_path) {
1505 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1506 u->cgroup_path = mfree(u->cgroup_path);
1509 if (u->cgroup_inotify_wd >= 0) {
1510 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1511 log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);
1513 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1514 u->cgroup_inotify_wd = -1;
1518 void unit_prune_cgroup(Unit *u) {
1524 /* Removes the cgroup, if empty and possible, and stops watching it. */
1526 if (!u->cgroup_path)
1529 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1531 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
1533 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
1540 unit_release_cgroup(u);
1542 u->cgroup_realized = false;
1543 u->cgroup_realized_mask = 0;
1544 u->cgroup_enabled_mask = 0;
1547 int unit_search_main_pid(Unit *u, pid_t *ret) {
1548 _cleanup_fclose_ FILE *f = NULL;
1549 pid_t pid = 0, npid, mypid;
1555 if (!u->cgroup_path)
1558 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1563 while (cg_read_pid(f, &npid) > 0) {
1569 /* Ignore processes that aren't our kids */
1570 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
1574 /* Dang, there's more than one daemonized PID
1575 in this group, so we don't know what process
1576 is the main process. */
1587 static int unit_watch_pids_in_path(Unit *u, const char *path) {
1588 _cleanup_closedir_ DIR *d = NULL;
1589 _cleanup_fclose_ FILE *f = NULL;
1595 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1601 while ((r = cg_read_pid(f, &pid)) > 0) {
1602 r = unit_watch_pid(u, pid);
1603 if (r < 0 && ret >= 0)
1607 if (r < 0 && ret >= 0)
1611 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1618 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1619 _cleanup_free_ char *p = NULL;
1621 p = strjoin(path, "/", fn, NULL);
1627 r = unit_watch_pids_in_path(u, p);
1628 if (r < 0 && ret >= 0)
1632 if (r < 0 && ret >= 0)
1639 int unit_watch_all_pids(Unit *u) {
1642 /* Adds all PIDs from our cgroup to the set of PIDs we
1643 * watch. This is fallback logic for cases where we do not
1644 * get reliable cgroup empty notifications: we try to use
1645 * SIGCHLD as replacement. */
1647 if (!u->cgroup_path)
1650 if (cg_all_unified() > 0) /* On unified we can use proper notifications */
1653 return unit_watch_pids_in_path(u, u->cgroup_path);
1656 int unit_notify_cgroup_empty(Unit *u) {
1661 if (!u->cgroup_path)
1664 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
1668 unit_add_to_gc_queue(u);
1670 if (UNIT_VTABLE(u)->notify_cgroup_empty)
1671 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1676 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1677 Manager *m = userdata;
1684 union inotify_event_buffer buffer;
1685 struct inotify_event *e;
1688 l = read(fd, &buffer, sizeof(buffer));
1690 if (errno == EINTR || errno == EAGAIN)
1693 return log_error_errno(errno, "Failed to read control group inotify events: %m");
1696 FOREACH_INOTIFY_EVENT(e, buffer, l) {
1700 /* Queue overflow has no watch descriptor */
1703 if (e->mask & IN_IGNORED)
1704 /* The watch was just removed */
1707 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
1708 if (!u) /* Note that inotify might deliver
1709 * events for a watch even after it
1710 * was removed, because it was queued
1711 * before the removal. Let's ignore
1712 * this here safely. */
1715 (void) unit_notify_cgroup_empty(u);
1721 int manager_setup_cgroup(Manager *m) {
1722 _cleanup_free_ char *path = NULL;
1729 /* 1. Determine hierarchy */
1730 m->cgroup_root = mfree(m->cgroup_root);
1731 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
1733 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
1735 #if 0 /// elogind does not support systemd scopes and slices
1736 /* Chop off the init scope, if we are already located in it */
1737 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1739 /* LEGACY: Also chop off the system slice if we are in
1740 * it. This is to support live upgrades from older systemd
1741 * versions where PID 1 was moved there. Also see
1742 * cg_get_root_path(). */
1743 if (!e && MANAGER_IS_SYSTEM(m)) {
1744 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
1746 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
1752 /* And make sure to store away the root value without trailing
1753 * slash, even for the root dir, so that we can easily prepend
1755 while ((e = endswith(m->cgroup_root, "/")))
1757 log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
1758 SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root);
1761 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
1763 return log_error_errno(r, "Cannot find cgroup mount point: %m");
1765 unified = cg_all_unified();
1767 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
1769 log_debug("Unified cgroup hierarchy is located at %s.", path);
1771 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
1774 const char *scope_path;
1776 /* 3. Install agent */
1779 /* In the unified hierarchy we can get
1780 * cgroup empty notifications via inotify. */
1782 #if 0 /// elogind does not support the unified hierarchy, yet.
1783 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1784 safe_close(m->cgroup_inotify_fd);
1786 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
1787 if (m->cgroup_inotify_fd < 0)
1788 return log_error_errno(errno, "Failed to create control group inotify object: %m");
1790 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
1792 return log_error_errno(r, "Failed to watch control group inotify object: %m");
1794 /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
1795 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
1796 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-5);
1798 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
1800 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
1803 return log_error_errno(EOPNOTSUPP, "Unified cgroup hierarchy not supported: %m");
1805 } else if (MANAGER_IS_SYSTEM(m)) {
1807 /* On the legacy hierarchy we only get
1808 * notifications via cgroup agents. (Which
1809 * isn't really reliable, since it does not
1810 * generate events when control groups with
1811 * children run empty. */
1813 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
1815 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
1817 log_debug("Installed release agent.");
1819 log_debug("Release agent already installed.");
1822 #if 0 /// elogind is not meant to run in systemd init scope
1823 /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
1824 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1825 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1827 if (streq(SYSTEMD_CGROUP_CONTROLLER, "name=elogind"))
1828 // we are our own cgroup controller
1829 scope_path = strjoina("");
1830 else if (streq(m->cgroup_root, "/elogind"))
1831 // root already is our cgroup
1832 scope_path = strjoina(m->cgroup_root);
1834 // we have to create our own group
1835 scope_path = strjoina(m->cgroup_root, "/elogind");
1836 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1839 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
1840 log_debug_elogind("Created control group \"%s\"", scope_path);
1842 /* also, move all other userspace processes remaining
1843 * in the root cgroup into that scope. */
1844 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1846 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
1848 /* 5. And pin it, so that it cannot be unmounted */
1849 safe_close(m->pin_cgroupfs_fd);
1850 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
1851 if (m->pin_cgroupfs_fd < 0)
1852 return log_error_errno(errno, "Failed to open pin file: %m");
1854 /* 6. Always enable hierarchical support if it exists... */
1856 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
1859 /* 7. Figure out which controllers are supported */
1860 r = cg_mask_supported(&m->cgroup_supported);
1862 return log_error_errno(r, "Failed to determine supported controllers: %m");
1864 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
1865 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
1870 void manager_shutdown_cgroup(Manager *m, bool delete) {
1873 /* We can't really delete the group, since we are in it. But
1875 if (delete && m->cgroup_root)
1876 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
1878 #if 0 /// elogind does not support the unified hierarchy, yet.
1879 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
1881 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1882 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
1885 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
1887 m->cgroup_root = mfree(m->cgroup_root);
1890 #if 0 /// UNNEEDED by elogind
1891 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
1898 u = hashmap_get(m->cgroup_unit, cgroup);
1902 p = strdupa(cgroup);
1906 e = strrchr(p, '/');
1908 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
1912 u = hashmap_get(m->cgroup_unit, p);
1918 Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
1919 _cleanup_free_ char *cgroup = NULL;
1927 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
1931 return manager_get_unit_by_cgroup(m, cgroup);
1934 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
1943 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
1945 u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
1949 u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
1953 return manager_get_unit_by_pid_cgroup(m, pid);
1957 #if 0 /// elogind must substitute this with its own variant
1958 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
1964 log_debug("Got cgroup empty notification for: %s", cgroup);
1966 u = manager_get_unit_by_cgroup(m, cgroup);
1970 return unit_notify_cgroup_empty(u);
1973 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
1979 log_debug("Got cgroup empty notification for: %s", cgroup);
1981 s = hashmap_get(m->sessions, cgroup);
1984 session_finalize(s);
1987 log_warning("Session not found: %s", cgroup);
1993 #if 0 /// UNNEEDED by elogind
1994 int unit_get_memory_current(Unit *u, uint64_t *ret) {
1995 _cleanup_free_ char *v = NULL;
2001 if (!u->cgroup_path)
2004 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
2007 if (cg_all_unified() <= 0)
2008 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
2010 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
2016 return safe_atou64(v, ret);
2019 int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2020 _cleanup_free_ char *v = NULL;
2026 if (!u->cgroup_path)
2029 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2032 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2038 return safe_atou64(v, ret);
2041 static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2042 _cleanup_free_ char *v = NULL;
2049 if (!u->cgroup_path)
2052 if (cg_all_unified() > 0) {
2053 const char *keys[] = { "usage_usec", NULL };
2054 _cleanup_free_ char *val = NULL;
2057 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2060 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);
2064 r = safe_atou64(val, &us);
2068 ns = us * NSEC_PER_USEC;
2070 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2073 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2079 r = safe_atou64(v, &ns);
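/* Note the unit mismatch handled above: unified cpu.stat reports "usage_usec" in microseconds,
 * hence the NSEC_PER_USEC scaling, whereas legacy cpuacct.usage already reports nanoseconds and
 * is parsed into ns directly. */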
2088 int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2092 r = unit_get_cpu_usage_raw(u, &ns);
2096 if (ns > u->cpu_usage_base)
2097 ns -= u->cpu_usage_base;
2105 int unit_reset_cpu_usage(Unit *u) {
2111 r = unit_get_cpu_usage_raw(u, &ns);
2113 u->cpu_usage_base = 0;
2117 u->cpu_usage_base = ns;
2121 bool unit_cgroup_delegate(Unit *u) {
2126 c = unit_get_cgroup_context(u);
2133 void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2136 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2142 /* always invalidate compat pairs together */
2143 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2144 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2146 if ((u->cgroup_realized_mask & m) == 0)
2149 u->cgroup_realized_mask &= ~m;
2150 unit_add_to_cgroup_queue(u);
2153 void manager_invalidate_startup_units(Manager *m) {
2159 SET_FOREACH(u, m->startup_units, i)
2160 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
2163 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2164 [CGROUP_AUTO] = "auto",
2165 [CGROUP_CLOSED] = "closed",
2166 [CGROUP_STRICT] = "strict",
2169 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);