1 /* SPDX-License-Identifier: LGPL-2.1+ */
3 This file is part of systemd.
5 Copyright 2013 Lennart Poettering
11 #include "alloc-util.h"
12 //#include "blockdev-util.h"
13 //#include "bpf-firewall.h"
14 //#include "btrfs-util.h"
15 //#include "bus-error.h"
16 #include "cgroup-util.h"
21 #include "parse-util.h"
22 #include "path-util.h"
23 #include "process-util.h"
24 //#include "procfs-util.h"
25 //#include "special.h"
26 #include "stdio-util.h"
27 #include "string-table.h"
28 #include "string-util.h"
31 #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
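/* The per-second CPU quota configured by the user is rescaled to this fixed 100ms scheduling period before it
 * is handed to the kernel, e.g. a CPUQuotaPerSecUSec= of 200ms yields 20ms of runtime per 100ms period. */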
33 bool manager_owns_root_cgroup(Manager *m) {
36 /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
37 * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
38 * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace, we instead just check if
39 * we run in any kind of container virtualization. */
41 if (detect_container() > 0)
44 return empty_or_root(m->cgroup_root);
47 #if 0 /// UNNEEDED by elogind
48 bool unit_has_root_cgroup(Unit *u) {
51 /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
52 * the manager manages the root cgroup. */
54 if (!manager_owns_root_cgroup(u->manager))
57 return unit_has_name(u, SPECIAL_ROOT_SLICE);
60 static void cgroup_compat_warn(void) {
61 static bool cgroup_compat_warned = false;
63 if (cgroup_compat_warned)
66 log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
67 "See cgroup-compat debug messages for details.");
69 cgroup_compat_warned = true;
72 #define log_cgroup_compat(unit, fmt, ...) do { \
73 cgroup_compat_warn(); \
74 log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__); \
77 void cgroup_context_init(CGroupContext *c) {
80 /* Initialize everything to the kernel defaults, assuming the
81 * structure is preinitialized to 0 */
83 c->cpu_weight = CGROUP_WEIGHT_INVALID;
84 c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
85 c->cpu_quota_per_sec_usec = USEC_INFINITY;
87 c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
88 c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
90 c->memory_high = CGROUP_LIMIT_MAX;
91 c->memory_max = CGROUP_LIMIT_MAX;
92 c->memory_swap_max = CGROUP_LIMIT_MAX;
94 c->memory_limit = CGROUP_LIMIT_MAX;
96 c->io_weight = CGROUP_WEIGHT_INVALID;
97 c->startup_io_weight = CGROUP_WEIGHT_INVALID;
99 c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
100 c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
102 c->tasks_max = (uint64_t) -1;
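/* Note that (uint64_t) -1 is the same value as CGROUP_LIMIT_MAX, i.e. the tasks limit starts out unset. */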
105 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
109 LIST_REMOVE(device_allow, c->device_allow, a);
114 void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
118 LIST_REMOVE(device_weights, c->io_device_weights, w);
123 void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
127 LIST_REMOVE(device_limits, c->io_device_limits, l);
132 void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
136 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
141 void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
145 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
150 void cgroup_context_done(CGroupContext *c) {
153 while (c->io_device_weights)
154 cgroup_context_free_io_device_weight(c, c->io_device_weights);
156 while (c->io_device_limits)
157 cgroup_context_free_io_device_limit(c, c->io_device_limits);
159 while (c->blockio_device_weights)
160 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
162 while (c->blockio_device_bandwidths)
163 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
165 while (c->device_allow)
166 cgroup_context_free_device_allow(c, c->device_allow);
168 c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
169 c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
172 void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
173 CGroupIODeviceLimit *il;
174 CGroupIODeviceWeight *iw;
175 CGroupBlockIODeviceBandwidth *b;
176 CGroupBlockIODeviceWeight *w;
177 CGroupDeviceAllow *a;
178 IPAddressAccessItem *iaai;
179 char u[FORMAT_TIMESPAN_MAX];
184 prefix = strempty(prefix);
187 "%sCPUAccounting=%s\n"
188 "%sIOAccounting=%s\n"
189 "%sBlockIOAccounting=%s\n"
190 "%sMemoryAccounting=%s\n"
191 "%sTasksAccounting=%s\n"
192 "%sIPAccounting=%s\n"
193 "%sCPUWeight=%" PRIu64 "\n"
194 "%sStartupCPUWeight=%" PRIu64 "\n"
195 "%sCPUShares=%" PRIu64 "\n"
196 "%sStartupCPUShares=%" PRIu64 "\n"
197 "%sCPUQuotaPerSecSec=%s\n"
198 "%sIOWeight=%" PRIu64 "\n"
199 "%sStartupIOWeight=%" PRIu64 "\n"
200 "%sBlockIOWeight=%" PRIu64 "\n"
201 "%sStartupBlockIOWeight=%" PRIu64 "\n"
202 "%sMemoryLow=%" PRIu64 "\n"
203 "%sMemoryHigh=%" PRIu64 "\n"
204 "%sMemoryMax=%" PRIu64 "\n"
205 "%sMemorySwapMax=%" PRIu64 "\n"
206 "%sMemoryLimit=%" PRIu64 "\n"
207 "%sTasksMax=%" PRIu64 "\n"
208 "%sDevicePolicy=%s\n"
210 prefix, yes_no(c->cpu_accounting),
211 prefix, yes_no(c->io_accounting),
212 prefix, yes_no(c->blockio_accounting),
213 prefix, yes_no(c->memory_accounting),
214 prefix, yes_no(c->tasks_accounting),
215 prefix, yes_no(c->ip_accounting),
216 prefix, c->cpu_weight,
217 prefix, c->startup_cpu_weight,
218 prefix, c->cpu_shares,
219 prefix, c->startup_cpu_shares,
220 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
221 prefix, c->io_weight,
222 prefix, c->startup_io_weight,
223 prefix, c->blockio_weight,
224 prefix, c->startup_blockio_weight,
225 prefix, c->memory_low,
226 prefix, c->memory_high,
227 prefix, c->memory_max,
228 prefix, c->memory_swap_max,
229 prefix, c->memory_limit,
230 prefix, c->tasks_max,
231 prefix, cgroup_device_policy_to_string(c->device_policy),
232 prefix, yes_no(c->delegate));
235 _cleanup_free_ char *t = NULL;
237 (void) cg_mask_to_string(c->delegate_controllers, &t);
239 fprintf(f, "%sDelegateControllers=%s\n",
244 LIST_FOREACH(device_allow, a, c->device_allow)
246 "%sDeviceAllow=%s %s%s%s\n",
249 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
251 LIST_FOREACH(device_weights, iw, c->io_device_weights)
253 "%sIODeviceWeight=%s %" PRIu64,
258 LIST_FOREACH(device_limits, il, c->io_device_limits) {
259 char buf[FORMAT_BYTES_MAX];
260 CGroupIOLimitType type;
262 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
263 if (il->limits[type] != cgroup_io_limit_defaults[type])
267 cgroup_io_limit_type_to_string(type),
269 format_bytes(buf, sizeof(buf), il->limits[type]));
272 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
274 "%sBlockIODeviceWeight=%s %" PRIu64,
279 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
280 char buf[FORMAT_BYTES_MAX];
282 if (b->rbps != CGROUP_LIMIT_MAX)
284 "%sBlockIOReadBandwidth=%s %s\n",
287 format_bytes(buf, sizeof(buf), b->rbps));
288 if (b->wbps != CGROUP_LIMIT_MAX)
290 "%sBlockIOWriteBandwidth=%s %s\n",
293 format_bytes(buf, sizeof(buf), b->wbps));
296 LIST_FOREACH(items, iaai, c->ip_address_allow) {
297 _cleanup_free_ char *k = NULL;
299 (void) in_addr_to_string(iaai->family, &iaai->address, &k);
300 fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
303 LIST_FOREACH(items, iaai, c->ip_address_deny) {
304 _cleanup_free_ char *k = NULL;
306 (void) in_addr_to_string(iaai->family, &iaai->address, &k);
307 fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
311 static int lookup_block_device(const char *p, dev_t *ret) {
318 if (stat(p, &st) < 0)
319 return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);
321 if (S_ISBLK(st.st_mode))
323 else if (major(st.st_dev) != 0)
324 *ret = st.st_dev; /* If this is not a device node then use the block device this file is stored on */
326 /* If this is btrfs, getting the backing block device is a bit harder */
327 r = btrfs_get_block_device(p, ret);
328 if (r < 0 && r != -ENOTTY)
329 return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
331 log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
336 /* If this is a LUKS device, try to get the originating block device */
337 (void) block_get_originating(*ret, ret);
339 /* If this is a partition, try to get the originating block device */
340 (void) block_get_whole_disk(*ret, ret);
344 static int whitelist_device(const char *path, const char *node, const char *acc) {
345 char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
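/* The buffer is sized for the device line written below: a type character and a space ("c " or "b "), the
 * decimal major:minor pair, plus a space, at most "rwm" and the trailing NUL, e.g. "c 10:229 rwm". */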
347 bool ignore_notfound;
353 if (node[0] == '-') {
354 /* Non-existent paths starting with "-" must be silently ignored */
356 ignore_notfound = true;
358 ignore_notfound = false;
360 if (stat(node, &st) < 0) {
361 if (errno == ENOENT && ignore_notfound)
364 return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
367 if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
368 log_warning("%s is not a device.", node);
374 S_ISCHR(st.st_mode) ? 'c' : 'b',
375 major(st.st_rdev), minor(st.st_rdev),
378 r = cg_set_attribute("devices", path, "devices.allow", buf);
380 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
381 "Failed to set devices.allow on %s: %m", path);
386 static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
387 _cleanup_fclose_ FILE *f = NULL;
394 assert(IN_SET(type, 'b', 'c'));
396 f = fopen("/proc/devices", "re");
398 return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
400 FOREACH_LINE(line, f, goto fail) {
401 char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
406 if (type == 'c' && streq(line, "Character devices:")) {
411 if (type == 'b' && streq(line, "Block devices:")) {
426 w = strpbrk(p, WHITESPACE);
431 r = safe_atou(p, &maj);
438 w += strspn(w, WHITESPACE);
440 if (fnmatch(name, w, 0) != 0)
449 r = cg_set_attribute("devices", path, "devices.allow", buf);
451 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
452 "Failed to set devices.allow on %s: %m", path);
458 return log_warning_errno(errno, "Failed to read /proc/devices: %m");
461 static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
462 return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
463 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
466 static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
467 return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
468 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
471 static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
472 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
473 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
474 return c->startup_cpu_weight;
475 else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
476 return c->cpu_weight;
478 return CGROUP_WEIGHT_DEFAULT;
481 static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
482 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
483 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
484 return c->startup_cpu_shares;
485 else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
486 return c->cpu_shares;
488 return CGROUP_CPU_SHARES_DEFAULT;
491 static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
492 char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
495 xsprintf(buf, "%" PRIu64 "\n", weight);
496 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
498 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
499 "Failed to set cpu.weight: %m");
501 if (quota != USEC_INFINITY)
502 xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
503 quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
505 xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
507 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
510 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
511 "Failed to set cpu.max: %m");
514 static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
515 char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
518 xsprintf(buf, "%" PRIu64 "\n", shares);
519 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
521 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
522 "Failed to set cpu.shares: %m");
524 xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
525 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
527 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
528 "Failed to set cpu.cfs_period_us: %m");
530 if (quota != USEC_INFINITY) {
531 xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
532 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
534 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
536 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
537 "Failed to set cpu.cfs_quota_us: %m");
540 static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
541 return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
542 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
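/* A worked example of this mapping: the default of 1024 shares maps to the default weight of 100, while the
 * extremes rely on the clamping, e.g. the legacy minimum of 2 shares computes to 0 and is clamped up to 1. */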
545 static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
546 return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
547 CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
550 static bool cgroup_context_has_io_config(CGroupContext *c) {
551 return c->io_accounting ||
552 c->io_weight != CGROUP_WEIGHT_INVALID ||
553 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
554 c->io_device_weights ||
558 static bool cgroup_context_has_blockio_config(CGroupContext *c) {
559 return c->blockio_accounting ||
560 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
561 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
562 c->blockio_device_weights ||
563 c->blockio_device_bandwidths;
566 static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
567 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
568 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
569 return c->startup_io_weight;
570 else if (c->io_weight != CGROUP_WEIGHT_INVALID)
573 return CGROUP_WEIGHT_DEFAULT;
576 static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
577 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
578 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
579 return c->startup_blockio_weight;
580 else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
581 return c->blockio_weight;
583 return CGROUP_BLKIO_WEIGHT_DEFAULT;
586 static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
587 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
588 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
591 static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
592 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
593 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
596 static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
597 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
601 r = lookup_block_device(dev_path, &dev);
605 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
606 r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
608 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
609 "Failed to set io.weight: %m");
612 static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
613 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
617 r = lookup_block_device(dev_path, &dev);
621 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
622 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
624 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
625 "Failed to set blkio.weight_device: %m");
628 static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
629 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
630 char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
631 CGroupIOLimitType type;
635 r = lookup_block_device(dev_path, &dev);
639 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
640 if (limits[type] != cgroup_io_limit_defaults[type])
641 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
643 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
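/* The assembled io.max line carries all four limits for the device at once, for example
 * "8:0 rbps=1000000 wbps=max riops=max wiops=max". */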
645 xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
646 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
647 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
648 r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
650 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
651 "Failed to set io.max: %m");
654 static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
655 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
659 r = lookup_block_device(dev_path, &dev);
663 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
664 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
666 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
667 "Failed to set blkio.throttle.read_bps_device: %m");
669 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
670 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
672 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
673 "Failed to set blkio.throttle.write_bps_device: %m");
676 static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
677 return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
680 static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
681 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
684 if (v != CGROUP_LIMIT_MAX)
685 xsprintf(buf, "%" PRIu64 "\n", v);
687 r = cg_set_attribute("memory", u->cgroup_path, file, buf);
689 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
690 "Failed to set %s: %m", file);
693 static void cgroup_apply_firewall(Unit *u) {
696 /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
698 if (bpf_firewall_compile(u) < 0)
701 (void) bpf_firewall_install(u);
704 static void cgroup_context_apply(
706 CGroupMask apply_mask,
708 ManagerState state) {
717 /* Nothing to do? Exit early! */
718 if (apply_mask == 0 && !apply_bpf)
721 /* Some cgroup attributes are not supported on the root cgroup, hence silently ignore */
722 is_root = unit_has_root_cgroup(u);
724 assert_se(c = unit_get_cgroup_context(u));
725 assert_se(path = u->cgroup_path);
727 if (is_root) /* Make sure we don't try to display messages with an empty path. */
730 /* We generally ignore errors caused by read-only mounted
731 * cgroup trees (assuming we are running in a container then),
732 * and missing cgroups, i.e. EROFS and ENOENT. */
734 if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
735 bool has_weight, has_shares;
737 has_weight = cgroup_context_has_cpu_weight(c);
738 has_shares = cgroup_context_has_cpu_shares(c);
740 if (cg_all_unified() > 0) {
744 weight = cgroup_context_cpu_weight(c, state);
745 else if (has_shares) {
746 uint64_t shares = cgroup_context_cpu_shares(c, state);
748 weight = cgroup_cpu_shares_to_weight(shares);
750 log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
751 shares, weight, path);
753 weight = CGROUP_WEIGHT_DEFAULT;
755 cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
760 uint64_t weight = cgroup_context_cpu_weight(c, state);
762 shares = cgroup_cpu_weight_to_shares(weight);
764 log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
765 weight, shares, path);
766 } else if (has_shares)
767 shares = cgroup_context_cpu_shares(c, state);
769 shares = CGROUP_CPU_SHARES_DEFAULT;
771 cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
775 if (apply_mask & CGROUP_MASK_IO) {
776 bool has_io = cgroup_context_has_io_config(c);
777 bool has_blockio = cgroup_context_has_blockio_config(c);
780 char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
784 weight = cgroup_context_io_weight(c, state);
785 else if (has_blockio) {
786 uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);
788 weight = cgroup_weight_blkio_to_io(blkio_weight);
790 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
791 blkio_weight, weight);
793 weight = CGROUP_WEIGHT_DEFAULT;
795 xsprintf(buf, "default %" PRIu64 "\n", weight);
796 r = cg_set_attribute("io", path, "io.weight", buf);
798 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
799 "Failed to set io.weight: %m");
802 CGroupIODeviceWeight *w;
804 /* FIXME: no way to reset this list */
805 LIST_FOREACH(device_weights, w, c->io_device_weights)
806 cgroup_apply_io_device_weight(u, w->path, w->weight);
807 } else if (has_blockio) {
808 CGroupBlockIODeviceWeight *w;
810 /* FIXME: no way to reset this list */
811 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
812 weight = cgroup_weight_blkio_to_io(w->weight);
814 log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
815 w->weight, weight, w->path);
817 cgroup_apply_io_device_weight(u, w->path, weight);
822 /* Apply limits and free ones without config. */
824 CGroupIODeviceLimit *l;
826 LIST_FOREACH(device_limits, l, c->io_device_limits)
827 cgroup_apply_io_device_limit(u, l->path, l->limits);
829 } else if (has_blockio) {
830 CGroupBlockIODeviceBandwidth *b;
832 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
833 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
834 CGroupIOLimitType type;
836 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
837 limits[type] = cgroup_io_limit_defaults[type];
839 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
840 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
842 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
843 b->rbps, b->wbps, b->path);
845 cgroup_apply_io_device_limit(u, b->path, limits);
850 if (apply_mask & CGROUP_MASK_BLKIO) {
851 bool has_io = cgroup_context_has_io_config(c);
852 bool has_blockio = cgroup_context_has_blockio_config(c);
855 char buf[DECIMAL_STR_MAX(uint64_t)+1];
859 uint64_t io_weight = cgroup_context_io_weight(c, state);
861 weight = cgroup_weight_io_to_blkio(io_weight);
863 log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
865 } else if (has_blockio)
866 weight = cgroup_context_blkio_weight(c, state);
868 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
870 xsprintf(buf, "%" PRIu64 "\n", weight);
871 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
873 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
874 "Failed to set blkio.weight: %m");
877 CGroupIODeviceWeight *w;
879 /* FIXME: no way to reset this list */
880 LIST_FOREACH(device_weights, w, c->io_device_weights) {
881 weight = cgroup_weight_io_to_blkio(w->weight);
883 log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
884 w->weight, weight, w->path);
886 cgroup_apply_blkio_device_weight(u, w->path, weight);
888 } else if (has_blockio) {
889 CGroupBlockIODeviceWeight *w;
891 /* FIXME: no way to reset this list */
892 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
893 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
897 /* Apply limits and free ones without config. */
899 CGroupIODeviceLimit *l;
901 LIST_FOREACH(device_limits, l, c->io_device_limits) {
902 log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
903 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
905 cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
907 } else if (has_blockio) {
908 CGroupBlockIODeviceBandwidth *b;
910 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
911 cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
915 if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
916 if (cg_all_unified() > 0) {
917 uint64_t max, swap_max = CGROUP_LIMIT_MAX;
919 if (cgroup_context_has_unified_memory_config(c)) {
921 swap_max = c->memory_swap_max;
923 max = c->memory_limit;
925 if (max != CGROUP_LIMIT_MAX)
926 log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
929 cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
930 cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
931 cgroup_apply_unified_memory_limit(u, "memory.max", max);
932 cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
934 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
937 if (cgroup_context_has_unified_memory_config(c)) {
939 log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
941 val = c->memory_limit;
943 if (val == CGROUP_LIMIT_MAX)
944 strncpy(buf, "-1\n", sizeof(buf));
946 xsprintf(buf, "%" PRIu64 "\n", val);
948 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
950 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
951 "Failed to set memory.limit_in_bytes: %m");
955 if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
956 CGroupDeviceAllow *a;
958 /* Changing the devices list of a populated cgroup
959 * might result in EINVAL, hence ignore EINVAL
962 if (c->device_allow || c->device_policy != CGROUP_AUTO)
963 r = cg_set_attribute("devices", path, "devices.deny", "a");
965 r = cg_set_attribute("devices", path, "devices.allow", "a");
967 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
968 "Failed to reset devices.list: %m");
970 if (c->device_policy == CGROUP_CLOSED ||
971 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
972 static const char auto_devices[] =
973 "/dev/null\0" "rwm\0"
974 "/dev/zero\0" "rwm\0"
975 "/dev/full\0" "rwm\0"
976 "/dev/random\0" "rwm\0"
977 "/dev/urandom\0" "rwm\0"
979 "/dev/ptmx\0" "rwm\0"
980 /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
981 "-/run/systemd/inaccessible/chr\0" "rwm\0"
982 "-/run/systemd/inaccessible/blk\0" "rwm\0";
986 NULSTR_FOREACH_PAIR(x, y, auto_devices)
987 whitelist_device(path, x, y);
989 /* PTS (/dev/pts) devices may not be duplicated, but may be accessed, hence "rw" rather than "rwm" */
990 whitelist_major(path, "pts", 'c', "rw");
993 LIST_FOREACH(device_allow, a, c->device_allow) {
1009 if (path_startswith(a->path, "/dev/"))
1010 whitelist_device(path, a->path, acc);
1011 else if ((val = startswith(a->path, "block-")))
1012 whitelist_major(path, val, 'b', acc);
1013 else if ((val = startswith(a->path, "char-")))
1014 whitelist_major(path, val, 'c', acc);
1016 log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
1020 if (apply_mask & CGROUP_MASK_PIDS) {
1023 /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
1024 * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
1025 * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
1026 * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
1027 * exclusive ownership of the sysctls, but we still want to honour things if the user sets
1028 * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
1029 * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
1030 * it also counts. But if the user never set a limit through us (i.e. we are the default of
1031 * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
1032 * the first time we set a limit. Note that this boolean is flushed out on manager reload,
1033 * which is desirable so that there's an official way to release control of the sysctl from
1034 * systemd: set the limit to unbounded and reload. */
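/* In other words: setting e.g. TasksMax=10000 here updates the relevant sysctls and flips the boolean;
 * setting it back to "infinity" later resets them to TASKS_MAX; but if no limit was ever set through us,
 * the kernel's own settings remain untouched. */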
1036 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1037 u->manager->sysctl_pid_max_changed = true;
1038 r = procfs_tasks_set_limit(c->tasks_max);
1039 } else if (u->manager->sysctl_pid_max_changed)
1040 r = procfs_tasks_set_limit(TASKS_MAX);
1045 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1046 "Failed to write to tasks limit sysctls: %m");
1049 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1050 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
1052 sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
1053 r = cg_set_attribute("pids", path, "pids.max", buf);
1055 r = cg_set_attribute("pids", path, "pids.max", "max");
1057 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1058 "Failed to set pids.max: %m");
1063 cgroup_apply_firewall(u);
1066 CGroupMask cgroup_context_get_mask(CGroupContext *c) {
1067 CGroupMask mask = 0;
1069 /* Figure out which controllers we need */
1071 if (c->cpu_accounting ||
1072 cgroup_context_has_cpu_weight(c) ||
1073 cgroup_context_has_cpu_shares(c) ||
1074 c->cpu_quota_per_sec_usec != USEC_INFINITY)
1075 mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
1077 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
1078 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
1080 if (c->memory_accounting ||
1081 c->memory_limit != CGROUP_LIMIT_MAX ||
1082 cgroup_context_has_unified_memory_config(c))
1083 mask |= CGROUP_MASK_MEMORY;
1085 if (c->device_allow ||
1086 c->device_policy != CGROUP_AUTO)
1087 mask |= CGROUP_MASK_DEVICES;
1089 if (c->tasks_accounting ||
1090 c->tasks_max != CGROUP_LIMIT_MAX)
1091 mask |= CGROUP_MASK_PIDS;
1096 CGroupMask unit_get_own_mask(Unit *u) {
1099 /* Returns the mask of controllers the unit needs for itself */
1101 c = unit_get_cgroup_context(u);
1105 return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
1108 CGroupMask unit_get_delegate_mask(Unit *u) {
1111 /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
1112 * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
1114 * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */
1116 if (!unit_cgroup_delegate(u))
1119 if (cg_all_unified() <= 0) {
1122 e = unit_get_exec_context(u);
1123 if (e && !exec_context_maintains_privileges(e))
1127 assert_se(c = unit_get_cgroup_context(u));
1128 return c->delegate_controllers;
1131 CGroupMask unit_get_members_mask(Unit *u) {
1134 /* Returns the mask of controllers all of the unit's children require, merged */
1136 if (u->cgroup_members_mask_valid)
1137 return u->cgroup_members_mask;
1139 u->cgroup_members_mask = 0;
1141 if (u->type == UNIT_SLICE) {
1146 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
1151 if (UNIT_DEREF(member->slice) != u)
1154 u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
1158 u->cgroup_members_mask_valid = true;
1159 return u->cgroup_members_mask;
1162 CGroupMask unit_get_siblings_mask(Unit *u) {
1165 /* Returns the mask of controllers all of the unit's siblings
1166 * require, i.e. the members mask of the unit's parent slice
1167 * if there is one. */
1169 if (UNIT_ISSET(u->slice))
1170 return unit_get_members_mask(UNIT_DEREF(u->slice));
1172 return unit_get_subtree_mask(u); /* we are the top-level slice */
1175 CGroupMask unit_get_subtree_mask(Unit *u) {
1177 /* Returns the mask of this subtree, meaning of the group
1178 * itself and its children. */
1180 return unit_get_own_mask(u) | unit_get_members_mask(u);
1183 CGroupMask unit_get_target_mask(Unit *u) {
1186 /* This returns the cgroup mask of all controllers to enable
1187 * for a specific cgroup, i.e. everything it needs itself,
1188 * plus all that its children need, plus all that its siblings
1189 * need. This is primarily useful on the legacy cgroup
1190 * hierarchy, where we need to duplicate each cgroup in each
1191 * hierarchy that shall be enabled for it. */
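/* For example, if a service sets CPUWeight= while a sibling unit in the same slice sets IOWeight=, the
 * target mask of each includes (at least) the cpu and io controllers, so on the legacy hierarchy both
 * cgroups get created in both of those hierarchies. */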
1193 mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
1194 mask &= u->manager->cgroup_supported;
1199 CGroupMask unit_get_enable_mask(Unit *u) {
1202 /* This returns the cgroup mask of all controllers to enable
1203 * for the children of a specific cgroup. This is primarily
1204 * useful for the unified cgroup hierarchy, where each cgroup
1205 * controls which controllers are enabled for its children. */
1207 mask = unit_get_members_mask(u);
1208 mask &= u->manager->cgroup_supported;
1213 bool unit_get_needs_bpf(Unit *u) {
1218 c = unit_get_cgroup_context(u);
1222 if (c->ip_accounting ||
1223 c->ip_address_allow ||
1227 /* If any parent slice has an IP access list defined, it applies too */
1228 for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
1229 c = unit_get_cgroup_context(p);
1233 if (c->ip_address_allow ||
1241 /* Recurse from a unit up through its containing slices, propagating
1242 * mask bits upward. A unit is also a member of itself. */
1243 void unit_update_cgroup_members_masks(Unit *u) {
1249 /* Calculate subtree mask */
1250 m = unit_get_subtree_mask(u);
1252 /* See if anything changed from the previous invocation. If
1253 * not, we're done. */
1254 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
1258 u->cgroup_subtree_mask_valid &&
1259 ((m & ~u->cgroup_subtree_mask) != 0) &&
1260 ((~m & u->cgroup_subtree_mask) == 0);
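/* The condition above is true if the new mask is a strict superset of the previous subtree mask, i.e.
 * controller bits were only added, never removed: (m & ~old) checks for new bits, (~m & old) for lost ones. */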
1262 u->cgroup_subtree_mask = m;
1263 u->cgroup_subtree_mask_valid = true;
1265 if (UNIT_ISSET(u->slice)) {
1266 Unit *s = UNIT_DEREF(u->slice);
1269 /* There's more set now than before. We
1270 * propagate the new mask to the parent's mask
1271 * (not caring if it actually was valid or
1274 s->cgroup_members_mask |= m;
1277 /* There's less set now than before (or we
1278 * don't know), we need to recalculate
1279 * everything, so let's invalidate the
1280 * parent's members mask */
1282 s->cgroup_members_mask_valid = false;
1284 /* And now make sure that this change also hits our
1286 unit_update_cgroup_members_masks(s);
1290 const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {
1292 /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */
1296 if (u->cgroup_path &&
1297 u->cgroup_realized &&
1298 FLAGS_SET(u->cgroup_realized_mask, mask))
1299 return u->cgroup_path;
1301 u = UNIT_DEREF(u->slice);
1307 static const char *migrate_callback(CGroupMask mask, void *userdata) {
1308 return unit_get_realized_cgroup_path(userdata, mask);
1311 char *unit_default_cgroup_path(Unit *u) {
1312 _cleanup_free_ char *escaped = NULL, *slice = NULL;
1317 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1318 return strdup(u->manager->cgroup_root);
1320 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
1321 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
1326 escaped = cg_escape(u->id);
1331 return strjoin(u->manager->cgroup_root, "/", slice, "/",
1334 return strjoin(u->manager->cgroup_root, "/", escaped);
1337 int unit_set_cgroup_path(Unit *u, const char *path) {
1338 _cleanup_free_ char *p = NULL;
1350 if (streq_ptr(u->cgroup_path, p))
1354 r = hashmap_put(u->manager->cgroup_unit, p, u);
1359 unit_release_cgroup(u);
1361 u->cgroup_path = TAKE_PTR(p);
1366 int unit_watch_cgroup(Unit *u) {
1367 _cleanup_free_ char *events = NULL;
1372 if (!u->cgroup_path)
1375 if (u->cgroup_inotify_wd >= 0)
1378 /* Only applies to the unified hierarchy */
1379 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
1381 return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
1385 /* Don't watch the root slice, it's pointless. */
1386 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1389 r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
1393 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
1397 u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
1398 if (u->cgroup_inotify_wd < 0) {
1400 if (errno == ENOENT) /* If the directory is already
1401 * gone we don't need to track
1402 * it, so this is not an error */
1405 return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
1408 r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
1410 return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
1415 int unit_pick_cgroup_path(Unit *u) {
1416 _cleanup_free_ char *path = NULL;
1424 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1427 path = unit_default_cgroup_path(u);
1431 r = unit_set_cgroup_path(u, path);
1433 return log_unit_error_errno(u, r, "Control group %s exists already.", path);
1435 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
1440 static int unit_create_cgroup(
1442 CGroupMask target_mask,
1443 CGroupMask enable_mask,
1452 c = unit_get_cgroup_context(u);
1456 /* Figure out our cgroup path */
1457 r = unit_pick_cgroup_path(u);
1461 /* First, create our own group */
1462 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
1464 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
1467 /* Start watching it */
1468 (void) unit_watch_cgroup(u);
1470 /* Preserve enabled controllers in delegated units, adjust others. */
1471 if (created || !unit_cgroup_delegate(u)) {
1473 /* Enable all controllers we need */
1474 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
1476 log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m",
1480 /* Keep track that this is now realized */
1481 u->cgroup_realized = true;
1482 u->cgroup_realized_mask = target_mask;
1483 u->cgroup_enabled_mask = enable_mask;
1484 u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;
1486 if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {
1488 /* Then, possibly move things over, but not if
1489 * subgroups may contain processes, which is the case
1490 * for slice and delegation units. */
1491 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
1493 log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
1499 static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
1500 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1506 if (MANAGER_IS_SYSTEM(u->manager))
1509 if (!u->manager->system_bus)
1512 if (!u->cgroup_path)
1515 /* Determine this unit's cgroup path relative to our cgroup root */
1516 pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
1520 pp = strjoina("/", pp, suffix_path);
1521 path_simplify(pp, false);
1523 r = sd_bus_call_method(u->manager->system_bus,
1524 "org.freedesktop.systemd1",
1525 "/org/freedesktop/systemd1",
1526 "org.freedesktop.systemd1.Manager",
1527 "AttachProcessesToUnit",
1530 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
1532 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
1537 int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
1538 CGroupMask delegated_mask;
1546 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1549 if (set_isempty(pids))
1552 r = unit_realize_cgroup(u);
1556 if (isempty(suffix_path))
1559 p = strjoina(u->cgroup_path, "/", suffix_path);
1561 delegated_mask = unit_get_delegate_mask(u);
1564 SET_FOREACH(pidp, pids, i) {
1565 pid_t pid = PTR_TO_PID(pidp);
1568 /* First, attach the PID to the main cgroup hierarchy */
1569 q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
1571 log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);
1573 if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
1576 /* If we are in a user instance, and we can't move the process ourselves due to
1577 * permission problems, let's ask the system instance about it instead. Since it's more
1578 * privileged it might be able to move the process across the leaves of a subtree whose
1579 * top node is not owned by us. */
1581 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
1583 log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
1585 continue; /* When the bus thing worked via the bus we are fully done for this PID. */
1589 r = q; /* Remember first error */
1594 q = cg_all_unified();
1600 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
1601 * innermost realized one */
1603 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1604 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1605 const char *realized;
1607 if (!(u->manager->cgroup_supported & bit))
1610 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
1611 if (delegated_mask & u->cgroup_realized_mask & bit) {
1612 q = cg_attach(cgroup_controller_to_string(c), p, pid);
1614 continue; /* Success! */
1616 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
1617 pid, p, cgroup_controller_to_string(c));
1620 /* So this controller is either not delegated or not realized, or something else weird happened. In
1621 * that case let's attach the PID at least to the closest cgroup up the tree that is
1623 realized = unit_get_realized_cgroup_path(u, bit);
1625 continue; /* Not even realized in the root slice? Then let's not bother */
1627 q = cg_attach(cgroup_controller_to_string(c), realized, pid);
1629 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
1630 pid, realized, cgroup_controller_to_string(c));
1637 static void cgroup_xattr_apply(Unit *u) {
1638 char ids[SD_ID128_STRING_MAX];
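/* Stamp the unit's invocation ID on its cgroup as an extended attribute; in systemd proper this
 * trusted.invocation_id xattr is read back by journald to associate log records with the invocation. The
 * length of 32 passed below matches the formatted 128-bit ID without its trailing NUL. */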
1643 if (!MANAGER_IS_SYSTEM(u->manager))
1646 if (sd_id128_is_null(u->invocation_id))
1649 r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
1650 "trusted.invocation_id",
1651 sd_id128_to_string(u->invocation_id, ids), 32,
1654 log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
1657 static bool unit_has_mask_realized(
1659 CGroupMask target_mask,
1660 CGroupMask enable_mask,
1665 return u->cgroup_realized &&
1666 u->cgroup_realized_mask == target_mask &&
1667 u->cgroup_enabled_mask == enable_mask &&
1668 ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
1669 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
1672 static void unit_add_to_cgroup_realize_queue(Unit *u) {
1675 if (u->in_cgroup_realize_queue)
1678 LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1679 u->in_cgroup_realize_queue = true;
1682 static void unit_remove_from_cgroup_realize_queue(Unit *u) {
1685 if (!u->in_cgroup_realize_queue)
1688 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1689 u->in_cgroup_realize_queue = false;
1692 /* Check if necessary controllers and attributes for a unit are in place.
1694 * If so, do nothing.
1695 * If not, create paths, move processes over, and set attributes.
1697 * Returns 0 on success and < 0 on failure. */
1698 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
1699 CGroupMask target_mask, enable_mask;
1700 bool needs_bpf, apply_bpf;
1705 unit_remove_from_cgroup_realize_queue(u);
1707 target_mask = unit_get_target_mask(u);
1708 enable_mask = unit_get_enable_mask(u);
1709 needs_bpf = unit_get_needs_bpf(u);
1711 if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
1714 /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
1715 * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it,
1716 * this will trickle down properly to cgroupfs. */
1717 apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;
1719 /* First, realize parents */
1720 if (UNIT_ISSET(u->slice)) {
1721 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
1726 /* And then do the real work */
1727 r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
1731 /* Finally, apply the necessary attributes. */
1732 cgroup_context_apply(u, target_mask, apply_bpf, state);
1733 cgroup_xattr_apply(u);
1738 unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
1746 state = manager_state(m);
1748 while ((i = m->cgroup_realize_queue)) {
1749 assert(i->in_cgroup_realize_queue);
1751 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
1752 /* Maybe things changed, and the unit is not actually active anymore? */
1753 unit_remove_from_cgroup_realize_queue(i);
1757 r = unit_realize_cgroup_now(i, state);
1759 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
1767 static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
1770 /* This adds the siblings of the specified unit and the
1771 * siblings of all parent units to the cgroup queue. (But
1772 * neither the specified unit itself nor the parents.) */
1774 while ((slice = UNIT_DEREF(u->slice))) {
1779 HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
1783 /* Skip units that have a dependency on the slice
1784 * but aren't actually in it. */
1785 if (UNIT_DEREF(m->slice) != slice)
1788 /* No point in doing cgroup application for units
1789 * without active processes. */
1790 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
1793 /* If the unit doesn't need any new controllers
1794 * and has current ones realized, it doesn't need
1796 if (unit_has_mask_realized(m,
1797 unit_get_target_mask(m),
1798 unit_get_enable_mask(m),
1799 unit_get_needs_bpf(m)))
1802 unit_add_to_cgroup_realize_queue(m);
1809 int unit_realize_cgroup(Unit *u) {
1812 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1815 /* So, here's the deal: when realizing the cgroups for this
1816 * unit, we need to first create all parents, but there's more
1817 * actually: for the weight-based controllers we also need to
1818 * make sure that all our siblings (i.e. units that are in the
1819 * same slice as we are) have cgroups, too. Otherwise, things
1820 * would become very uneven as each of their processes would
1821 * get as many resources as our entire group together. This call
1822 * will synchronously create the parent cgroups, but will
1823 * defer work on the siblings to the next event loop
1826 /* Add all sibling slices to the cgroup queue. */
1827 unit_add_siblings_to_cgroup_realize_queue(u);
1829 /* And realize this one now (and apply the values) */
1830 return unit_realize_cgroup_now(u, manager_state(u->manager));
1833 void unit_release_cgroup(Unit *u) {
1836 /* Forgets all cgroup details for this cgroup */
1838 if (u->cgroup_path) {
1839 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1840 u->cgroup_path = mfree(u->cgroup_path);
1843 if (u->cgroup_inotify_wd >= 0) {
1844 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1845 log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);
1847 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1848 u->cgroup_inotify_wd = -1;
1852 void unit_prune_cgroup(Unit *u) {
1858 /* Removes the cgroup, if empty and possible, and stops watching it. */
1860 if (!u->cgroup_path)
1863 (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
1865 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1867 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
1869 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
1876 unit_release_cgroup(u);
1878 u->cgroup_realized = false;
1879 u->cgroup_realized_mask = 0;
1880 u->cgroup_enabled_mask = 0;
1883 int unit_search_main_pid(Unit *u, pid_t *ret) {
1884 _cleanup_fclose_ FILE *f = NULL;
1885 pid_t pid = 0, npid, mypid;
1891 if (!u->cgroup_path)
1894 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1898 mypid = getpid_cached();
1899 while (cg_read_pid(f, &npid) > 0) {
1905 /* Ignore processes that aren't our kids */
1906 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
1910 /* Dang, there's more than one daemonized PID
1911 in this group, so we don't know what process
1912 is the main process. */
1923 static int unit_watch_pids_in_path(Unit *u, const char *path) {
1924 _cleanup_closedir_ DIR *d = NULL;
1925 _cleanup_fclose_ FILE *f = NULL;
1931 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1937 while ((r = cg_read_pid(f, &pid)) > 0) {
1938 r = unit_watch_pid(u, pid);
1939 if (r < 0 && ret >= 0)
1943 if (r < 0 && ret >= 0)
1947 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1954 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1955 _cleanup_free_ char *p = NULL;
1957 p = strjoin(path, "/", fn);
1963 r = unit_watch_pids_in_path(u, p);
1964 if (r < 0 && ret >= 0)
1968 if (r < 0 && ret >= 0)
1975 int unit_synthesize_cgroup_empty_event(Unit *u) {
1980 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
1981 * support for non-unified systems where notifications aren't reliable, and hence we need to take whatever we can
1982 * get as notification source as soon as we stopped having any useful PIDs to watch for. */
1984 if (!u->cgroup_path)
1987 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
1990 if (r > 0) /* On unified we have reliable notifications, and don't need this */
1993 if (!set_isempty(u->pids))
1996 unit_add_to_cgroup_empty_queue(u);
2000 int unit_watch_all_pids(Unit *u) {
2005 /* Adds all PIDs from our cgroup to the set of PIDs we
2006 * watch. This is a fallback logic for cases where we do not
2007 * get reliable cgroup empty notifications: we try to use
2008 * SIGCHLD as replacement. */
2010 if (!u->cgroup_path)
2013 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2016 if (r > 0) /* On unified we can use proper notifications */
2019 return unit_watch_pids_in_path(u, u->cgroup_path);
2022 static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
2023 Manager *m = userdata;
2030 u = m->cgroup_empty_queue;
2034 assert(u->in_cgroup_empty_queue);
2035 u->in_cgroup_empty_queue = false;
2036 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
2038 if (m->cgroup_empty_queue) {
2039 /* More stuff queued, let's make sure we remain enabled */
2040 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
2042 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
2045 unit_add_to_gc_queue(u);
2047 if (UNIT_VTABLE(u)->notify_cgroup_empty)
2048 UNIT_VTABLE(u)->notify_cgroup_empty(u);
2053 void unit_add_to_cgroup_empty_queue(Unit *u) {
2058 /* Note that there are four different ways cgroup empty events can reach us:
2060 * 1. On the unified hierarchy we get an inotify event on the cgroup
2062 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
2064 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
2066 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
2067 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
2069 * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
2070 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
2071 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
2072 * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
2073 * case for scope units). */
2075 if (u->in_cgroup_empty_queue)
2078 /* Let's verify that the cgroup is really empty */
2079 if (!u->cgroup_path)
2081 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
2083 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
2089 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
2090 u->in_cgroup_empty_queue = true;
2092 /* Trigger the defer event */
2093 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
2095 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
2098 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
2099 Manager *m = userdata;
2106 union inotify_event_buffer buffer;
2107 struct inotify_event *e;
2110 l = read(fd, &buffer, sizeof(buffer));
2112 if (IN_SET(errno, EINTR, EAGAIN))
2115 return log_error_errno(errno, "Failed to read control group inotify events: %m");
2118 FOREACH_INOTIFY_EVENT(e, buffer, l) {
2122 /* Queue overflow has no watch descriptor */
2125 if (e->mask & IN_IGNORED)
2126 /* The watch was just removed */
2129 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
2130 if (!u) /* Note that inotify might deliver
2131 * events for a watch even after it
2132 * was removed, because it was queued
2133 * before the removal. Let's ignore
2134 * this here safely. */
2137 unit_add_to_cgroup_empty_queue(u);
2143 int manager_setup_cgroup(Manager *m) {
2144 _cleanup_free_ char *path = NULL;
2145 const char *scope_path;
2148 #if 0 /// UNNEEDED by elogind
2154 /* 1. Determine hierarchy */
2155 m->cgroup_root = mfree(m->cgroup_root);
2156 #if 0 /// elogind is not init and must therefore search for PID 1 instead of self.
2157 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
2159 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 1, &m->cgroup_root);
2162 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
2164 #if 0 /// elogind does not support systemd scopes and slices
2165 /* Chop off the init scope, if we are already located in it */
2166 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
2168 /* LEGACY: Also chop off the system slice if we are in
2169 * it. This is to support live upgrades from older systemd
2170 * versions where PID 1 was moved there. Also see
2171 * cg_get_root_path(). */
2172 if (!e && MANAGER_IS_SYSTEM(m)) {
2173 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
2175 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
2181 log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
2182 SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root);
2183 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
2184 * easily prepend it everywhere. */
2185 delete_trailing_chars(m->cgroup_root, "/");
2188 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
2190 return log_error_errno(r, "Cannot find cgroup mount point: %m");
2192 r = cg_unified_flush();
2194 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
2196 all_unified = cg_all_unified();
2197 if (all_unified < 0)
2198 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
2199 if (all_unified > 0)
2200 log_debug("Unified cgroup hierarchy is located at %s.", path);
2202 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2204 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
2206 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
2208 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);

#if 0 /// elogind is not init, and does not install the agent here.
        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
#else
        /* Note:
         * This method is in core, and normally called by systemd
         * being init. As elogind is never init, we cannot install
         * our agent here. We do so when mounting our cgroup file
         * system, so only if elogind is its own tiny controller.
         * Further, elogind is not meant to run in systemd init scope. */
        if (MANAGER_IS_SYSTEM(m))
                // we are our own cgroup controller
                scope_path = strjoina("");
        else if (streq(m->cgroup_root, "/elogind"))
                // root already is our cgroup
                scope_path = strjoina(m->cgroup_root);
        else
                // we have to create our own group
                scope_path = strjoina(m->cgroup_root, "/elogind");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
#endif // 0
        log_debug_elogind("Created control group \"%s\"", scope_path);

        /* 6. And pin it, so that it cannot be unmounted */
        safe_close(m->pin_cgroupfs_fd);
        m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
        if (m->pin_cgroupfs_fd < 0)
                return log_error_errno(errno, "Failed to open pin file: %m");

#if 0 /// this is from the cgroup migration above that elogind does not need.
        } else if (r < 0 && !m->test_run_flags)
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
#endif // 0

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && m->test_run_flags == 0)
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported, and log about it */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
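
/* For illustration: a caller can test for an individual controller in the mask
 * computed above like this (a sketch, assuming a Manager *m that went through
 * the setup function):
 *
 *     if (m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_MEMORY))
 *             log_debug("memory controller is available");
 */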

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

#if 0 /// elogind is not init
        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
#endif // 0

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

#if 0 /// UNNEEDED by elogind
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
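
/* Worked example of the walk above: for cgroup "/system.slice/foo.service/bar",
 * lookup proceeds over "/system.slice/foo.service/bar", then
 * "/system.slice/foo.service", then "/system.slice", and finally falls back to
 * the root slice once no '/' other than the leading one remains. */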

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}
#endif // 0

#if 0 /// elogind must substitute this with its own variant
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}
#else
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Session *s;

        assert(m);
        assert(cgroup);

        log_debug("Got cgroup empty notification for: %s", cgroup);

        s = hashmap_get(m->sessions, cgroup);
        if (s)
                session_finalize(s);
        else
                log_warning("Session not found: %s", cgroup);

        return 0;
}
#endif // 0

#if 0 /// UNNEEDED by elogind
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_memory_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
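
/* Usage sketch (hypothetical caller, not part of this file): query the current
 * memory footprint of a unit and print it:
 *
 *     uint64_t bytes;
 *     if (unit_get_memory_current(u, &bytes) >= 0)
 *             log_info("Memory: %" PRIu64 " bytes", bytes);
 *
 * -ENODATA is returned when accounting is off or no cgroup has been realized yet. */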

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
                        return -ENODATA;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                        return -ENODATA;

                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}
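
/* Note on units: on the unified hierarchy "cpu.stat" reports usage_usec in
 * microseconds, while legacy "cpuacct.usage" is already in nanoseconds, hence
 * the NSEC_PER_USEC multiplication in only one of the two branches above.
 * Worked example: usage_usec=1500 becomes 1500 * NSEC_PER_USEC = 1500000 ns. */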

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return parameter. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
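
/* Usage sketch (hypothetical caller): to cache the counter without reading it,
 * call with a NULL return parameter, as described above:
 *
 *     (void) unit_get_cpu_usage(u, NULL);  // just refresh u->cpu_usage_last
 *
 * A later call with a non-NULL pointer may then be served from the cache even
 * after the cgroup is gone. */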

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}
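
/* Usage sketch (hypothetical caller): summing up both byte counters of a unit:
 *
 *     uint64_t in = 0, out = 0;
 *     (void) unit_get_ip_accounting(u, CGROUP_IP_INGRESS_BYTES, &in);
 *     (void) unit_get_ip_accounting(u, CGROUP_IP_EGRESS_BYTES, &out);
 *     log_info("Traffic: %" PRIu64 " bytes total", in + out);
 */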

int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if ((u->cgroup_realized_mask & m) == 0) /* NOP? */
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_realize_queue(u);
}
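
/* Example of the widening above: invalidating just CGROUP_MASK_IO also marks
 * CGROUP_MASK_BLKIO dirty, since the unified "io" and legacy "blkio" settings
 * are translated into each other and must stay in sync:
 *
 *     unit_invalidate_cgroup(u, CGROUP_MASK_IO);  // effectively IO|BLKIO
 */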

void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED) /* NOP? */
                return;

        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);
                }
        }
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);