/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include "alloc-util.h"
//#include "bpf-firewall.h"
#include "cgroup-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
//#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
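/* Note: CPUQuota= is applied against this fixed 100ms CFS period. A
 * per-second quota is rescaled to the period before it is written out,
 * i.e. both cpu.cfs_quota_us and cpu.max receive
 * "quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC" microseconds of
 * runtime per period; e.g. CPUQuota=50% becomes 50ms per 100ms. */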
#if 0 /// UNNEEDED by elogind
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)
void cgroup_context_init(CGroupContext *c) {

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->tasks_max = (uint64_t) -1; /* i.e. CGROUP_LIMIT_MAX, no limit */
}
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        LIST_REMOVE(device_allow, c->device_allow, a);

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        LIST_REMOVE(device_weights, c->io_device_weights, w);

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        LIST_REMOVE(device_limits, c->io_device_limits, l);

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        LIST_REMOVE(device_weights, c->blockio_device_weights, w);

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);

void cgroup_context_done(CGroupContext *c) {
        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        prefix = strempty(prefix);
166 "%sCPUAccounting=%s\n"
167 "%sIOAccounting=%s\n"
168 "%sBlockIOAccounting=%s\n"
169 "%sMemoryAccounting=%s\n"
170 "%sTasksAccounting=%s\n"
171 "%sIPAccounting=%s\n"
172 "%sCPUWeight=%" PRIu64 "\n"
173 "%sStartupCPUWeight=%" PRIu64 "\n"
174 "%sCPUShares=%" PRIu64 "\n"
175 "%sStartupCPUShares=%" PRIu64 "\n"
176 "%sCPUQuotaPerSecSec=%s\n"
177 "%sIOWeight=%" PRIu64 "\n"
178 "%sStartupIOWeight=%" PRIu64 "\n"
179 "%sBlockIOWeight=%" PRIu64 "\n"
180 "%sStartupBlockIOWeight=%" PRIu64 "\n"
181 "%sMemoryLow=%" PRIu64 "\n"
182 "%sMemoryHigh=%" PRIu64 "\n"
183 "%sMemoryMax=%" PRIu64 "\n"
184 "%sMemorySwapMax=%" PRIu64 "\n"
185 "%sMemoryLimit=%" PRIu64 "\n"
186 "%sTasksMax=%" PRIu64 "\n"
187 "%sDevicePolicy=%s\n"
189 prefix, yes_no(c->cpu_accounting),
190 prefix, yes_no(c->io_accounting),
191 prefix, yes_no(c->blockio_accounting),
192 prefix, yes_no(c->memory_accounting),
193 prefix, yes_no(c->tasks_accounting),
194 prefix, yes_no(c->ip_accounting),
195 prefix, c->cpu_weight,
196 prefix, c->startup_cpu_weight,
197 prefix, c->cpu_shares,
198 prefix, c->startup_cpu_shares,
199 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
200 prefix, c->io_weight,
201 prefix, c->startup_io_weight,
202 prefix, c->blockio_weight,
203 prefix, c->startup_blockio_weight,
204 prefix, c->memory_low,
205 prefix, c->memory_high,
206 prefix, c->memory_max,
207 prefix, c->memory_swap_max,
208 prefix, c->memory_limit,
209 prefix, c->tasks_max,
210 prefix, cgroup_device_policy_to_string(c->device_policy),
211 prefix, yes_no(c->delegate));
        _cleanup_free_ char *t = NULL;

        (void) cg_mask_to_string(c->delegate_controllers, &t);

        fprintf(f, "%sDelegateControllers=%s\n",
                prefix, strempty(t));

        LIST_FOREACH(device_allow, a, c->device_allow)
                        "%sDeviceAllow=%s %s%s%s\n",
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                        "%sIODeviceWeight=%s %" PRIu64,

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                        cgroup_io_limit_type_to_string(type),
                                        format_bytes(buf, sizeof(buf), il->limits[type]));

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                        "%sBlockIODeviceWeight=%s %" PRIu64,

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                                "%sBlockIOReadBandwidth=%s %s\n",
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                format_bytes(buf, sizeof(buf), b->wbps));

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
static int lookup_block_device(const char *p, dev_t *dev) {
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);

                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4]; /* sized for "c MAJ:MIN rwm" plus NUL */
        bool ignore_notfound;

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                ignore_notfound = true;

                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);

                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;

        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;

                if (type == 'c' && streq(line, "Character devices:")) {

                if (type == 'b' && streq(line, "Block devices:")) {

                w = strpbrk(p, WHITESPACE);

                r = safe_atou(p, &maj);

                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
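/* For reference, /proc/devices lists one "MAJOR NAME" pair per line,
 * grouped under "Character devices:" and "Block devices:" headers, e.g.:
 *
 *     Character devices:
 *       1 mem
 *       5 /dev/tty
 *     Block devices:
 *       8 sd
 *
 * The fnmatch() above therefore lets a DeviceAllow= entry such as
 * "char-pts" whitelist a whole device class by its major number. */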
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;

        return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;

        return CGROUP_CPU_SHARES_DEFAULT;
}
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
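/* A worked example of the scale conversion, assuming the stock constants
 * (CGROUP_CPU_SHARES_DEFAULT == 1024, CGROUP_WEIGHT_DEFAULT == 100):
 *
 *     shares 1024 -> weight 100     (the defaults map onto each other)
 *     shares 2048 -> weight 200
 *     weight   50 -> shares 512
 *
 * Results outside [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX] resp.
 * [CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX] are clamped. */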
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;

        return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;

        return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
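/* The same idea for the I/O schedulers, assuming the stock defaults
 * (CGROUP_BLKIO_WEIGHT_DEFAULT == 500, CGROUP_WEIGHT_DEFAULT == 100):
 * blkio weight 500 <-> io weight 100, blkio 1000 <-> io 200, with results
 * clamped to the respective [MIN, MAX] ranges. */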
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];

        r = lookup_block_device(dev_path, &dev);

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];

        r = lookup_block_device(dev_path, &dev);

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}

static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;

        r = lookup_block_device(dev_path, &dev);

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
                if (limits[type] != cgroup_io_limit_defaults[type]) {
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                } else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
        }

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];

        r = lookup_block_device(dev_path, &dev);

        if (rbps != CGROUP_LIMIT_MAX)

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        if (wbps != CGROUP_LIMIT_MAX)

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_low > 0 ||
                c->memory_high != CGROUP_LIMIT_MAX ||
                c->memory_max != CGROUP_LIMIT_MAX ||
                c->memory_swap_max != CGROUP_LIMIT_MAX;
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max"; /* overwritten below unless v means "no limit" */

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
static void cgroup_apply_firewall(Unit *u, CGroupContext *c) {

        if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
                                    * not recursive we don't ever touch the bpf on them */
                return;

        r = bpf_firewall_compile(u);

        (void) bpf_firewall_install(u);
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        c = unit_get_cgroup_context(u);
        path = u->cgroup_path;

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore them */
        is_root = isempty(path) || path_equal(path, "/");
        /* Make sure we don't try to display messages with an empty path. */

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);
                                log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);
                                log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                if (has_io) {
                        CGroupIODeviceWeight *w;

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);
                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
                                        cgroup_context_free_io_device_limit(c, l);

                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                if (!cgroup_apply_io_device_limit(u, b->path, limits))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                if (has_io) {
                        CGroupIODeviceWeight *w;

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                weight = cgroup_weight_io_to_blkio(w->weight);

                                log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_blkio_device_weight(u, w->path, weight);

                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                cgroup_apply_blkio_device_weight(u, w->path, w->weight);

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}BandwidthMax %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}Bandwidth for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
                                if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
                                        cgroup_context_free_io_device_limit(c, l);

                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
                                if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;
                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL here. */
                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
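                        /* The list below is a NUL-separated string of
                         * device-path/access-mode pairs; a leading "-"
                         * marks entries that may be missing without
                         * generating a warning. */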
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
                                /* Allow /run/elogind/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");

                LIST_FOREACH(device_allow, a, c->device_allow) {

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);

        if ((apply_mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != CGROUP_LIMIT_MAX) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to set pids.max: %m");

        if (apply_bpf)
                cgroup_apply_firewall(u, c);
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;
CGroupMask unit_get_own_mask(Unit *u) {

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);

        return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (u->type == UNIT_SLICE)

        c = unit_get_cgroup_context(u);

        if (cg_all_unified() <= 0) {

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))

        return c->delegate_controllers;
}
CGroupMask unit_get_members_mask(Unit *u) {

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (UNIT_DEREF(member->slice) != u)

                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

CGroupMask unit_get_enable_mask(Unit *u) {

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
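/* To summarize the mask helpers above: "own" is what the unit needs for
 * itself; "members" is the union of its children's subtrees; "subtree" is
 * own | members; "siblings" is the members mask of the parent slice;
 * "target" is own | members | siblings (what must exist per hierarchy on
 * legacy); and "enable" is the members mask, i.e. what gets turned on for
 * the unit's children via cg_enable_everywhere() on the unified tree. */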
bool unit_get_needs_bpf(Unit *u) {

        /* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
         * moment. */
        if (u->type == UNIT_SLICE)
        c = unit_get_cgroup_context(u);

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);
                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */
                        s->cgroup_members_mask |= m;
                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */
                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
static const char *migrate_callback(CGroupMask mask, void *userdata) {

        if (u->cgroup_path &&
            u->cgroup_realized &&
            (u->cgroup_realized_mask & mask) == mask)
                return u->cgroup_path;

        u = UNIT_DEREF(u->slice);

char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);

        escaped = cg_escape(u->id);

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        r = hashmap_put(u->manager->cgroup_unit, p, u);

        unit_release_cgroup(u);

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        c = unit_get_cgroup_context(u);

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
int unit_attach_pids_to_cgroup(Unit *u) {

        r = unit_realize_cgroup(u);

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);

static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;

        if (u->in_cgroup_realize_queue) {
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
                u->in_cgroup_realize_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

static void unit_add_to_cgroup_realize_queue(Unit *u) {

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;
                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);

int unit_realize_cgroup(Unit *u) {

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;
        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */
        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }

void unit_prune_cgroup(Unit *u) {

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);

        mypid = getpid_cached();
        while (cg_read_pid(f, &npid) > 0) {

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                        /* Dang, there's more than one daemonized PID
                        in this group, so we don't know what process
                        is the main process. */
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);

        while ((r = cg_read_pid(f, &pid)) > 0) {
                r = unit_watch_pid(u, pid);
                if (r < 0 && ret >= 0)
                        ret = r;

        if (r < 0 && ret >= 0)
                ret = r;

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);

        while ((r = cg_read_subgroup(d, &fn)) > 0) {
                _cleanup_free_ char *p = NULL;

                p = strjoin(path, "/", fn);

                r = unit_watch_pids_in_path(u, p);
                if (r < 0 && ret >= 0)
                        ret = r;

        if (r < 0 && ret >= 0)
                ret = r;

int unit_watch_all_pids(Unit *u) {

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);

        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;

        u = m->cgroup_empty_queue;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

void unit_add_to_cgroup_empty_queue(Unit *u) {

        /* Note that there are four different ways how cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

                union inotify_event_buffer buffer;
                struct inotify_event *e;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {

                        /* Queue overflow has no watch descriptor */

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;
                        unit_add_to_cgroup_empty_queue(u);

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
#if 0 /// elogind is not init and must therefore search for PID 1 instead of self.
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
#else
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 1, &m->cgroup_root);
#endif // 0
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

#if 0 /// elogind does not support systemd scopes and slices
        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
        log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
                          SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root);

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
        else if (r > 0)
                log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
        else
                log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);

#if 0 /// elogind is not init, and does not install the agent here.
        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {
                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */
                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else
                        log_debug("Release agent already installed.");

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        /* Note:
         * This method is in core, and normally called by systemd
         * being init. As elogind is never init, we cannot install
         * our agent here. We do so when mounting our cgroup file
         * system, so only if elogind is its own tiny controller.
         * Further, elogind is not meant to run in systemd init scope. */
        if (MANAGER_IS_SYSTEM(m))
                // we are our own cgroup controller
                scope_path = strjoina("");
        else if (streq(m->cgroup_root, "/elogind"))
                // root already is our cgroup
                scope_path = strjoina(m->cgroup_root);
        else
                // we have to create our own group
                scope_path = strjoina(m->cgroup_root, "/elogind");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
        log_debug_elogind("Created control group \"%s\"", scope_path);

#if 0 /// elogind is not a "sub-controller" like systemd, so migration is not needed.
        /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
        r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r < 0)
                log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

        /* 6. And pin it, so that it cannot be unmounted */
        safe_close(m->pin_cgroupfs_fd);
        m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
        if (m->pin_cgroupfs_fd < 0)
                return log_error_errno(errno, "Failed to open pin file: %m");

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && m->test_run_flags == 0)
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported, and log about it */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

void manager_shutdown_cgroup(Manager *m, bool delete) {
        /* We can't really delete the group, since we are in it. But
         * we can at least trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
#if 0 /// elogind is not init
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

#if 0 /// UNNEEDED by elogind
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {

        u = hashmap_get(m->cgroup_unit, cgroup);

        p = strdupa(cgroup);

                e = strrchr(p, '/');
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                u = hashmap_get(m->cgroup_unit, p);

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {

                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));

        return manager_get_unit_by_pid_cgroup(m, pid);
}

#if 0 /// elogind must substitute this with its own variant
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);

        unit_add_to_cgroup_empty_queue(u);
#else
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {

        log_debug("Got cgroup empty notification for: %s", cgroup);

        s = hashmap_get(m->sessions, cgroup);
        if (s)
                session_finalize(s);
        else
                log_warning("Session not found: %s", cgroup);
#if 0 /// UNNEEDED by elogind
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);

        return safe_atou64(v, ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;

        if (!u->cgroup_path)
                return -ENODATA;

        r = cg_all_unified();
        if (r > 0) {
                const char *keys[] = { "usage_usec", NULL };
                _cleanup_free_ char *val = NULL;

                if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
                        return -ENODATA;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);

                r = safe_atou64(val, &us);

                ns = us * NSEC_PER_USEC;
        } else {
                if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                        return -ENODATA;

                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);

                r = safe_atou64(v, &ns);

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
         * inner cgroup nodes and hence have no processes directly attached, hence their counters would be zero
         * anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
         * filters. */
        if (u->type == UNIT_SLICE)
                return -ENODATA;

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];
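/* A hypothetical caller, sketched for illustration only: query the egress
 * byte counter of a unit, assuming IP accounting is enabled for it:
 *
 *     uint64_t bytes;
 *     if (unit_get_ip_accounting(u, CGROUP_IP_EGRESS_BYTES, &bytes) >= 0)
 *             log_unit_debug(u, "Unit sent %" PRIu64 " bytes so far.", bytes);
 */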
int unit_reset_cpu_accounting(Unit *u) {

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;

int unit_reset_ip_accounting(Unit *u) {

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_realize_queue(u);
}

void unit_invalidate_cgroup_bpf(Unit *u) {

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED)
                return;

        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
        unit_add_to_cgroup_realize_queue(u);
        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);

void manager_invalidate_startup_units(Manager *m) {

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);