2 This file is part of systemd.
4 Copyright 2013 Lennart Poettering
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
23 #include "alloc-util.h"
24 #include "cgroup-util.h"
29 #include "parse-util.h"
30 #include "path-util.h"
31 #include "process-util.h"
32 //#include "special.h"
33 #include "string-table.h"
34 #include "string-util.h"
36 #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
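/* Note: the CFS period used for CPUQuota= handling is fixed at 100 ms; the per-second
 * quota configured by the user is scaled down to this period when cpu.cfs_quota_us is
 * written in cgroup_context_apply() below. */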
38 #if 0 /// UNNEEDED by elogind
39 void cgroup_context_init(CGroupContext *c) {
42 /* Initialize everything to the kernel defaults, assuming the
43 * structure is preinitialized to 0 */
45 c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
46 c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
47 c->cpu_quota_per_sec_usec = USEC_INFINITY;
49 c->memory_limit = (uint64_t) -1;
51 c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
52 c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
54 c->tasks_max = (uint64_t) -1;
57 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
61 LIST_REMOVE(device_allow, c->device_allow, a);
66 void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
70 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
75 void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
79 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
84 void cgroup_context_done(CGroupContext *c) {
87 while (c->blockio_device_weights)
88 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
90 while (c->blockio_device_bandwidths)
91 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
93 while (c->device_allow)
94 cgroup_context_free_device_allow(c, c->device_allow);
97 void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
98 CGroupBlockIODeviceBandwidth *b;
99 CGroupBlockIODeviceWeight *w;
100 CGroupDeviceAllow *a;
101 char u[FORMAT_TIMESPAN_MAX];
106 prefix = strempty(prefix);
109 "%sCPUAccounting=%s\n"
110 "%sBlockIOAccounting=%s\n"
111 "%sMemoryAccounting=%s\n"
112 "%sTasksAccounting=%s\n"
113 "%sCPUShares=%" PRIu64 "\n"
114 "%sStartupCPUShares=%" PRIu64 "\n"
115 "%sCPUQuotaPerSecSec=%s\n"
116 "%sBlockIOWeight=%" PRIu64 "\n"
117 "%sStartupBlockIOWeight=%" PRIu64 "\n"
118 "%sMemoryLimit=%" PRIu64 "\n"
119 "%sTasksMax=%" PRIu64 "\n"
120 "%sDevicePolicy=%s\n"
122 prefix, yes_no(c->cpu_accounting),
123 prefix, yes_no(c->blockio_accounting),
124 prefix, yes_no(c->memory_accounting),
125 prefix, yes_no(c->tasks_accounting),
126 prefix, c->cpu_shares,
127 prefix, c->startup_cpu_shares,
128 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
129 prefix, c->blockio_weight,
130 prefix, c->startup_blockio_weight,
131 prefix, c->memory_limit,
132 prefix, c->tasks_max,
133 prefix, cgroup_device_policy_to_string(c->device_policy),
134 prefix, yes_no(c->delegate));
136 LIST_FOREACH(device_allow, a, c->device_allow)
138 "%sDeviceAllow=%s %s%s%s\n",
141 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
143 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
145 "%sBlockIODeviceWeight=%s %" PRIu64,
150 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
151 char buf[FORMAT_BYTES_MAX];
156 b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
158 format_bytes(buf, sizeof(buf), b->bandwidth));
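/* For illustration only, an excerpt of the dump for a hypothetical context might read:
 *
 *   CPUAccounting=yes
 *   CPUShares=1024
 *   CPUQuotaPerSecSec=200ms
 *   BlockIOWeight=500
 *   MemoryLimit=1073741824
 *   TasksMax=512
 *   DevicePolicy=closed
 *   DeviceAllow=/dev/null rwm
 */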
162 static int lookup_blkio_device(const char *p, dev_t *dev) {
171 return log_warning_errno(errno, "Couldn't stat device %s: %m", p);
173 if (S_ISBLK(st.st_mode))
175 else if (major(st.st_dev) != 0) {
176 /* If this is not a device node then find the block
177 * device this file is stored on */
180 /* If this is a partition, try to get the originating
182 block_get_whole_disk(*dev, dev);
184 log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
191 static int whitelist_device(const char *path, const char *node, const char *acc) {
192 char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
199 if (stat(node, &st) < 0) {
200 log_warning("Couldn't stat device %s", node);
204 if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
205 log_warning("%s is not a device.", node);
211 S_ISCHR(st.st_mode) ? 'c' : 'b',
212 major(st.st_rdev), minor(st.st_rdev),
215 r = cg_set_attribute("devices", path, "devices.allow", buf);
217 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
218 "Failed to set devices.allow on %s: %m", path);
223 static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
224 _cleanup_fclose_ FILE *f = NULL;
231 assert(type == 'b' || type == 'c');
233 f = fopen("/proc/devices", "re");
235 return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
237 FOREACH_LINE(line, f, goto fail) {
238 char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
243 if (type == 'c' && streq(line, "Character devices:")) {
248 if (type == 'b' && streq(line, "Block devices:")) {
263 w = strpbrk(p, WHITESPACE);
268 r = safe_atou(p, &maj);
275 w += strspn(w, WHITESPACE);
277 if (fnmatch(name, w, 0) != 0)
286 r = cg_set_attribute("devices", path, "devices.allow", buf);
288 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
289 "Failed to set devices.allow on %s: %m", path);
295 log_warning_errno(errno, "Failed to read /proc/devices: %m");
299 void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, ManagerState state) {
309 /* Some cgroup attributes are not supported on the root cgroup,
310 * hence silently ignore */
311 is_root = isempty(path) || path_equal(path, "/");
313 /* Make sure we don't try to display messages with an empty path. */
316 /* We generally ignore errors caused by read-only mounted
317 * cgroup trees (assuming we are running in a container then),
318 * and missing cgroups, i.e. EROFS and ENOENT. */
320 if ((mask & CGROUP_MASK_CPU) && !is_root) {
321 char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
323 sprintf(buf, "%" PRIu64 "\n",
324 IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->startup_cpu_shares :
325 c->cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->cpu_shares : CGROUP_CPU_SHARES_DEFAULT);
326 r = cg_set_attribute("cpu", path, "cpu.shares", buf);
328 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
329 "Failed to set cpu.shares on %s: %m", path);
331 sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
332 r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
334 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
335 "Failed to set cpu.cfs_period_us on %s: %m", path);
337 if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
338 sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
339 r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
341 r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
343 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
344 "Failed to set cpu.cfs_quota_us on %s: %m", path);
347 if (mask & CGROUP_MASK_BLKIO) {
348 char buf[MAX(DECIMAL_STR_MAX(uint64_t)+1,
349 DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
350 CGroupBlockIODeviceWeight *w;
351 CGroupBlockIODeviceBandwidth *b;
354 sprintf(buf, "%" PRIu64 "\n",
355 IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->startup_blockio_weight :
356 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->blockio_weight : CGROUP_BLKIO_WEIGHT_DEFAULT);
357 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
359 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
360 "Failed to set blkio.weight on %s: %m", path);
362 /* FIXME: no way to reset this list */
363 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
366 r = lookup_blkio_device(w->path, &dev);
370 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), w->weight);
371 r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
373 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
374 "Failed to set blkio.weight_device on %s: %m", path);
378 /* FIXME: no way to reset this list */
379 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
383 r = lookup_blkio_device(b->path, &dev);
387 a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";
389 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
390 r = cg_set_attribute("blkio", path, a, buf);
392 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
393 "Failed to set %s on %s: %m", a, path);
397 if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
398 if (c->memory_limit != (uint64_t) -1) {
399 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
401 sprintf(buf, "%" PRIu64 "\n", c->memory_limit);
403 if (cg_unified() <= 0)
404 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
406 r = cg_set_attribute("memory", path, "memory.max", buf);
409 if (cg_unified() <= 0)
410 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
412 r = cg_set_attribute("memory", path, "memory.max", "max");
416 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
417 "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
420 if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
421 CGroupDeviceAllow *a;
423 /* Changing the devices list of a populated cgroup
424 * might result in EINVAL, hence ignore EINVAL
427 if (c->device_allow || c->device_policy != CGROUP_AUTO)
428 r = cg_set_attribute("devices", path, "devices.deny", "a");
430 r = cg_set_attribute("devices", path, "devices.allow", "a");
432 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
433 "Failed to reset devices.list on %s: %m", path);
435 if (c->device_policy == CGROUP_CLOSED ||
436 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
437 static const char auto_devices[] =
438 "/dev/null\0" "rwm\0"
439 "/dev/zero\0" "rwm\0"
440 "/dev/full\0" "rwm\0"
441 "/dev/random\0" "rwm\0"
442 "/dev/urandom\0" "rwm\0"
444 "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */
448 NULSTR_FOREACH_PAIR(x, y, auto_devices)
449 whitelist_device(path, x, y);
451 whitelist_major(path, "pts", 'c', "rw");
452 whitelist_major(path, "kdbus", 'c', "rw");
453 whitelist_major(path, "kdbus/*", 'c', "rw");
456 LIST_FOREACH(device_allow, a, c->device_allow) {
472 if (startswith(a->path, "/dev/"))
473 whitelist_device(path, a->path, acc);
474 else if (startswith(a->path, "block-"))
475 whitelist_major(path, a->path + 6, 'b', acc);
476 else if (startswith(a->path, "char-"))
477 whitelist_major(path, a->path + 5, 'c', acc);
479 log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
483 if ((mask & CGROUP_MASK_PIDS) && !is_root) {
485 if (c->tasks_max != (uint64_t) -1) {
486 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
488 sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
489 r = cg_set_attribute("pids", path, "pids.max", buf);
491 r = cg_set_attribute("pids", path, "pids.max", "max");
494 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
495 "Failed to set pids.max on %s: %m", path);
499 CGroupMask cgroup_context_get_mask(CGroupContext *c) {
502 /* Figure out which controllers we need */
504 if (c->cpu_accounting ||
505 c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
506 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
507 c->cpu_quota_per_sec_usec != USEC_INFINITY)
508 mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
510 if (c->blockio_accounting ||
511 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
512 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
513 c->blockio_device_weights ||
514 c->blockio_device_bandwidths)
515 mask |= CGROUP_MASK_BLKIO;
517 if (c->memory_accounting ||
518 c->memory_limit != (uint64_t) -1)
519 mask |= CGROUP_MASK_MEMORY;
521 if (c->device_allow ||
522 c->device_policy != CGROUP_AUTO)
523 mask |= CGROUP_MASK_DEVICES;
525 if (c->tasks_accounting ||
526 c->tasks_max != (uint64_t) -1)
527 mask |= CGROUP_MASK_PIDS;
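/* For example (hypothetical context): a unit that only sets MemoryLimit= and TasksMax=
 * ends up with CGROUP_MASK_MEMORY|CGROUP_MASK_PIDS here, so only those controllers need
 * to be realized for it. */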
532 CGroupMask unit_get_own_mask(Unit *u) {
535 /* Returns the mask of controllers the unit needs for itself */
537 c = unit_get_cgroup_context(u);
541 /* If delegation is turned on, then turn on all cgroups,
542 * unless we are on the legacy hierarchy and the process we
543 * fork into it is known to drop privileges, and hence
544 * shouldn't get access to the controllers.
546 * Note that on the unified hierarchy it is safe to delegate
547 * controllers to unprivileged services. */
552 e = unit_get_exec_context(u);
554 exec_context_maintains_privileges(e) ||
556 return _CGROUP_MASK_ALL;
559 return cgroup_context_get_mask(c);
562 CGroupMask unit_get_members_mask(Unit *u) {
565 /* Returns the mask of controllers all of the unit's children
568 if (u->cgroup_members_mask_valid)
569 return u->cgroup_members_mask;
571 u->cgroup_members_mask = 0;
573 if (u->type == UNIT_SLICE) {
577 SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
582 if (UNIT_DEREF(member->slice) != u)
585 u->cgroup_members_mask |=
586 unit_get_own_mask(member) |
587 unit_get_members_mask(member);
591 u->cgroup_members_mask_valid = true;
592 return u->cgroup_members_mask;
595 CGroupMask unit_get_siblings_mask(Unit *u) {
598 /* Returns the mask of controllers all of the unit's siblings
599 * require, i.e. the members mask of the unit's parent slice
600 * if there is one. */
602 if (UNIT_ISSET(u->slice))
603 return unit_get_members_mask(UNIT_DEREF(u->slice));
605 return unit_get_own_mask(u) | unit_get_members_mask(u);
608 CGroupMask unit_get_subtree_mask(Unit *u) {
610 /* Returns the mask of this subtree, meaning of the group
611 * itself and its children. */
613 return unit_get_own_mask(u) | unit_get_members_mask(u);
616 CGroupMask unit_get_target_mask(Unit *u) {
619 /* This returns the cgroup mask of all controllers to enable
620 * for a specific cgroup, i.e. everything it needs itself,
621 * plus all that its children need, plus all that its siblings
622 * need. This is primarily useful on the legacy cgroup
623 * hierarchy, where we need to duplicate each cgroup in each
624 * hierarchy that shall be enabled for it. */
626 mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
627 mask &= u->manager->cgroup_supported;
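/* Illustrative example (hypothetical units): if a.service itself only needs "cpu" but its
 * sibling b.service in the same slice needs "blkio", a.service's target mask becomes
 * cpu|blkio (plus anything their children need), intersected with what the kernel
 * actually supports. */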
632 CGroupMask unit_get_enable_mask(Unit *u) {
635 /* This returns the cgroup mask of all controllers to enable
636 * for the children of a specific cgroup. This is primarily
637 * useful for the unified cgroup hierarchy, where each cgroup
638 * controls which controllers are enabled for its children. */
640 mask = unit_get_members_mask(u);
641 mask &= u->manager->cgroup_supported;
646 /* Recurse from a unit up through its containing slices, propagating
647 * mask bits upward. A unit is also a member of itself. */
648 void unit_update_cgroup_members_masks(Unit *u) {
654 /* Calculate subtree mask */
655 m = unit_get_subtree_mask(u);
657 /* See if anything changed from the previous invocation. If
658 * not, we're done. */
659 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
663 u->cgroup_subtree_mask_valid &&
664 ((m & ~u->cgroup_subtree_mask) != 0) &&
665 ((~m & u->cgroup_subtree_mask) == 0);
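/* I.e. the subtree mask grew monotonically: (m & ~old) != 0 means at least one new bit
 * was added, and (~m & old) == 0 means no previously set bit was dropped. Only then can
 * the change be propagated upwards without invalidating the parent's members mask. */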
667 u->cgroup_subtree_mask = m;
668 u->cgroup_subtree_mask_valid = true;
670 if (UNIT_ISSET(u->slice)) {
671 Unit *s = UNIT_DEREF(u->slice);
674 /* There's more set now than before. We
675 * propagate the new mask to the parent's mask
676 * (not caring if it actually was valid or
679 s->cgroup_members_mask |= m;
682 /* There's less set now than before (or we
683 * don't know), we need to recalculate
684 * everything, so let's invalidate the
685 * parent's members mask */
687 s->cgroup_members_mask_valid = false;
689 /* And now make sure that this change also hits our
691 unit_update_cgroup_members_masks(s);
695 static const char *migrate_callback(CGroupMask mask, void *userdata) {
702 if (u->cgroup_path &&
703 u->cgroup_realized &&
704 (u->cgroup_realized_mask & mask) == mask)
705 return u->cgroup_path;
707 u = UNIT_DEREF(u->slice);
713 char *unit_default_cgroup_path(Unit *u) {
714 _cleanup_free_ char *escaped = NULL, *slice = NULL;
719 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
720 return strdup(u->manager->cgroup_root);
722 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
723 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
728 escaped = cg_escape(u->id);
733 return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
735 return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
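/* For example (hypothetical ids, systemd-style naming): a unit "foo.service" placed in
 * "bar.slice" ends up at "<cgroup_root>/bar.slice/foo.service", with the unit id passed
 * through cg_escape() first. */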
738 int unit_set_cgroup_path(Unit *u, const char *path) {
739 _cleanup_free_ char *p = NULL;
751 if (streq_ptr(u->cgroup_path, p))
755 r = hashmap_put(u->manager->cgroup_unit, p, u);
760 unit_release_cgroup(u);
768 int unit_watch_cgroup(Unit *u) {
769 _cleanup_free_ char *events = NULL;
777 if (u->cgroup_inotify_wd >= 0)
780 /* Only applies to the unified hierarchy */
783 return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
787 /* Don't watch the root slice, it's pointless. */
788 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
791 r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
795 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
799 u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
800 if (u->cgroup_inotify_wd < 0) {
802 if (errno == ENOENT) /* If the directory is already
803 * gone we don't need to track
804 * it, so this is not an error */
807 return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
810 r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
812 return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
817 static int unit_create_cgroup(
819 CGroupMask target_mask,
820 CGroupMask enable_mask) {
827 c = unit_get_cgroup_context(u);
831 if (!u->cgroup_path) {
832 _cleanup_free_ char *path = NULL;
834 path = unit_default_cgroup_path(u);
838 r = unit_set_cgroup_path(u, path);
840 return log_unit_error_errno(u, r, "Control group %s exists already.", path);
842 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
845 /* First, create our own group */
846 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
848 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
850 /* Start watching it */
851 (void) unit_watch_cgroup(u);
853 /* Enable all controllers we need */
854 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
856 log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);
858 /* Keep track that this is now realized */
859 u->cgroup_realized = true;
860 u->cgroup_realized_mask = target_mask;
862 if (u->type != UNIT_SLICE && !c->delegate) {
864 /* Then, possibly move things over, but not if
865 * subgroups may contain processes, which is the case
866 * for slice and delegation units. */
867 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
869 log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
875 int unit_attach_pids_to_cgroup(Unit *u) {
879 r = unit_realize_cgroup(u);
883 r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
890 static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask) {
893 return u->cgroup_realized && u->cgroup_realized_mask == target_mask;
896 /* Check if necessary controllers and attributes for a unit are in place.
899 * If not, create paths, move processes over, and set attributes.
901 * Returns 0 on success and < 0 on failure. */
902 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
903 CGroupMask target_mask, enable_mask;
908 if (u->in_cgroup_queue) {
909 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
910 u->in_cgroup_queue = false;
913 target_mask = unit_get_target_mask(u);
914 if (unit_has_mask_realized(u, target_mask))
917 /* First, realize parents */
918 if (UNIT_ISSET(u->slice)) {
919 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
924 /* And then do the real work */
925 enable_mask = unit_get_enable_mask(u);
926 r = unit_create_cgroup(u, target_mask, enable_mask);
930 /* Finally, apply the necessary attributes. */
931 cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, state);
936 static void unit_add_to_cgroup_queue(Unit *u) {
938 if (u->in_cgroup_queue)
941 LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
942 u->in_cgroup_queue = true;
945 unsigned manager_dispatch_cgroup_queue(Manager *m) {
951 state = manager_state(m);
953 while ((i = m->cgroup_queue)) {
954 assert(i->in_cgroup_queue);
956 r = unit_realize_cgroup_now(i, state);
958 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
966 static void unit_queue_siblings(Unit *u) {
969 /* This adds the siblings of the specified unit and the
970 * siblings of all parent units to the cgroup queue. (But
971 * neither the specified unit itself nor the parents.) */
973 while ((slice = UNIT_DEREF(u->slice))) {
977 SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
981 /* Skip units that have a dependency on the slice
982 * but aren't actually in it. */
983 if (UNIT_DEREF(m->slice) != slice)
986 /* No point in doing cgroup application for units
987 * without active processes. */
988 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
991 /* If the unit doesn't need any new controllers
992 * and has current ones realized, it doesn't need
994 if (unit_has_mask_realized(m, unit_get_target_mask(m)))
997 unit_add_to_cgroup_queue(m);
1004 int unit_realize_cgroup(Unit *u) {
1007 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1010 /* So, here's the deal: when realizing the cgroups for this
1011 * unit, we need to first create all parents, but there's more
1012 * actually: for the weight-based controllers we also need to
1013 * make sure that all our siblings (i.e. units that are in the
1014 * same slice as we are) have cgroups, too. Otherwise, things
1015 * would become very uneven as each of their processes would
1016 * get as many resources as our whole group together. This call
1017 * will synchronously create the parent cgroups, but will
1018 * defer work on the siblings to the next event loop
1021 /* Add all sibling slices to the cgroup queue. */
1022 unit_queue_siblings(u);
1024 /* And realize this one now (and apply the values) */
1025 return unit_realize_cgroup_now(u, manager_state(u->manager));
1028 void unit_release_cgroup(Unit *u) {
1031 /* Forgets all cgroup details for this cgroup */
1033 if (u->cgroup_path) {
1034 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1035 u->cgroup_path = mfree(u->cgroup_path);
1038 if (u->cgroup_inotify_wd >= 0) {
1039 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1040 log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);
1042 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1043 u->cgroup_inotify_wd = -1;
1047 void unit_prune_cgroup(Unit *u) {
1053 /* Removes the cgroup, if empty and possible, and stops watching it. */
1055 if (!u->cgroup_path)
1058 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1060 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
1062 log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
1069 unit_release_cgroup(u);
1071 u->cgroup_realized = false;
1072 u->cgroup_realized_mask = 0;
1075 int unit_search_main_pid(Unit *u, pid_t *ret) {
1076 _cleanup_fclose_ FILE *f = NULL;
1077 pid_t pid = 0, npid, mypid;
1083 if (!u->cgroup_path)
1086 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1091 while (cg_read_pid(f, &npid) > 0) {
1097 /* Ignore processes that aren't our kids */
1098 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
1102 /* Dang, there's more than one daemonized PID
1103 in this group, so we don't know what process
1104 is the main process. */
1115 static int unit_watch_pids_in_path(Unit *u, const char *path) {
1116 _cleanup_closedir_ DIR *d = NULL;
1117 _cleanup_fclose_ FILE *f = NULL;
1123 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1129 while ((r = cg_read_pid(f, &pid)) > 0) {
1130 r = unit_watch_pid(u, pid);
1131 if (r < 0 && ret >= 0)
1135 if (r < 0 && ret >= 0)
1139 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1146 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1147 _cleanup_free_ char *p = NULL;
1149 p = strjoin(path, "/", fn, NULL);
1155 r = unit_watch_pids_in_path(u, p);
1156 if (r < 0 && ret >= 0)
1160 if (r < 0 && ret >= 0)
1167 int unit_watch_all_pids(Unit *u) {
1170 /* Adds all PIDs from our cgroup to the set of PIDs we
1171 * watch. This is a fallback logic for cases where we do not
1172 * get reliable cgroup empty notifications: we try to use
1173 * SIGCHLD as replacement. */
1175 if (!u->cgroup_path)
1178 if (cg_unified() > 0) /* On unified we can use proper notifications */
1181 return unit_watch_pids_in_path(u, u->cgroup_path);
1184 int unit_notify_cgroup_empty(Unit *u) {
1189 if (!u->cgroup_path)
1192 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
1196 unit_add_to_gc_queue(u);
1198 if (UNIT_VTABLE(u)->notify_cgroup_empty)
1199 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1204 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1205 Manager *m = userdata;
1212 union inotify_event_buffer buffer;
1213 struct inotify_event *e;
1216 l = read(fd, &buffer, sizeof(buffer));
1218 if (errno == EINTR || errno == EAGAIN)
1221 return log_error_errno(errno, "Failed to read control group inotify events: %m");
1224 FOREACH_INOTIFY_EVENT(e, buffer, l) {
1228 /* Queue overflow has no watch descriptor */
1231 if (e->mask & IN_IGNORED)
1232 /* The watch was just removed */
1235 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
1236 if (!u) /* Note that inotify might deliver
1237 * events for a watch even after it
1238 * was removed, because it was queued
1239 * before the removal. Let's ignore
1240 * this here safely. */
1243 (void) unit_notify_cgroup_empty(u);
1249 int manager_setup_cgroup(Manager *m) {
1250 _cleanup_free_ char *path = NULL;
1257 /* 1. Determine hierarchy */
1258 m->cgroup_root = mfree(m->cgroup_root);
1259 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
1261 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
1263 #if 0 /// elogind does not support systemd scopes and slices
1264 /* Chop off the init scope, if we are already located in it */
1265 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1267 /* LEGACY: Also chop off the system slice if we are in
1268 * it. This is to support live upgrades from older systemd
1269 * versions where PID 1 was moved there. Also see
1270 * cg_get_root_path(). */
1271 if (!e && m->running_as == MANAGER_SYSTEM) {
1272 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
1274 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
1280 /* And make sure to store away the root value without trailing
1281 * slash, even for the root dir, so that we can easily prepend
1283 while ((e = endswith(m->cgroup_root, "/")))
1285 log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
1286 SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root);
1289 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
1291 return log_error_errno(r, "Cannot find cgroup mount point: %m");
1293 unified = cg_unified();
1295 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
1297 log_debug("Unified cgroup hierarchy is located at %s.", path);
1299 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
1302 const char *scope_path;
1304 /* 3. Install agent */
1307 /* In the unified hierarchy we can get
1308 * cgroup empty notifications via inotify. */
1310 #if 0 /// elogind does not support the unified hierarchy, yet.
1311 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1312 safe_close(m->cgroup_inotify_fd);
1314 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
1315 if (m->cgroup_inotify_fd < 0)
1316 return log_error_errno(errno, "Failed to create control group inotify object: %m");
1318 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
1320 return log_error_errno(r, "Failed to watch control group inotify object: %m");
1322 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_IDLE - 5);
1324 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
1326 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
1329 return log_error_errno(EOPNOTSUPP, "Unified cgroup hierarchy not supported: %m");
1331 } else if (m->running_as == MANAGER_SYSTEM) {
1333 /* On the legacy hierarchy we only get
1334 * notifications via cgroup agents. (Which
1335 * isn't really reliable, since it does not
1336 * generate events when control groups with
1337 * children run empty.) */
1339 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, ELOGIND_CGROUP_AGENT_PATH);
1341 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
1343 log_debug("Installed release agent.");
1345 log_debug("Release agent already installed.");
1348 #if 0 /// elogind is not meant to run in systemd init scope
1349 /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
1350 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1351 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1353 if (streq(SYSTEMD_CGROUP_CONTROLLER, "name=elogind"))
1354 // we are our own cgroup controller
1355 scope_path = strjoina("");
1356 else if (streq(m->cgroup_root, "/elogind"))
1357 // root already is our cgroup
1358 scope_path = strjoina(m->cgroup_root);
1360 // we have to create our own group
1361 scope_path = strjoina(m->cgroup_root, "/elogind");
1362 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1365 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
1366 log_debug_elogind("Created control group \"%s\"", scope_path);
1368 /* also, move all other userspace processes remaining
1369 * in the root cgroup into that scope. */
1370 if (!streq(m->cgroup_root, scope_path)) {
1371 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, false);
1373 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
1376 /* 5. And pin it, so that it cannot be unmounted */
1377 safe_close(m->pin_cgroupfs_fd);
1378 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
1379 if (m->pin_cgroupfs_fd < 0)
1380 return log_error_errno(errno, "Failed to open pin file: %m");
1382 /* 6. Always enable hierarchical support if it exists... */
1384 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
1387 /* 7. Figure out which controllers are supported */
1388 r = cg_mask_supported(&m->cgroup_supported);
1390 return log_error_errno(r, "Failed to determine supported controllers: %m");
1392 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
1393 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & c));
1398 void manager_shutdown_cgroup(Manager *m, bool delete) {
1401 /* We can't really delete the group, since we are in it. But
1403 if (delete && m->cgroup_root)
1404 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
1406 #if 0 /// elogind does not support the unified hierarchy, yet.
1407 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
1409 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1410 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
1413 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
1415 m->cgroup_root = mfree(m->cgroup_root);
1418 #if 0 /// UNNEEDED by elogind
1419 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
1426 u = hashmap_get(m->cgroup_unit, cgroup);
1430 p = strdupa(cgroup);
1434 e = strrchr(p, '/');
1436 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
1440 u = hashmap_get(m->cgroup_unit, p);
1446 Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
1447 _cleanup_free_ char *cgroup = NULL;
1455 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
1459 return manager_get_unit_by_cgroup(m, cgroup);
1462 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
1471 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
1473 u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
1477 u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
1481 return manager_get_unit_by_pid_cgroup(m, pid);
1484 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
1490 u = manager_get_unit_by_cgroup(m, cgroup);
1494 return unit_notify_cgroup_empty(u);
1497 int unit_get_memory_current(Unit *u, uint64_t *ret) {
1498 _cleanup_free_ char *v = NULL;
1504 if (!u->cgroup_path)
1507 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
1510 if (cg_unified() <= 0)
1511 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
1513 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
1519 return safe_atou64(v, ret);
1522 int unit_get_tasks_current(Unit *u, uint64_t *ret) {
1523 _cleanup_free_ char *v = NULL;
1529 if (!u->cgroup_path)
1532 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
1535 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
1541 return safe_atou64(v, ret);
1544 static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
1545 _cleanup_free_ char *v = NULL;
1552 if (!u->cgroup_path)
1555 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
1558 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
1564 r = safe_atou64(v, &ns);
1572 int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
1576 r = unit_get_cpu_usage_raw(u, &ns);
1580 if (ns > u->cpuacct_usage_base)
1581 ns -= u->cpuacct_usage_base;
1589 int unit_reset_cpu_usage(Unit *u) {
1595 r = unit_get_cpu_usage_raw(u, &ns);
1597 u->cpuacct_usage_base = 0;
1601 u->cpuacct_usage_base = ns;
1605 bool unit_cgroup_delegate(Unit *u) {
1610 c = unit_get_cgroup_context(u);
1617 void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
1620 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1626 if ((u->cgroup_realized_mask & m) == 0)
1629 u->cgroup_realized_mask &= ~m;
1630 unit_add_to_cgroup_queue(u);
1633 void manager_invalidate_startup_units(Manager *m) {
1639 SET_FOREACH(u, m->startup_units, i)
1640 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_BLKIO);
1643 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
1644 [CGROUP_AUTO] = "auto",
1645 [CGROUP_CLOSED] = "closed",
1646 [CGROUP_STRICT] = "strict",
1649 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);