1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2013 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
25 #include "path-util.h"
27 #include "cgroup-util.h"
30 #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
/* Reset a zero-initialized CGroupContext to "unset" sentinels:
 * (unsigned long) -1 / (uint64_t) -1 / USEC_INFINITY all mean
 * "not configured, leave the kernel default in place". */
32 void cgroup_context_init(CGroupContext *c) {
35 /* Initialize everything to the kernel defaults, assuming the
36 * structure is preinitialized to 0 */
38 c->cpu_shares = (unsigned long) -1;
39 c->startup_cpu_shares = (unsigned long) -1;
40 c->memory_limit = (uint64_t) -1;
41 c->blockio_weight = (unsigned long) -1;
42 c->startup_blockio_weight = (unsigned long) -1;
44 c->cpu_quota_per_sec_usec = USEC_INFINITY;
/* Detach one DeviceAllow entry from the context's linked list.
 * (Release of the entry itself is on lines elided here — confirm.) */
47 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
51 LIST_REMOVE(device_allow, c->device_allow, a);
/* Detach one per-device blkio weight entry from the context's list. */
56 void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
60 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
/* Detach one per-device blkio bandwidth entry from the context's list. */
65 void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
69 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
/* Free all dynamically allocated list entries held by a CGroupContext.
 * Each helper pops the list head, so the loops run until the lists are empty. */
74 void cgroup_context_done(CGroupContext *c) {
77 while (c->blockio_device_weights)
78 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
80 while (c->blockio_device_bandwidths)
81 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
83 while (c->device_allow)
84 cgroup_context_free_device_allow(c, c->device_allow);
/* Dump the cgroup settings of a context in human-readable "Key=Value" form
 * to f, each line prefixed with the (possibly empty) prefix string. Used
 * for "systemctl status"-style introspection; purely diagnostic output. */
87 void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
88 CGroupBlockIODeviceBandwidth *b;
89 CGroupBlockIODeviceWeight *w;
91 char u[FORMAT_TIMESPAN_MAX];
96 prefix = strempty(prefix);
99 "%sCPUAccounting=%s\n"
100 "%sBlockIOAccounting=%s\n"
101 "%sMemoryAccounting=%s\n"
103 "%sStartupCPUShares=%lu\n"
104 "%sCPUQuotaPerSecSec=%s\n"
105 "%sBlockIOWeight=%lu\n"
106 "%sStartupBlockIOWeight=%lu\n"
107 "%sMemoryLimit=%" PRIu64 "\n"
108 "%sDevicePolicy=%s\n"
110 prefix, yes_no(c->cpu_accounting),
111 prefix, yes_no(c->blockio_accounting),
112 prefix, yes_no(c->memory_accounting),
113 prefix, c->cpu_shares,
114 prefix, c->startup_cpu_shares,
115 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
116 prefix, c->blockio_weight,
117 prefix, c->startup_blockio_weight,
118 prefix, c->memory_limit,
119 prefix, cgroup_device_policy_to_string(c->device_policy),
120 prefix, yes_no(c->delegate));
/* One line per whitelisted device, with its rwm access string. */
122 LIST_FOREACH(device_allow, a, c->device_allow)
124 "%sDeviceAllow=%s %s%s%s\n",
127 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
/* One line per per-device blkio weight. */
129 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
131 "%sBlockIODeviceWeight=%s %lu",
/* One line per per-device bandwidth limit, read and write reported separately. */
136 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
137 char buf[FORMAT_BYTES_MAX];
142 b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
144 format_bytes(buf, sizeof(buf), b->bandwidth));
/* Resolve path p to the dev_t of a block device suitable for blkio
 * attributes: if p is a block device node, use it directly; otherwise
 * use the device backing the file system p lives on, mapped to the
 * whole disk if it is a partition. Returns < 0 on failure. */
148 static int lookup_blkio_device(const char *p, dev_t *dev) {
157 return log_warning_errno(errno, "Couldn't stat device %s: %m", p);
159 if (S_ISBLK(st.st_mode))
161 else if (major(st.st_dev) != 0) {
162 /* If this is not a device node then find the block
163 * device this file is stored on */
166 /* If this is a partition, try to get the originating
168 block_get_whole_disk(*dev, dev);
/* major(st.st_dev) == 0 means a virtual/network file system with no local backing device. */
170 log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
/* Add a single device node to the devices.allow whitelist of the cgroup at
 * path. node is the /dev path of the device, acc the access string ("r",
 * "w", "m" in any combination). Non-device nodes are rejected. */
177 static int whitelist_device(const char *path, const char *node, const char *acc) {
/* Room for "c MAJ:MIN rwm": type char + two decimal dev_t halves + separators + access. */
178 char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
185 if (stat(node, &st) < 0) {
186 log_warning("Couldn't stat device %s", node);
190 if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
191 log_warning("%s is not a device.", node);
197 S_ISCHR(st.st_mode) ? 'c' : 'b',
198 major(st.st_rdev), minor(st.st_rdev),
201 r = cg_set_attribute("devices", path, "devices.allow", buf);
/* ENOENT/EROFS/EINVAL are expected in containers or on populated cgroups; downgrade to debug. */
203 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
204 "Failed to set devices.allow on %s: %m", path);
/* Whitelist all devices whose driver name matches the fnmatch() pattern
 * name, of the given type ('c' for character, 'b' for block), by scanning
 * /proc/devices for matching major numbers and writing "TYPE MAJ:* acc"
 * entries to devices.allow of the cgroup at path. */
209 static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
210 _cleanup_fclose_ FILE *f = NULL;
217 assert(type == 'b' || type == 'c');
219 f = fopen("/proc/devices", "re");
221 return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
223 FOREACH_LINE(line, f, goto fail) {
224 char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
/* /proc/devices is split into a "Character devices:" and a "Block devices:"
 * section; only lines in the section matching the requested type count. */
229 if (type == 'c' && streq(line, "Character devices:")) {
234 if (type == 'b' && streq(line, "Block devices:")) {
/* Each entry line is "<major> <driver-name>"; split on whitespace. */
249 w = strpbrk(p, WHITESPACE);
254 r = safe_atou(p, &maj);
261 w += strspn(w, WHITESPACE);
263 if (fnmatch(name, w, 0) != 0)
272 r = cg_set_attribute("devices", path, "devices.allow", buf);
274 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
275 "Failed to set devices.allow on %s: %m", path);
281 log_warning_errno(errno, "Failed to read /proc/devices: %m");
/* Write the settings of context c into the kernel cgroup attributes of the
 * cgroup at path, for every controller enabled in mask. state selects the
 * Startup* variants of CPU/blkio weights during boot. Errors are logged but
 * never propagated: this is best-effort by design. */
285 void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const char *path, ManagerState state) {
295 /* Some cgroup attributes are not supported on the root cgroup,
296 * hence silently ignore */
297 is_root = isempty(path) || path_equal(path, "/");
299 /* We generally ignore errors caused by read-only mounted
300 * cgroup trees (assuming we are running in a container then),
301 * and missing cgroups, i.e. EROFS and ENOENT. */
303 if ((mask & CGROUP_CPU) && !is_root) {
304 char buf[MAX(DECIMAL_STR_MAX(unsigned long), DECIMAL_STR_MAX(usec_t)) + 1];
/* Prefer StartupCPUShares while booting, then CPUShares, else kernel default 1024. */
306 sprintf(buf, "%lu\n",
307 IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != (unsigned long) -1 ? c->startup_cpu_shares :
308 c->cpu_shares != (unsigned long) -1 ? c->cpu_shares : 1024);
309 r = cg_set_attribute("cpu", path, "cpu.shares", buf);
311 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
312 "Failed to set cpu.shares on %s: %m", path);
/* Always pin the CFS period so the quota below is expressed against a known base. */
314 sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
315 r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
317 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
318 "Failed to set cpu.cfs_period_us on %s: %m", path);
/* Scale the per-second quota down to the configured period; "-1" disables the quota. */
320 if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
321 sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
322 r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
324 r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
326 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
327 "Failed to set cpu.cfs_quota_us on %s: %m", path);
330 if (mask & CGROUP_BLKIO) {
/* buf must fit the largest of: a weight, "MAJ:MIN weight", "MAJ:MIN bandwidth". */
331 char buf[MAX3(DECIMAL_STR_MAX(unsigned long)+1,
332 DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(unsigned long)*1,
333 DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
334 CGroupBlockIODeviceWeight *w;
335 CGroupBlockIODeviceBandwidth *b;
/* Same Startup*-during-boot preference as for CPU shares; kernel default is 1000. */
338 sprintf(buf, "%lu\n", IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != (unsigned long) -1 ? c->startup_blockio_weight :
339 c->blockio_weight != (unsigned long) -1 ? c->blockio_weight : 1000);
340 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
342 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
343 "Failed to set blkio.weight on %s: %m", path);
345 /* FIXME: no way to reset this list */
346 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
349 r = lookup_blkio_device(w->path, &dev);
353 sprintf(buf, "%u:%u %lu", major(dev), minor(dev), w->weight);
354 r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
356 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
357 "Failed to set blkio.weight_device on %s: %m", path);
361 /* FIXME: no way to reset this list */
362 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
366 r = lookup_blkio_device(b->path, &dev);
370 a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";
372 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
373 r = cg_set_attribute("blkio", path, a, buf);
375 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
376 "Failed to set %s on %s: %m", a, path);
380 if (mask & CGROUP_MEMORY) {
/* (uint64_t) -1 means "no limit"; writing "-1" resets any previous limit. */
381 if (c->memory_limit != (uint64_t) -1) {
382 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
384 sprintf(buf, "%" PRIu64 "\n", c->memory_limit);
385 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
387 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
390 log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
391 "Failed to set memory.limit_in_bytes on %s: %m", path);
394 if ((mask & CGROUP_DEVICE) && !is_root) {
395 CGroupDeviceAllow *a;
397 /* Changing the devices list of a populated cgroup
398 * might result in EINVAL, hence ignore EINVAL
/* Start from "deny all" if any restriction is configured, else allow all. */
401 if (c->device_allow || c->device_policy != CGROUP_AUTO)
402 r = cg_set_attribute("devices", path, "devices.deny", "a");
404 r = cg_set_attribute("devices", path, "devices.allow", "a");
406 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
407 "Failed to reset devices.list on %s: %m", path);
409 if (c->device_policy == CGROUP_CLOSED ||
410 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
/* Standard pseudo-devices every service is expected to have access to. */
411 static const char auto_devices[] =
412 "/dev/null\0" "rwm\0"
413 "/dev/zero\0" "rwm\0"
414 "/dev/full\0" "rwm\0"
415 "/dev/random\0" "rwm\0"
416 "/dev/urandom\0" "rwm\0"
418 "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */
422 NULSTR_FOREACH_PAIR(x, y, auto_devices)
423 whitelist_device(path, x, y);
425 whitelist_major(path, "pts", 'c', "rw");
426 whitelist_major(path, "kdbus", 'c', "rw");
427 whitelist_major(path, "kdbus/*", 'c', "rw");
/* User-configured entries: a /dev path, or "block-<driver>" / "char-<driver>" patterns. */
430 LIST_FOREACH(device_allow, a, c->device_allow) {
446 if (startswith(a->path, "/dev/"))
447 whitelist_device(path, a->path, acc);
448 else if (startswith(a->path, "block-"))
449 whitelist_major(path, a->path + 6, 'b', acc);
450 else if (startswith(a->path, "char-"))
451 whitelist_major(path, a->path + 5, 'c', acc);
453 log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
/* Compute which cgroup controllers this context actually needs: a
 * controller bit is set iff accounting is enabled for it or any of its
 * settings deviates from the "unset" sentinel. */
458 CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) {
459 CGroupControllerMask mask = 0;
461 /* Figure out which controllers we need */
463 if (c->cpu_accounting ||
464 c->cpu_shares != (unsigned long) -1 ||
465 c->startup_cpu_shares != (unsigned long) -1 ||
466 c->cpu_quota_per_sec_usec != USEC_INFINITY)
467 mask |= CGROUP_CPUACCT | CGROUP_CPU;
469 if (c->blockio_accounting ||
470 c->blockio_weight != (unsigned long) -1 ||
471 c->startup_blockio_weight != (unsigned long) -1 ||
472 c->blockio_device_weights ||
473 c->blockio_device_bandwidths)
474 mask |= CGROUP_BLKIO;
476 if (c->memory_accounting ||
477 c->memory_limit != (uint64_t) -1)
478 mask |= CGROUP_MEMORY;
480 if (c->device_allow ||
481 c->device_policy != CGROUP_AUTO)
482 mask |= CGROUP_DEVICE;
/* Controller mask a unit itself requires. With Delegate= on, all
 * controllers are claimed (the payload manages its own subtree) unless
 * the unit's exec context is known to drop privileges anyway. */
487 CGroupControllerMask unit_get_cgroup_mask(Unit *u) {
490 c = unit_get_cgroup_context(u);
494 /* If delegation is turned on, then turn on all cgroups,
495 * unless the process we fork into it is known to drop
496 * privileges anyway, and shouldn't get access to the
497 * controllers anyway. */
502 e = unit_get_exec_context(u);
503 if (!e || exec_context_maintains_privileges(e))
504 return _CGROUP_CONTROLLER_MASK_ALL;
507 return cgroup_context_get_mask(c);
/* Controller mask needed by all members of a slice, recursively, cached in
 * u->cgroup_members_mask (cgroup_members_mask_valid guards the cache).
 * Non-slice units have no members, so their mask stays 0. */
510 CGroupControllerMask unit_get_members_mask(Unit *u) {
513 if (u->cgroup_members_mask_valid)
514 return u->cgroup_members_mask;
516 u->cgroup_members_mask = 0;
518 if (u->type == UNIT_SLICE) {
/* Slice members are recorded as Before= dependencies; filter to
 * units whose slice reference actually points back at u. */
522 SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
527 if (UNIT_DEREF(member->slice) != u)
530 u->cgroup_members_mask |=
531 unit_get_cgroup_mask(member) |
532 unit_get_members_mask(member);
536 u->cgroup_members_mask_valid = true;
537 return u->cgroup_members_mask;
/* Controller mask needed by the unit's siblings: the members mask of its
 * containing slice. The root-level case (no slice) falls back to the
 * unit's own mask plus its members'. */
540 CGroupControllerMask unit_get_siblings_mask(Unit *u) {
543 if (UNIT_ISSET(u->slice))
544 return unit_get_members_mask(UNIT_DEREF(u->slice));
546 return unit_get_cgroup_mask(u) | unit_get_members_mask(u);
/* The mask of controllers that should actually be realized for this unit:
 * own + members + siblings, intersected with what the running kernel
 * supports. */
549 CGroupControllerMask unit_get_target_mask(Unit *u) {
550 CGroupControllerMask mask;
552 mask = unit_get_cgroup_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
553 mask &= u->manager->cgroup_supported;
558 /* Recurse from a unit up through its containing slices, propagating
559 * mask bits upward. A unit is also member of itself. */
560 void unit_update_cgroup_members_masks(Unit *u) {
561 CGroupControllerMask m;
566 /* Calculate subtree mask */
567 m = unit_get_cgroup_mask(u) | unit_get_members_mask(u);
569 /* See if anything changed from the previous invocation. If
570 * not, we're done. */
571 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
/* "more" is true only for a pure addition of bits: something was set
 * that wasn't before, and nothing that was set got cleared. */
575 u->cgroup_subtree_mask_valid &&
576 ((m & ~u->cgroup_subtree_mask) != 0) &&
577 ((~m & u->cgroup_subtree_mask) == 0);
579 u->cgroup_subtree_mask = m;
580 u->cgroup_subtree_mask_valid = true;
582 if (UNIT_ISSET(u->slice)) {
583 Unit *s = UNIT_DEREF(u->slice);
586 /* There's more set now than before. We
587 * propagate the new mask to the parent's mask
588 * (not caring if it actually was valid or
591 s->cgroup_members_mask |= m;
594 /* There's less set now than before (or we
595 * don't know), we need to recalculate
596 * everything, so let's invalidate the
597 * parent's members mask */
599 s->cgroup_members_mask_valid = false;
601 /* And now make sure that this change also hits our
603 unit_update_cgroup_members_masks(s);
/* Callback for cg_migrate_everywhere()/cg_attach_many_everywhere(): walk up
 * the slice chain from the unit in userdata and return the cgroup path of
 * the nearest ancestor that is realized for all controllers in mask. */
607 static const char *migrate_callback(CGroupControllerMask mask, void *userdata) {
614 if (u->cgroup_path &&
615 u->cgroup_realized &&
616 (u->cgroup_realized_mask & mask) == mask)
617 return u->cgroup_path;
619 u = UNIT_DEREF(u->slice);
/* Create (and register) the unit's cgroup in every hierarchy selected by
 * mask, then pull stray processes into it. On first use the default cgroup
 * path is computed and recorded in the manager's path -> unit hashmap so
 * empty-notifications can be routed back. Returns 0 on success, < 0 on
 * error. */
625 static int unit_create_cgroups(Unit *u, CGroupControllerMask mask) {
631 c = unit_get_cgroup_context(u);
635 if (!u->cgroup_path) {
636 _cleanup_free_ char *path = NULL;
638 path = unit_default_cgroup_path(u);
/* Register path -> unit; EEXIST means another unit already claims this cgroup. */
642 r = hashmap_put(u->manager->cgroup_unit, path, u);
644 log_error(r == -EEXIST ? "cgroup %s exists already: %s" : "hashmap_put failed for %s: %s", path, strerror(-r));
/* Ownership of path moves to the unit (hence no _cleanup_ free after this). */
648 u->cgroup_path = path;
653 /* First, create our own group */
654 r = cg_create_everywhere(u->manager->cgroup_supported, mask, u->cgroup_path);
656 return log_error_errno(r, "Failed to create cgroup %s: %m", u->cgroup_path);
658 /* Keep track that this is now realized */
659 u->cgroup_realized = true;
660 u->cgroup_realized_mask = mask;
662 if (u->type != UNIT_SLICE && !c->delegate) {
664 /* Then, possibly move things over, but not if
665 * subgroups may contain processes, which is the case
666 * for slice and delegation units. */
667 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
669 log_warning_errno(r, "Failed to migrate cgroup to %s: %m", u->cgroup_path);
/* Realize the unit's cgroup (creating it if needed) and attach all PIDs
 * tracked in u->pids to it, in every supported hierarchy. */
675 int unit_attach_pids_to_cgroup(Unit *u) {
679 r = unit_realize_cgroup(u);
683 r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
/* True if the unit's cgroup exists and was realized with exactly this
 * controller mask (note: equality, not subset — a differing mask forces
 * re-realization). */
690 static bool unit_has_mask_realized(Unit *u, CGroupControllerMask mask) {
693 return u->cgroup_realized && u->cgroup_realized_mask == mask;
696 /* Check if necessary controllers and attributes for a unit are in place.
699 * If not, create paths, move processes over, and set attributes.
701 * Returns 0 on success and < 0 on failure. */
702 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
703 CGroupControllerMask mask;
/* Being realized synchronously here supersedes any queued deferred work. */
708 if (u->in_cgroup_queue) {
709 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
710 u->in_cgroup_queue = false;
713 mask = unit_get_target_mask(u);
715 if (unit_has_mask_realized(u, mask))
718 /* First, realize parents */
719 if (UNIT_ISSET(u->slice)) {
720 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
725 /* And then do the real work */
726 r = unit_create_cgroups(u, mask);
730 /* Finally, apply the necessary attributes. */
731 cgroup_context_apply(unit_get_cgroup_context(u), mask, u->cgroup_path, state);
/* Queue a unit for deferred cgroup realization; no-op if already queued. */
736 static void unit_add_to_cgroup_queue(Unit *u) {
738 if (u->in_cgroup_queue)
741 LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
742 u->in_cgroup_queue = true;
/* Drain the manager's cgroup realization queue, realizing each queued unit.
 * Failures are logged and do not stop the drain. Returns the number of
 * units processed. */
745 unsigned manager_dispatch_cgroup_queue(Manager *m) {
751 state = manager_state(m);
/* unit_realize_cgroup_now() removes i from the queue, so this loop terminates. */
753 while ((i = m->cgroup_queue)) {
754 assert(i->in_cgroup_queue);
756 r = unit_realize_cgroup_now(i, state);
758 log_warning_errno(r, "Failed to realize cgroups for queued unit %s: %m", i->id);
766 static void unit_queue_siblings(Unit *u) {
769 /* This adds the siblings of the specified unit and the
770 * siblings of all parent units to the cgroup queue. (But
771 * neither the specified unit itself nor the parents.) */
/* Walk up the slice chain; at each level enqueue the slice's members. */
773 while ((slice = UNIT_DEREF(u->slice))) {
777 SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
781 /* Skip units that have a dependency on the slice
782 * but aren't actually in it. */
783 if (UNIT_DEREF(m->slice) != slice)
786 /* No point in doing cgroup application for units
787 * without active processes. */
788 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
791 /* If the unit doesn't need any new controllers
792 * and has current ones realized, it doesn't need
794 if (unit_has_mask_realized(m, unit_get_target_mask(m)))
797 unit_add_to_cgroup_queue(m);
/* Public entry point: realize this unit's cgroup now (parents synchronously)
 * and queue all siblings for deferred realization so weight-based
 * controllers stay balanced across the slice. */
804 int unit_realize_cgroup(Unit *u) {
809 c = unit_get_cgroup_context(u);
813 /* So, here's the deal: when realizing the cgroups for this
814 * unit, we need to first create all parents, but there's more
815 * actually: for the weight-based controllers we also need to
816 * make sure that all our siblings (i.e. units that are in the
817 * same slice as we are) have cgroups, too. Otherwise, things
818 * would become very uneven as each of their processes would
819 * get as much resources as all our group together. This call
820 * will synchronously create the parent cgroups, but will
821 * defer work on the siblings to the next event loop
824 /* Add all sibling slices to the cgroup queue. */
825 unit_queue_siblings(u);
827 /* And realize this one now (and apply the values) */
828 return unit_realize_cgroup_now(u, manager_state(u->manager));
/* Trim/remove the unit's cgroup (the root slice's own group is spared
 * deletion), drop it from the manager's path -> unit map, and reset the
 * unit's realization bookkeeping. */
831 void unit_destroy_cgroup_if_empty(Unit *u) {
839 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !unit_has_name(u, SPECIAL_ROOT_SLICE));
841 log_debug_errno(r, "Failed to destroy cgroup %s: %m", u->cgroup_path);
845 hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
847 free(u->cgroup_path);
848 u->cgroup_path = NULL;
849 u->cgroup_realized = false;
850 u->cgroup_realized_mask = 0;
/* Heuristically find the main PID of a unit by enumerating the processes
 * in its cgroup: candidates are our direct children; returns 0 if the
 * cgroup can't be read or no unambiguous candidate exists — TODO confirm
 * the ambiguity handling on the elided lines. */
853 pid_t unit_search_main_pid(Unit *u) {
854 _cleanup_fclose_ FILE *f = NULL;
855 pid_t pid = 0, npid, mypid;
862 if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f) < 0)
866 while (cg_read_pid(f, &npid) > 0) {
872 /* Ignore processes that aren't our kids */
873 if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)
877 /* Dang, there's more than one daemonized PID
878 in this group, so we don't know what process
879 is the main process. */
/* One-time cgroup setup for the manager: determine our root cgroup,
 * install the release agent (system instance only), attach ourselves to
 * the root, pin the cgroupfs mount via an open fd, and detect supported
 * controllers. Returns 0 on success, < 0 on fatal error. */
890 int manager_setup_cgroup(Manager *m) {
891 _cleanup_free_ char *path = NULL;
896 /* 1. Determine hierarchy */
897 free(m->cgroup_root);
898 m->cgroup_root = NULL;
900 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
902 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
904 /* LEGACY: Already in /system.slice? If so, let's cut this
905 * off. This is to support live upgrades from older systemd
906 * versions where PID 1 was moved there. */
907 if (m->running_as == SYSTEMD_SYSTEM) {
910 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE)
912 e = endswith(m->cgroup_root, "/system");
917 /* And make sure to store away the root value without trailing
918 * slash, even for the root dir, so that we can easily prepend
920 if (streq(m->cgroup_root, "/"))
921 m->cgroup_root[0] = 0;
924 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
926 return log_error_errno(r, "Cannot find cgroup mount point: %m");
928 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
931 /* 3. Install agent */
932 if (m->running_as == SYSTEMD_SYSTEM) {
933 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
935 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
937 log_debug("Installed release agent.");
939 log_debug("Release agent already installed.");
942 /* 4. Make sure we are in the root cgroup */
943 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, 0);
945 return log_error_errno(r, "Failed to create root cgroup hierarchy: %m");
947 /* 5. And pin it, so that it cannot be unmounted */
948 safe_close(m->pin_cgroupfs_fd);
950 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
951 if (m->pin_cgroupfs_fd < 0)
952 return log_error_errno(errno, "Failed to open pin file: %m");
954 /* 6. Always enable hierarchical support if it exists... */
955 cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
958 /* 7. Figure out which controllers are supported */
959 m->cgroup_supported = cg_mask_supported();
/* Tear down the manager's cgroup state: optionally trim the tree (we can't
 * delete the group we are still in), release the cgroupfs pin fd, and free
 * the stored root path. */
964 void manager_shutdown_cgroup(Manager *m, bool delete) {
967 /* We can't really delete the group, since we are in it. But
969 if (delete && m->cgroup_root)
970 cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
972 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
974 free(m->cgroup_root);
975 m->cgroup_root = NULL;
/* Map a cgroup path to the unit that owns it, falling back to ancestor
 * paths (p) for processes living in sub-cgroups of a unit's group. */
978 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
985 u = hashmap_get(m->cgroup_unit, cgroup);
999 u = hashmap_get(m->cgroup_unit, p);
/* Resolve a PID to its owning unit by looking up the cgroup it sits in. */
1005 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
1006 _cleanup_free_ char *cgroup = NULL;
1014 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
1018 return manager_get_unit_by_cgroup(m, cgroup);
/* Handle a cgroup-empty notification: re-verify emptiness recursively,
 * forward the event to the owning unit's type-specific handler, and queue
 * the unit for garbage collection. */
1021 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
1028 u = manager_get_unit_by_cgroup(m, cgroup);
1030 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, true);
1032 if (UNIT_VTABLE(u)->notify_cgroup_empty)
1033 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1035 unit_add_to_gc_queue(u);
/* Maps CGroupDevicePolicy enum values to their DevicePolicy= config strings;
 * the macro below generates the _to_string()/_from_string() pair. */
1042 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
1043 [CGROUP_AUTO] = "auto",
1044 [CGROUP_CLOSED] = "closed",
1045 [CGROUP_STRICT] = "strict",
1048 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);