cgroup: support prefix "-" in cgroups whitelisting entries (#4687)
[elogind.git] / src / core / cgroup.c
/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
//#include "special.h"
#include "string-table.h"
#include "string-util.h"
#include "stdio-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

#if 0 /// UNNEEDED by elogind
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }
}

static int lookup_block_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device, and the block device of its file system cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}
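
/* Worked example (device numbers assumed for illustration): stat() on a
 * regular file that lives on /dev/sda2 does not satisfy S_ISBLK() but has a
 * real st_dev (say 8:2), so the partition's dev_t is taken first and
 * block_get_whole_disk() then rewrites it to the whole disk, 8:0. stat() on
 * "/dev/sda" itself is S_ISBLK() and its st_rdev is used directly. A file on
 * a virtual file system (st_dev major 0, e.g. tmpfs) has no usable backing
 * block device and yields -ENODEV. */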

static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (startswith(node, "-")) {
                /* An entry prefixed with "-" must be silently ignored if the device node does not exist */
                ignore_notfound = true;
                node++;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
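
/* Illustrative example (device name and numbers are hypothetical): a
 * whitelisting entry "-/dev/mydev" with access string "rwm" arrives here as
 * node="-/dev/mydev". The "-" is stripped before stat(); if /dev/mydev does
 * not exist the entry is skipped silently, whereas a plain "/dev/mydev"
 * entry would log a warning on any stat() failure. If the node exists and
 * is, say, a character device 10:229, the string written to devices.allow
 * is "c 10:229 rwm". */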

static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        log_warning_errno(errno, "Failed to read /proc/devices: %m");
        return -errno;
}
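
/* Sketch of the input this parser walks, i.e. the format of /proc/devices
 * (major numbers vary per system; these are illustrative):
 *
 *         Character devices:
 *           1 mem
 *           4 tty
 *         136 pts
 *
 *         Block devices:
 *           8 sd
 *
 * A call like whitelist_major(path, "pts", 'c', "rw") would match the "pts"
 * line in the character section and write "c 136:* rw" to devices.allow,
 * whitelisting all minors of that major. The name argument is an fnmatch()
 * pattern, so entries such as "kdbus/*" also work. */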

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}
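
/* Worked example, following the arithmetic above: with
 * CPUQuotaPerSecUSec=500ms, quota is 500000. Scaled to the fixed 100ms
 * period this is 500000 * 100000 / 1000000 = 50000, so "50000 100000" is
 * written to cpu.max, i.e. 50ms of CPU time per 100ms period (50% of one
 * CPU). With no quota configured, "max 100000" is written instead. */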

static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
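
/* Worked example: the conversion is linear through the two defaults
 * (shares 1024 <-> weight 100). CPUShares=512 maps to weight
 * 512 * 100 / 1024 = 50, and CPUWeight=200 maps back to shares
 * 200 * 1024 / 100 = 2048; results are clamped to the valid weight and
 * shares ranges. */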

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
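
/* Worked example: the same linear mapping through the defaults
 * (blkio weight 500 <-> io weight 100). BlockIOWeight=1000 maps to
 * io.weight 1000 * 100 / 500 = 200, and IOWeight=50 maps back to
 * blkio.weight 50 * 500 / 100 = 250, clamped to the respective ranges. */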

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}

static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
                if (limits[type] != cgroup_io_limit_defaults[type]) {
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                        n++;
                } else {
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
                }
        }

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
        return n;
}
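
/* Example of a resulting io.max line (device numbers assumed): for a device
 * 8:0 with only IOReadBandwidthMax=1M configured, the attribute written is
 *
 *         8:0 rbps=1048576 wbps=max riops=max wiops=max
 *
 * and the function returns 1, the number of non-default limits. */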

static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        if (rbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        if (wbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");

        return n;
}
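
/* The legacy equivalent spreads the same information over two attributes,
 * e.g. for device 8:0 (numbers assumed) with a 1M read limit:
 *
 *         blkio.throttle.read_bps_device:  "8:0 1048576"
 *         blkio.throttle.write_bps_device: "8:0 18446744073709551615"
 *
 * Note that an unset limit is still written out, as the numeric value of
 * CGROUP_LIMIT_MAX; in practice this replaces any previously configured
 * limit with one that is never reached. */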

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}

static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        path = u->cgroup_path;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight = cgroup_context_has_cpu_weight(c);
                bool has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                if (!cgroup_apply_io_device_limit(u, b->path, limits))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                        }
                }
        }

        if (mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
                                if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                }
        }

        if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max;
                        uint64_t swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
                                /* Allow /run/elogind/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != CGROUP_LIMIT_MAX) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to set pids.max: %m");
        }
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_all_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}
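
/* Propagation example (unit names assumed): if foo.service in bar.slice
 * starts to need CGROUP_MASK_MEMORY in addition to its previous
 * CGROUP_MASK_CPU, only bits were added, so "more" is true and the new
 * subtree mask is simply OR-ed into bar.slice's members mask. If a bit is
 * dropped instead, the parent's members mask cannot be shrunk in place, so
 * it is invalidated and recalculated lazily on the next
 * unit_get_members_mask() call. */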

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}
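
/* Example (unit and slice names assumed): for a unit "getty@tty1.service"
 * in "system-getty.slice" with cgroup root "/", cg_slice_to_path() expands
 * the slice to "system.slice/system-getty.slice", cg_escape() leaves the
 * already safe unit name untouched, and the returned path is
 * "/system.slice/system-getty.slice/getty@tty1.service". */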

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
1347                         log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
1348         }
1349
1350         return 0;
1351 }
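
/* Editorial note: roughly speaking, target_mask is the set of controllers
 * that must exist on the unit's own cgroup, while enable_mask is the set to
 * be enabled for its children, which on the unified hierarchy means
 * "+controller" entries in this cgroup's "cgroup.subtree_control". */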
1352
1353 int unit_attach_pids_to_cgroup(Unit *u) {
1354         int r;
1355         assert(u);
1356
1357         r = unit_realize_cgroup(u);
1358         if (r < 0)
1359                 return r;
1360
1361         r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
1362         if (r < 0)
1363                 return r;
1364
1365         return 0;
1366 }
1367
1368 static void cgroup_xattr_apply(Unit *u) {
1369         char ids[SD_ID128_STRING_MAX];
1370         int r;
1371
1372         assert(u);
1373
1374         if (!MANAGER_IS_SYSTEM(u->manager))
1375                 return;
1376
1377         if (sd_id128_is_null(u->invocation_id))
1378                 return;
1379
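        /* sd_id128_to_string() formats the ID as 32 lowercase hex characters
         * plus a trailing NUL; passing an explicit size of 32 below stores
         * the xattr without the NUL terminator. */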
1380         r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
1381                          "trusted.invocation_id",
1382                          sd_id128_to_string(u->invocation_id, ids), 32,
1383                          0);
1384         if (r < 0)
1385                 log_unit_warning_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
1386 }
1387
1388 static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask enable_mask) {
1389         assert(u);
1390
1391         return u->cgroup_realized && u->cgroup_realized_mask == target_mask && u->cgroup_enabled_mask == enable_mask;
1392 }
1393
1394 /* Check if necessary controllers and attributes for a unit are in place.
1395  *
1396  * If so, do nothing.
1397  * If not, create paths, move processes over, and set attributes.
1398  *
1399  * Returns 0 on success and < 0 on failure. */
1400 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
1401         CGroupMask target_mask, enable_mask;
1402         int r;
1403
1404         assert(u);
1405
1406         if (u->in_cgroup_queue) {
1407                 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
1408                 u->in_cgroup_queue = false;
1409         }
1410
1411         target_mask = unit_get_target_mask(u);
1412         enable_mask = unit_get_enable_mask(u);
1413
1414         if (unit_has_mask_realized(u, target_mask, enable_mask))
1415                 return 0;
1416
1417         /* First, realize parents */
1418         if (UNIT_ISSET(u->slice)) {
1419                 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
1420                 if (r < 0)
1421                         return r;
1422         }
1423
1424         /* And then do the real work */
1425         r = unit_create_cgroup(u, target_mask, enable_mask);
1426         if (r < 0)
1427                 return r;
1428
1429         /* Finally, apply the necessary attributes. */
1430         cgroup_context_apply(u, target_mask, state);
1431         cgroup_xattr_apply(u);
1432
1433         return 0;
1434 }
1435
1436 static void unit_add_to_cgroup_queue(Unit *u) {
1437
1438         if (u->in_cgroup_queue)
1439                 return;
1440
1441         LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
1442         u->in_cgroup_queue = true;
1443 }
1444
1445 unsigned manager_dispatch_cgroup_queue(Manager *m) {
1446         ManagerState state;
1447         unsigned n = 0;
1448         Unit *i;
1449         int r;
1450
1451         state = manager_state(m);
1452
1453         while ((i = m->cgroup_queue)) {
1454                 assert(i->in_cgroup_queue);
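                /* Note: unit_realize_cgroup_now() removes the unit from the
                 * queue right away, hence this loop terminates even if
                 * realization fails. */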
1455
1456                 r = unit_realize_cgroup_now(i, state);
1457                 if (r < 0)
1458                         log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
1459
1460                 n++;
1461         }
1462
1463         return n;
1464 }
1465
1466 static void unit_queue_siblings(Unit *u) {
1467         Unit *slice;
1468
1469         /* This adds the siblings of the specified unit and the
1470          * siblings of all parent units to the cgroup queue. (But
1471          * neither the specified unit itself nor the parents.) */
1472
1473         while ((slice = UNIT_DEREF(u->slice))) {
1474                 Iterator i;
1475                 Unit *m;
1476
1477                 SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
1478                         if (m == u)
1479                                 continue;
1480
1481                         /* Skip units that have a dependency on the slice
1482                          * but aren't actually in it. */
1483                         if (UNIT_DEREF(m->slice) != slice)
1484                                 continue;
1485
1486                         /* No point in doing cgroup application for units
1487                          * without active processes. */
1488                         if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
1489                                 continue;
1490
1491                         /* If the unit doesn't need any new controllers
1492                          * and has current ones realized, it doesn't need
1493                          * any changes. */
1494                         if (unit_has_mask_realized(m, unit_get_target_mask(m), unit_get_enable_mask(m)))
1495                                 continue;
1496
1497                         unit_add_to_cgroup_queue(m);
1498                 }
1499
1500                 u = slice;
1501         }
1502 }
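
/* Worked example (editorial): realizing foo.service inside system.slice
 * queues all other units of system.slice, then moves up and queues the
 * siblings of system.slice itself; slice members are enumerated via the
 * slice's UNIT_BEFORE dependencies, as seen above. */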
1503
1504 int unit_realize_cgroup(Unit *u) {
1505         assert(u);
1506
1507         if (!UNIT_HAS_CGROUP_CONTEXT(u))
1508                 return 0;
1509
1510         /* So, here's the deal: when realizing the cgroups for this
1511          * unit, we need to first create all parents, but there's
1512          * actually more to it: for the weight-based controllers we
1513          * also need to make sure that all our siblings (i.e. units
1514          * that are in the same slice as we are) have cgroups, too.
1515          * Otherwise, things would become very uneven, as each of
1516          * their processes would get as many resources as all of our
1517          * processes together. This call will synchronously create
1518          * the parent cgroups, but will defer work on the siblings
1519          * to the next event loop iteration. */
1520
1521         /* Add all siblings (of this unit and of its parent slices) to the cgroup queue. */
1522         unit_queue_siblings(u);
1523
1524         /* And realize this one now (and apply the values) */
1525         return unit_realize_cgroup_now(u, manager_state(u->manager));
1526 }
1527
1528 void unit_release_cgroup(Unit *u) {
1529         assert(u);
1530
1531         /* Forgets all cgroup details for this unit */
1532
1533         if (u->cgroup_path) {
1534                 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1535                 u->cgroup_path = mfree(u->cgroup_path);
1536         }
1537
1538         if (u->cgroup_inotify_wd >= 0) {
1539                 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1540                         log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);
1541
1542                 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1543                 u->cgroup_inotify_wd = -1;
1544         }
1545 }
1546
1547 void unit_prune_cgroup(Unit *u) {
1548         int r;
1549         bool is_root_slice;
1550
1551         assert(u);
1552
1553         /* Removes the cgroup, if empty and possible, and stops watching it. */
1554
1555         if (!u->cgroup_path)
1556                 return;
1557
1558         (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
1559
1560         is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1561
1562         r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
1563         if (r < 0) {
1564                 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
1565                 return;
1566         }
1567
1568         if (is_root_slice)
1569                 return;
1570
1571         unit_release_cgroup(u);
1572
1573         u->cgroup_realized = false;
1574         u->cgroup_realized_mask = 0;
1575         u->cgroup_enabled_mask = 0;
1576 }
1577
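/* Editorial summary: scans the unit's cgroup for the "main" PID, i.e. a
 * daemonized process that got reparented to the manager itself; returns
 * -ENODATA if more than one such candidate is found. */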
1578 int unit_search_main_pid(Unit *u, pid_t *ret) {
1579         _cleanup_fclose_ FILE *f = NULL;
1580         pid_t pid = 0, npid, mypid;
1581         int r;
1582
1583         assert(u);
1584         assert(ret);
1585
1586         if (!u->cgroup_path)
1587                 return -ENXIO;
1588
1589         r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1590         if (r < 0)
1591                 return r;
1592
1593         mypid = getpid();
1594         while (cg_read_pid(f, &npid) > 0) {
1595                 pid_t ppid;
1596
1597                 if (npid == pid)
1598                         continue;
1599
1600                 /* Ignore processes that aren't our kids */
1601                 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
1602                         continue;
1603
1604                 if (pid != 0) {
1605                         /* Dang, there's more than one daemonized PID
1606                          * in this group, so we don't know what
1607                          * process is the main process. */
1608                         return -ENODATA;
1609                 }
1610
1611                 pid = npid;
1612         }
1613
1614         *ret = pid;
1615         return 0;
1616 }
1617
1618 static int unit_watch_pids_in_path(Unit *u, const char *path) {
1619         _cleanup_closedir_ DIR *d = NULL;
1620         _cleanup_fclose_ FILE *f = NULL;
1621         int ret = 0, r;
1622
1623         assert(u);
1624         assert(path);
1625
1626         r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1627         if (r < 0)
1628                 ret = r;
1629         else {
1630                 pid_t pid;
1631
1632                 while ((r = cg_read_pid(f, &pid)) > 0) {
1633                         r = unit_watch_pid(u, pid);
1634                         if (r < 0 && ret >= 0)
1635                                 ret = r;
1636                 }
1637
1638                 if (r < 0 && ret >= 0)
1639                         ret = r;
1640         }
1641
1642         r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1643         if (r < 0) {
1644                 if (ret >= 0)
1645                         ret = r;
1646         } else {
1647                 char *fn;
1648
1649                 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1650                         _cleanup_free_ char *p = NULL;
1651
1652                         p = strjoin(path, "/", fn);
1653                         free(fn);
1654
1655                         if (!p)
1656                                 return -ENOMEM;
1657
1658                         r = unit_watch_pids_in_path(u, p);
1659                         if (r < 0 && ret >= 0)
1660                                 ret = r;
1661                 }
1662
1663                 if (r < 0 && ret >= 0)
1664                         ret = r;
1665         }
1666
1667         return ret;
1668 }
1669
1670 int unit_watch_all_pids(Unit *u) {
1671         assert(u);
1672
1673         /* Adds all PIDs from our cgroup to the set of PIDs we
1674          * watch. This is a fallback logic for cases where we do not
1675          * get reliable cgroup empty notifications: we try to use
1676          * SIGCHLD as replacement. */
1677
1678         if (!u->cgroup_path)
1679                 return -ENOENT;
1680
1681         if (cg_unified(SYSTEMD_CGROUP_CONTROLLER) > 0) /* On unified we can use proper notifications */
1682                 return 0;
1683
1684         return unit_watch_pids_in_path(u, u->cgroup_path);
1685 }
1686
1687 int unit_notify_cgroup_empty(Unit *u) {
1688         int r;
1689
1690         assert(u);
1691
1692         if (!u->cgroup_path)
1693                 return 0;
1694
1695         r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
1696         if (r <= 0)
1697                 return r;
1698
1699         unit_add_to_gc_queue(u);
1700
1701         if (UNIT_VTABLE(u)->notify_cgroup_empty)
1702                 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1703
1704         return 0;
1705 }
1706
1707 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1708         Manager *m = userdata;
1709
1710         assert(s);
1711         assert(fd >= 0);
1712         assert(m);
1713
1714         for (;;) {
1715                 union inotify_event_buffer buffer;
1716                 struct inotify_event *e;
1717                 ssize_t l;
1718
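                /* Each read() returns a batch of variable-length inotify
                 * events; FOREACH_INOTIFY_EVENT below walks them in place
                 * inside the buffer. */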
1719                 l = read(fd, &buffer, sizeof(buffer));
1720                 if (l < 0) {
1721                         if (errno == EINTR || errno == EAGAIN)
1722                                 return 0;
1723
1724                         return log_error_errno(errno, "Failed to read control group inotify events: %m");
1725                 }
1726
1727                 FOREACH_INOTIFY_EVENT(e, buffer, l) {
1728                         Unit *u;
1729
1730                         if (e->wd < 0)
1731                                 /* Queue overflow has no watch descriptor */
1732                                 continue;
1733
1734                         if (e->mask & IN_IGNORED)
1735                                 /* The watch was just removed */
1736                                 continue;
1737
1738                         u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
1739                         if (!u) /* Note that inotify might deliver
1740                                  * events for a watch even after it
1741                                  * was removed, because it was queued
1742                                  * before the removal. Let's ignore
1743                                  * this here safely. */
1744                                 continue;
1745
1746                         (void) unit_notify_cgroup_empty(u);
1747                 }
1748         }
1749 }
1750 #endif // 0
1751
1752 int manager_setup_cgroup(Manager *m) {
1753         _cleanup_free_ char *path = NULL;
1754         CGroupController c;
1755         int r, all_unified, systemd_unified;
1756         char *e;
1757
1758         assert(m);
1759
1760         /* 1. Determine hierarchy */
1761         m->cgroup_root = mfree(m->cgroup_root);
1762         r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
1763         if (r < 0)
1764                 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
1765
1766 #if 0 /// elogind does not support systemd scopes and slices
1767         /* Chop off the init scope, if we are already located in it */
1768         e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1769
1770         /* LEGACY: Also chop off the system slice if we are in
1771          * it. This is to support live upgrades from older systemd
1772          * versions where PID 1 was moved there. Also see
1773          * cg_get_root_path(). */
1774         if (!e && MANAGER_IS_SYSTEM(m)) {
1775                 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
1776                 if (!e)
1777                         e = endswith(m->cgroup_root, "/system"); /* even more legacy */
1778         }
1779         if (e)
1780                 *e = 0;
1781 #endif // 0
1782
1783         /* And make sure to store away the root value without trailing
1784          * slash, even for the root dir, so that we can easily prepend
1785          * it everywhere. */
1786         while ((e = endswith(m->cgroup_root, "/")))
1787                 *e = 0;
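        /* For example (editorial note): "/" becomes "" and "/foo/" becomes
         * "/foo", so that paths can be built by plain concatenation. */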
1788         log_debug_elogind("Cgroup Controller \"%s\" -> root \"%s\"",
1789                           SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root);
1790
1791         /* 2. Show data */
1792         r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
1793         if (r < 0)
1794                 return log_error_errno(r, "Cannot find cgroup mount point: %m");
1795
1796         all_unified = cg_all_unified();
1797         systemd_unified = cg_unified(SYSTEMD_CGROUP_CONTROLLER);
1798
1799         if (all_unified < 0 || systemd_unified < 0)
1800                 return log_error_errno(all_unified < 0 ? all_unified : systemd_unified,
1801                                        "Couldn't determine if we are running in the unified hierarchy: %m");
1802
1803         if (all_unified > 0)
1804                 log_debug("Unified cgroup hierarchy is located at %s.", path);
1805         else if (systemd_unified > 0)
1806                 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
1807         else
1808                 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);
1809
1810         if (!m->test_run) {
1811                 const char *scope_path;
1812
1813                 /* 3. Install agent */
1814                 if (systemd_unified) {
1815
1816                         /* In the unified hierarchy we can get
1817                          * cgroup empty notifications via inotify. */
1818
1819 #if 0 /// elogind does not support the unified hierarchy, yet.
1820                         m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1821                         safe_close(m->cgroup_inotify_fd);
1822
1823                         m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
1824                         if (m->cgroup_inotify_fd < 0)
1825                                 return log_error_errno(errno, "Failed to create control group inotify object: %m");
1826
1827                         r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
1828                         if (r < 0)
1829                                 return log_error_errno(r, "Failed to watch control group inotify object: %m");
1830
1831                         /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
1832                          * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
1833                         r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-5);
1834                         if (r < 0)
1835                                 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
1836
1837                         (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
1838
1839 #else
1840                         return log_error_errno(EOPNOTSUPP, "Unified cgroup hierarchy not supported: %m");
1841 #endif // 0
1842                 } else if (MANAGER_IS_SYSTEM(m)) {
1843
1844                         /* On the legacy hierarchy we only get
1845                          * notifications via cgroup agents. (Which
1846                          * isn't really reliable, since it does not
1847                          * generate events when control groups with
1848                          * children run empty.) */
1849
1850                         r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
1851                         if (r < 0)
1852                                 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
1853                         else if (r > 0)
1854                                 log_debug("Installed release agent.");
1855                         else
1856                                 log_debug("Release agent already installed.");
1857                 }
1858
1859 #if 0 /// elogind is not meant to run in systemd init scope
1860                 /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
1861                 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1862                 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1863 #else
1864                 if (streq(SYSTEMD_CGROUP_CONTROLLER, "name=elogind"))
1865                         // we are our own cgroup controller
1866                         scope_path = strjoina("");
1867                 else if (streq(m->cgroup_root, "/elogind"))
1868                         // root already is our cgroup
1869                         scope_path = strjoina(m->cgroup_root);
1870                 else
1871                         // we have to create our own group
1872                         scope_path = strjoina(m->cgroup_root, "/elogind");
1873                 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1874 #endif // 0
1875                 if (r < 0)
1876                         return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
1877                 log_debug_elogind("Created control group \"%s\"", scope_path);
1878
1879                 /* Also, move all other userspace processes remaining
1880                  * in the root cgroup into that scope. */
1881                 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1882                 if (r < 0)
1883                         log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
1884
1885                 /* 5. And pin it, so that it cannot be unmounted */
1886                 safe_close(m->pin_cgroupfs_fd);
1887                 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
1888                 if (m->pin_cgroupfs_fd < 0)
1889                         return log_error_errno(errno, "Failed to open pin file: %m");
1890
1891                 /* 6. Always enable hierarchical support if it exists... */
1892                 if (!all_unified)
1893                         (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
1894         }
1895
1896         /* 7. Figure out which controllers are supported */
1897         r = cg_mask_supported(&m->cgroup_supported);
1898         if (r < 0)
1899                 return log_error_errno(r, "Failed to determine supported controllers: %m");
1900
1901         for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
1902                 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
1903
1904         return 0;
1905 }
1906
1907 void manager_shutdown_cgroup(Manager *m, bool delete) {
1908         assert(m);
1909
1910         /* We can't really delete the group, since we are in it. But
1911          * let's trim it. */
1912         if (delete && m->cgroup_root)
1913                 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
1914
1915 #if 0 /// elogind does not support the unified hierarchy, yet.
1916         m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
1917
1918         m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1919         m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
1920 #endif // 0
1921
1922         m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
1923
1924         m->cgroup_root = mfree(m->cgroup_root);
1925 }
1926
1927 #if 0 /// UNNEEDED by elogind
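/* Maps a cgroup path to the unit owning it, walking up the hierarchy
 * ("/a/b/c", then "/a/b", then "/a") and falling back to the root slice
 * when no deeper match exists. */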
1928 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
1929         char *p;
1930         Unit *u;
1931
1932         assert(m);
1933         assert(cgroup);
1934
1935         u = hashmap_get(m->cgroup_unit, cgroup);
1936         if (u)
1937                 return u;
1938
1939         p = strdupa(cgroup);
1940         for (;;) {
1941                 char *e;
1942
1943                 e = strrchr(p, '/');
1944                 if (!e || e == p)
1945                         return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
1946
1947                 *e = 0;
1948
1949                 u = hashmap_get(m->cgroup_unit, p);
1950                 if (u)
1951                         return u;
1952         }
1953 }
1954
1955 Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
1956         _cleanup_free_ char *cgroup = NULL;
1957         int r;
1958
1959         assert(m);
1960
1961         if (pid <= 0)
1962                 return NULL;
1963
1964         r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
1965         if (r < 0)
1966                 return NULL;
1967
1968         return manager_get_unit_by_cgroup(m, cgroup);
1969 }
1970
1971 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
1972         Unit *u;
1973
1974         assert(m);
1975
1976         if (pid <= 0)
1977                 return NULL;
1978
1979         if (pid == 1)
1980                 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
1981
1982         u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
1983         if (u)
1984                 return u;
1985
1986         u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
1987         if (u)
1988                 return u;
1989
1990         return manager_get_unit_by_pid_cgroup(m, pid);
1991 }
1992 #endif // 0
1993
1994 #if 0 /// elogind must substitute this with its own variant
1995 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
1996         Unit *u;
1997
1998         assert(m);
1999         assert(cgroup);
2000
2001         log_debug("Got cgroup empty notification for: %s", cgroup);
2002
2003         u = manager_get_unit_by_cgroup(m, cgroup);
2004         if (!u)
2005                 return 0;
2006
2007         return unit_notify_cgroup_empty(u);
2008 }
2009 #else
2010 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
2011         Session *s;
2012
2013         assert(m);
2014         assert(cgroup);
2015
2016         log_debug("Got cgroup empty notification for: %s", cgroup);
2017
2018         s = hashmap_get(m->sessions, cgroup);
2019
2020         if (s) {
2021                 session_finalize(s);
2022                 session_free(s);
2023         } else
2024                 log_warning("Session not found: %s", cgroup);
2025
2026         return 0;
2027 }
2028 #endif // 0
2029 #if 0 /// UNNEEDED by elogind
2030 int unit_get_memory_current(Unit *u, uint64_t *ret) {
2031         _cleanup_free_ char *v = NULL;
2032         int r;
2033
2034         assert(u);
2035         assert(ret);
2036
2037         if (!u->cgroup_path)
2038                 return -ENODATA;
2039
2040         if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
2041                 return -ENODATA;
2042
2043         if (cg_all_unified() <= 0)
2044                 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
2045         else
2046                 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
2047         if (r == -ENOENT)
2048                 return -ENODATA;
2049         if (r < 0)
2050                 return r;
2051
2052         return safe_atou64(v, ret);
2053 }
2054
2055 int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2056         _cleanup_free_ char *v = NULL;
2057         int r;
2058
2059         assert(u);
2060         assert(ret);
2061
2062         if (!u->cgroup_path)
2063                 return -ENODATA;
2064
2065         if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2066                 return -ENODATA;
2067
2068         r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2069         if (r == -ENOENT)
2070                 return -ENODATA;
2071         if (r < 0)
2072                 return r;
2073
2074         return safe_atou64(v, ret);
2075 }
2076
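/* Reads the unit's accumulated CPU time, normalized to nanoseconds: the
 * unified hierarchy reports "usage_usec" (microseconds) in "cpu.stat",
 * while the legacy "cpuacct.usage" attribute is already in nanoseconds. */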
2077 static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2078         _cleanup_free_ char *v = NULL;
2079         uint64_t ns;
2080         int r;
2081
2082         assert(u);
2083         assert(ret);
2084
2085         if (!u->cgroup_path)
2086                 return -ENODATA;
2087
2088         if (cg_all_unified() > 0) {
2089                 const char *keys[] = { "usage_usec", NULL };
2090                 _cleanup_free_ char *val = NULL;
2091                 uint64_t us;
2092
2093                 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2094                         return -ENODATA;
2095
2096                 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);
2097                 if (r < 0)
2098                         return r;
2099
2100                 r = safe_atou64(val, &us);
2101                 if (r < 0)
2102                         return r;
2103
2104                 ns = us * NSEC_PER_USEC;
2105         } else {
2106                 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2107                         return -ENODATA;
2108
2109                 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2110                 if (r == -ENOENT)
2111                         return -ENODATA;
2112                 if (r < 0)
2113                         return r;
2114
2115                 r = safe_atou64(v, &ns);
2116                 if (r < 0)
2117                         return r;
2118         }
2119
2120         *ret = ns;
2121         return 0;
2122 }
2123
2124 int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2125         nsec_t ns;
2126         int r;
2127
2128         assert(u);
2129
2130         /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
2131          * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
2132          * call this function with a NULL return value. */
2133
2134         r = unit_get_cpu_usage_raw(u, &ns);
2135         if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
2136                 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
2137                  * cached value. */
2138
2139                 if (ret)
2140                         *ret = u->cpu_usage_last;
2141                 return 0;
2142         }
2143         if (r < 0)
2144                 return r;
2145
2146         if (ns > u->cpu_usage_base)
2147                 ns -= u->cpu_usage_base;
2148         else
2149                 ns = 0;
2150
2151         u->cpu_usage_last = ns;
2152         if (ret)
2153                 *ret = ns;
2154
2155         return 0;
2156 }
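
/* Hypothetical usage sketch (editorial; the identifiers below exist in
 * systemd, but this snippet is not part of this file):
 *
 *     char buf[FORMAT_TIMESPAN_MAX];
 *     nsec_t ns;
 *
 *     if (unit_get_cpu_usage(u, &ns) >= 0)
 *             log_unit_debug(u, "Consumed %s of CPU time.",
 *                            format_timespan(buf, sizeof(buf), ns / NSEC_PER_USEC, USEC_PER_MSEC));
 */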
2157
2158 int unit_reset_cpu_usage(Unit *u) {
2159         nsec_t ns;
2160         int r;
2161
2162         assert(u);
2163
2164         u->cpu_usage_last = NSEC_INFINITY;
2165
2166         r = unit_get_cpu_usage_raw(u, &ns);
2167         if (r < 0) {
2168                 u->cpu_usage_base = 0;
2169                 return r;
2170         }
2171
2172         u->cpu_usage_base = ns;
2173         return 0;
2174 }
2175
2176 bool unit_cgroup_delegate(Unit *u) {
2177         CGroupContext *c;
2178
2179         assert(u);
2180
2181         c = unit_get_cgroup_context(u);
2182         if (!c)
2183                 return false;
2184
2185         return c->delegate;
2186 }
2187
2188 void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2189         assert(u);
2190
2191         if (!UNIT_HAS_CGROUP_CONTEXT(u))
2192                 return;
2193
2194         if (m == 0)
2195                 return;
2196
2197         /* always invalidate compat pairs together */
2198         if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2199                 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2200
2201         if ((u->cgroup_realized_mask & m) == 0)
2202                 return;
2203
2204         u->cgroup_realized_mask &= ~m;
2205         unit_add_to_cgroup_queue(u);
2206 }
2207
2208 void manager_invalidate_startup_units(Manager *m) {
2209         Iterator i;
2210         Unit *u;
2211
2212         assert(m);
2213
2214         SET_FOREACH(u, m->startup_units, i)
2215                 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
2216 }
2217
2218 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2219         [CGROUP_AUTO] = "auto",
2220         [CGROUP_CLOSED] = "closed",
2221         [CGROUP_STRICT] = "strict",
2222 };
2223
2224 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
2225 #endif // 0