#if 0 /// UNNEEDED by elogind
int unit_get_memory_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;

assert(u);
assert(ret);
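+ /* If MemoryAccounting= is off for this unit (or there is no cgroup context at all), report
+ * -ENODATA rather than a stale or meaningless counter. */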
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->memory_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;

assert(u);
assert(ret);
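+ /* Same pattern as for memory: without a cgroup context, or with TasksAccounting= disabled,
+ * there is no task counter to report. */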
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->tasks_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;
}
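
+ /* Note: all of these getters share one error convention, returning -ENODATA whenever the
+ * relevant *Accounting= switch is off or no cgroup is realized, so callers can probe without
+ * special-casing. A minimal sketch of a hypothetical caller (for illustration only):
+ *
+ * uint64_t t;
+ * if (unit_get_tasks_current(u, &t) >= 0)
+ * log_unit_debug(u, "Currently %" PRIu64 " tasks", t);
+ */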

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
+ CGroupContext *cc;
nsec_t ns;
int r;

assert(u);

/* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
 * call this function with a NULL return value. */
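+ /* As above: CPU usage is only reported when CPUAccounting= is enabled for this unit. */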
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->cpu_accounting)
+ return -ENODATA;
+
r = unit_get_cpu_usage_raw(u, &ns);
if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
/* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
 * cached value. */

int unit_get_ip_accounting(
        Unit *u,
        CGroupIPAccountingMetric metric,
        uint64_t *ret) {
+ CGroupContext *cc;
uint64_t value;
int fd, r;

assert(u);
assert(metric >= 0);
assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
assert(ret);
+ /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
+ * inner cgroup nodes and hence have no processes directly attached, so their counters would be zero
+ * anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
+ * filters. */
+ if (u->type == UNIT_SLICE)
+ return -ENODATA;
+
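+ /* And as with the other getters: no cgroup context or IPAccounting=no means -ENODATA. */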
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->ip_accounting)
+ return -ENODATA;
+
fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
u->ip_accounting_ingress_map_fd :
u->ip_accounting_egress_map_fd;