// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)

/*
 * pidlists linger for the following amount of time before being destroyed.
 * The goal is to avoid frequent destruction in the middle of consecutive
 * read calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists still lingering on the destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
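
/*
 * Worked example for pidlist_uniq() above: the sorted input {3, 3, 5, 7, 7}
 * is compacted in place to {3, 5, 7} and 3 is returned; elements past the
 * returned length are stale and must be ignored by callers.
 */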

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
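/*
 * A note on the comparator below: plain subtraction cannot overflow here
 * because pids are positive and bounded well below INT_MAX, so the
 * difference always fits in an int.
 */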
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
422 if (!of->priv) {
423 ret = pidlist_array_load(cgrp, type,
424 (struct cgroup_pidlist **)&of->priv);
425 if (ret)
426 return ERR_PTR(ret);
427 }
428 l = of->priv;
429
430 if (pid) {
431 int end = l->length;
432
433 while (index < end) {
434 int mid = (index + end) / 2;
435 if (l->list[mid] == pid) {
436 index = mid;
437 break;
438 } else if (l->list[mid] <= pid)
439 index = mid + 1;
440 else
441 end = mid;
442 }
443 }
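	/*
	 * Worked example: with l->list = {100, 200, 300} and *pos == 201,
	 * the search above settles on index 2, so the read resumes at pid
	 * 300 and *pos is rewritten to 300 below.
	 */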
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
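
/*
 * For orientation (paths are illustrative): on a mounted v1 hierarchy these
 * entries appear as files in every cgroup directory, e.g.
 * /sys/fs/cgroup/cpu/mygroup/tasks, with "release_agent" and
 * "cgroup.sane_behavior" present only at the hierarchy root.
 */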

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
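
/*
 * Illustrative /proc/cgroups output produced by the function above
 * (hierarchy ids and cgroup counts are system-dependent):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	1	1
 *	cpu	3	4	1
 */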

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
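/*
 * For illustration (the agent path and cgroup name are hypothetical): with
 * release_agent set to "/sbin/cgroup-release" and a notify-enabled cgroup
 * at /mygroup becoming empty, the kernel effectively runs
 *
 *	/sbin/cgroup-release /mygroup
 *
 * with HOME=/ and PATH=/sbin:/bin:/usr/sbin:/usr/bin as the only
 * environment, and does not wait for the agent to exit.
 */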
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf || !strlen(agentbuf))
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

static const struct fs_parameter_spec cgroup1_param_specs[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};

const struct fs_parameter_description cgroup1_fs_parameters = {
	.name = "cgroup1",
	.specs = cgroup1_param_specs,
};
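
/*
 * Illustrative example (mount point and hierarchy name are hypothetical):
 * a mount such as
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt
 *
 * reaches cgroup1_parse_param() below once per option; "cpu" and "cpuacct"
 * come back from fs_parse() as -ENOPARAM and are matched against subsystem
 * legacy names, while "name=mygrp" is handled by the Opt_name case.
 */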

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (param->type != fs_value_is_string)
				return invalf(fc, "Non-string source");
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return cg_invalf(fc, "cgroup1: release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return cg_invalf(fc, "cgroup1: Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return cg_invalf(fc, "cgroup1: Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return cg_invalf(fc, "cgroup1: Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return cg_invalf(fc, "cgroup1: name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In the absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems.  (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return cg_invalf(fc, "cgroup1: Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return cg_invalf(fc, "cgroup1: noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return cg_invalf(fc, "cgroup1: none used incorrectly");

	return 0;
}
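
/*
 * A few illustrative outcomes of the checks above (assuming the named
 * controllers are built in and not blocked via cgroup_no_v1=):
 *
 *	(no options)		-> 'all' implied; every enabled v1 controller
 *	-o all,cpu		-> rejected: 'all' conflicts with a subsys name
 *	-o none,name=foo	-> named hierarchy with no controllers
 *	-o noprefix,cpu		-> rejected: noprefix is for cpuset-only mounts
 */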

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			  ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return cg_invalf(fc, "cgroup1: No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

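/*
 * Parse the cgroup_no_v1= boot parameter.  For example,
 * "cgroup_no_v1=memory,named" blocks v1 mounts of the memory controller
 * and of named (name=) hierarchies, while "cgroup_no_v1=all" blocks every
 * controller.
 */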
static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);