// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
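
/*
 * Illustrative caller sketch (not part of this file; "worker" and
 * "initiator" are hypothetical task_struct pointers the caller holds
 * references to).  A module that spawns a kthread on behalf of a user
 * task can mirror that task's cgroup memberships like so:
 *
 *	int err = cgroup_attach_task_all(initiator, worker);
 *	if (err)
 *		pr_warn("cgroup inheritance failed: %d\n", err);
 *
 * vhost uses this export in a similar way to co-locate its worker
 * threads with the owning process.
 */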

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering, waiting for the destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
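
/*
 * Worked example for pidlist_uniq() with hypothetical, already-sorted
 * input (the sort() in pidlist_array_load() guarantees the ordering):
 *
 *	pid_t list[] = { 3, 3, 7, 7, 7, 42 };
 *	int n = pidlist_uniq(list, 6);	// n == 3
 *	// list[] now starts { 3, 7, 42, ... }; entries past n are stale.
 *
 * Duplicates occur because multiple threads of one process all report
 * the same tgid in the "procs" file.
 */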

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}
321
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000322/*
323 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
324 */
325static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
326 struct cgroup_pidlist **lp)
327{
328 pid_t *array;
329 int length;
330 int pid, n = 0; /* used for populating the array */
331 struct css_task_iter it;
332 struct task_struct *tsk;
333 struct cgroup_pidlist *l;
334
335 lockdep_assert_held(&cgrp->pidlist_mutex);
336
337 /*
338 * If cgroup gets more users after we read count, we won't have
339 * enough space - tough. This race is indistinguishable to the
340 * caller from the case that the additional cgroup users didn't
341 * show up until sometime later on.
342 */
343 length = cgroup_task_count(cgrp);
David Brazdil0f672f62019-12-10 10:32:29 +0000344 array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000345 if (!array)
346 return -ENOMEM;
347 /* now, populate the array */
348 css_task_iter_start(&cgrp->self, 0, &it);
349 while ((tsk = css_task_iter_next(&it))) {
350 if (unlikely(n == length))
351 break;
352 /* get tgid or pid for procs or tasks file respectively */
353 if (type == CGROUP_FILE_PROCS)
354 pid = task_tgid_vnr(tsk);
355 else
356 pid = task_pid_vnr(tsk);
357 if (pid > 0) /* make sure to only use valid results */
358 array[n++] = pid;
359 }
360 css_task_iter_end(&it);
361 length = n;
362 /* now sort & (if procs) strip out duplicates */
363 sort(array, length, sizeof(pid_t), cmppid, NULL);
364 if (type == CGROUP_FILE_PROCS)
365 length = pidlist_uniq(array, length);
366
367 l = cgroup_pidlist_find_create(cgrp, type);
368 if (!l) {
David Brazdil0f672f62019-12-10 10:32:29 +0000369 kvfree(array);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000370 return -ENOMEM;
371 }
372
373 /* store array, freeing old if necessary */
David Brazdil0f672f62019-12-10 10:32:29 +0000374 kvfree(l->list);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000375 l->list = array;
376 l->length = length;
377 *lp = l;
378 return 0;
379}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open.  If the matching pidlist is around, we can use
	 * that.  Look for it.  Note that @ctx->procs1.pidlist can't be used
	 * directly.  It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
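
/*
 * Example of the seek logic above, with a hypothetical pidlist
 * l->list = { 4, 9, 15 }: resuming at *pos == 10 (say, after a seek by
 * userspace), the binary search settles on index 2, the walk continues
 * at pid 15, and *pos is rewritten to 15 so the position always names a
 * real pid from the array.
 */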

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities,
	 * require capabilities to set release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
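
/*
 * Illustrative /proc/cgroups output produced by the loop above (the
 * numbers are made up):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	1	4	1
 *	cpu	2	17	1
 *	memory	0	1	0
 *
 * "hierarchy" is 0 for controllers bound to the default (v2) hierarchy,
 * and "enabled" mirrors cgroup_ssid_enabled().
 */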

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
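
/*
 * Illustration: with release_agent_path set to "/sbin/cgroup-release" (a
 * hypothetical helper) and a released cgroup at "/mygrp/child" on this
 * hierarchy, the usermode helper above runs roughly as:
 *
 *	argv = { "/sbin/cgroup-release", "/mygrp/child", NULL }
 *	envp = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
 *
 * i.e. the agent receives the cgroup's hierarchy-relative path as its
 * only argument.
 */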

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}
893
David Brazdil0f672f62019-12-10 10:32:29 +0000894enum cgroup1_param {
895 Opt_all,
896 Opt_clone_children,
897 Opt_cpuset_v2_mode,
898 Opt_name,
899 Opt_none,
900 Opt_noprefix,
901 Opt_release_agent,
902 Opt_xattr,
903};
904
Olivier Deprez157378f2022-04-04 15:47:50 +0200905const struct fs_parameter_spec cgroup1_fs_parameters[] = {
David Brazdil0f672f62019-12-10 10:32:29 +0000906 fsparam_flag ("all", Opt_all),
907 fsparam_flag ("clone_children", Opt_clone_children),
908 fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
909 fsparam_string("name", Opt_name),
910 fsparam_flag ("none", Opt_none),
911 fsparam_flag ("noprefix", Opt_noprefix),
912 fsparam_string("release_agent", Opt_release_agent),
913 fsparam_flag ("xattr", Opt_xattr),
914 {}
915};
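
/*
 * Example mount invocations these parameters accept (illustrative):
 *
 *	mount -t cgroup -o cpu,cpuacct cgroup /sys/fs/cgroup/cpu,cpuacct
 *	mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd
 *	mount -t cgroup -o all,cpuset_v2_mode cgroup /mnt/cg
 *
 * Controller names such as "cpu" are not in the table; they reach
 * cgroup1_parse_param() below via the -ENOPARAM fallback.
 */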

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (param->type != fs_value_is_string)
				return invalf(fc, "Non-string source");
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * Release agent gets called with all capabilities,
		 * require capabilities to set release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}
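
/*
 * A few concrete outcomes of the validation above (illustrative):
 *
 *	(no options)		-> implicit "all": every enabled v1
 *				   controller is selected
 *	-o all,cpu		-> "subsys name conflicts with all"
 *	-o noprefix,cpu		-> "noprefix used incorrectly" (cpuset only)
 *	-o none,name=foo	-> valid: a named hierarchy with no
 *				   controllers attached
 */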

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error,
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to cgroup_root into the given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
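
/*
 * Illustrative boot parameter usages parsed by cgroup_no_v1() above:
 *
 *	cgroup_no_v1=memory,blkio	- block v1 mounts of memory and blkio
 *	cgroup_no_v1=named		- block named (name=) v1 hierarchies
 *	cgroup_no_v1=all		- block every v1 controller
 */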