// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

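/* true if the given subsystem was disabled on the command line (cgroup_no_v1=) */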
bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cpus_read_lock();
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
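
/*
 * Illustrative caller (hypothetical, not from this file): mirror the
 * creator's v1 cgroup memberships onto a new kernel thread, where
 * "worker_fn" is a made-up thread function:
 *
 *	tsk = kthread_create(worker_fn, NULL, "worker");
 *	if (!IS_ERR(tsk) && cgroup_attach_task_all(current, tsk))
 *		pr_warn("cgroup attach failed\n");
 */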

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering while waiting for the destroy
 * timer.  None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
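 *
 * For example (illustrative), {1, 1, 2, 4, 4, 7} is compacted in place
 * to {1, 2, 4, 7} and 4 is returned; entries past the returned length
 * are left untouched.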
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open.  If the matching pidlist is around, we can use
	 * that.  Look for it.  Note that @ctx->procs1.pidlist can't be used
	 * directly.  It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
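		/*
		 * Binary-search the sorted pidlist for the first pid >=
		 * the resume point saved in *pos by the previous read().
		 */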
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

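/*
 * Common write path for "cgroup.procs" (@threadgroup == true) and "tasks"
 * (@threadgroup == false): resolve the pid written in @buf to a task and
 * attach that task, or its whole thread group, to @of's cgroup.
 */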
static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them.  Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities,
	 * require capabilities to set release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
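/* a row reads e.g. "cpuset	4	14	1" - tab-separated, values illustrative */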
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* the kernfs_node must belong to cgroupfs and be a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
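 *
 * Concretely, with release_agent set to, say, "/sbin/cgroup_release" (a
 * made-up path) and the cgroup "/jobs/a" becoming empty, the kernel runs
 * the equivalent of:
 *
 *	/sbin/cgroup_release /jobs/a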
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

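/* emit a v1 hierarchy's mount options, e.g. for /proc/mounts */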
static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};

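/*
 * Parse one v1 mount option.  For instance (illustrative), the mount
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt/mygrp
 *
 * hands "cpu", "cpuacct" and "name=mygrp" to this function one at a time.
 */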
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (param->type != fs_value_is_string)
				return invalf(fc, "Non-string source");
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * Release agent gets called with all capabilities,
		 * require capabilities to set release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

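/*
 * Validate the combination of parsed mount options and resolve the
 * "all"/"none" shortcuts into a concrete ctx->subsys_mask.
 */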
static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without a subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

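	/*
	 * A positive return means the candidate root was caught dying;
	 * back off briefly and restart the whole mount(2) from scratch.
	 */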
	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

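/*
 * Parse the "cgroup_no_v1=" boot parameter, e.g. "cgroup_no_v1=memory,cpu",
 * "cgroup_no_v1=all" or "cgroup_no_v1=named", recording which v1 hierarchies
 * may not be mounted.
 */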
static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);