// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif

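/*
 * Note: allocation below tries the per-CPU stack cache first.
 * this_cpu_xchg() claims a slot atomically, so a non-NULL result is
 * exclusively ours; only on a cache miss do we pay for a fresh vmalloc.
 */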
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Clear the KASAN shadow of the stack. */
		kasan_unpoison_shadow(s->addr, THREAD_SIZE);

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		tsk->stack = s->addr;
		return s->addr;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack) {
		tsk->stack_vm_area = find_vm_area(stack);
		tsk->stack = stack;
	}
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return tsk->stack;
	}
	return NULL;
#endif
}

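/*
 * Note: freeing mirrors allocation. The memcg charge is dropped page by
 * page, then the stack is parked in an empty per-CPU cache slot if one
 * is free; vfree_atomic() is used for the fallback because this path
 * may run from interrupt context.
 */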
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		*new = data_race(*orig);
		INIT_LIST_HEAD(&new->anon_vma_chain);
		new->vm_next = new->vm_prev = NULL;
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vm_area_cachep, vma);
}

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	/* All stack pages are in the same node. */
	if (vm)
		mod_lruvec_page_state(vm->pages[0], NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	else
		mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
}

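/*
 * Note: stacks are charged to the memcg here, page by page, rather than
 * at allocation time, because alloc_thread_stack_node() drops
 * __GFP_ACCOUNT so that cached stacks can be handed to tasks in
 * different cgroups.
 */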
static int memcg_charge_kernel_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);
	int ret;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			/*
			 * If memcg_kmem_charge_page() fails, page->mem_cgroup
			 * pointer is NULL, and memcg_kmem_uncharge_page() in
			 * free_thread_stack() will ignore this page.
			 */
			ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
						     0);
			if (ret)
				return ret;
		}
	}
#endif
	return 0;
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

#ifdef CONFIG_MMU
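/*
 * dup_mmap() walks the parent's VMA list in address order, duplicating
 * each VMA into the child and copying its page table entries, except
 * that VM_DONTCOPY mappings are skipped entirely and VM_WIPEONFORK
 * mappings are linked in but left empty, to be populated on fault.
 */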
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				put_write_access(inode);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads - compute the global limit on the number of threads.
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

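/*
 * dup_task_struct() builds only the skeleton of the child: a task_struct
 * copied from @orig plus a fresh kernel stack (and shadow call stack),
 * with the reference counts reset for the new task. Shared resources
 * (mm, files, sighand, ...) are attached later by the copy_*() helpers
 * called from copy_process().
 */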
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area __maybe_unused;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	if (memcg_charge_kernel_stack(tsk))
		goto free_stack;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_queue = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif
	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_pasid(struct mm_struct *mm)
{
#ifdef CONFIG_IOMMU_SUPPORT
	mm->pasid = INIT_PASID;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

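/*
 * mm_init() brings a freshly allocated (or, for dup_mm(), memcpy'd)
 * mm_struct into a consistent state: both mm_users and mm_count start
 * at 1, all locks and counters are (re)initialized, and a page
 * directory plus architecture context are allocated.
 */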
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	atomic_set(&mm->has_pinned, 0);
	atomic64_set(&mm->pinned_vm, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mm_init_pasid(mm);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_subscriptions_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);
	hugetlb_count_init(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel worker thread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	up_read(&task->signal->exec_update_lock);

	return mm;
}

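/*
 * vfork() support: the parent sleeps on a completion that the child
 * signals once it releases the shared mm (on exec or exit). The two
 * helpers below fire that completion and wait on it, respectively.
 */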
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	cgroup_enter_frozen();
	killed = wait_for_completion_killable(vfork);
	cgroup_leave_frozen(false);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or on success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one...
 * Eric Biederman 10 January 1998
 */
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exit_release(tsk);
	mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exec_release(tsk);
	mm_release(tsk, mm);
}

/**
 * dup_mm() - duplicates an existing mm structure
 * @tsk: the task_struct with which the new mm will be associated.
 * @oldmm: the mm to duplicate.
 *
 * Allocates a new mm structure and duplicates the provided @oldmm structure
 * content into it.
 *
 * Return: the duplicated mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk,
				struct mm_struct *oldmm)
{
	struct mm_struct *mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mm_init_owner(mm, NULL);
	mmput(mm);

fail_nomem:
	return NULL;
}

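/*
 * copy_mm() picks between the two clone modes: with CLONE_VM the child
 * shares the parent's mm (a bare mmget()), otherwise the entire address
 * space is duplicated via dup_mm().
 */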
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
	tsk->last_switch_time = 0;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that.
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk, current->mm);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		refcount_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	RCU_INIT_POINTER(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	refcount_set(&sig->count, 1);
	spin_lock_irq(&current->sighand->siglock);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	spin_unlock_irq(&current->sighand->siglock);

	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);

	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (refcount_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	posix_cputimers_group_init(pct, cpu_limit);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_HLIST_HEAD(&sig->multiprocess);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);
	init_rwsem(&sig->exec_update_lock);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags but before we held the sighand lock, we have
	 * to manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}

static inline void init_task_pid_links(struct task_struct *task)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&task->pid_links[type]);
	}
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	if (type == PIDTYPE_PID)
		task->thread_pid = pid;
	else
		task->signal->pids[type] = pid;
}

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	p->trc_reader_nesting = 0;
	p->trc_reader_special.s = 0;
	INIT_LIST_HEAD(&p->trc_holdout_list);
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

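/*
 * pidfd support: a pidfd is an anon-inode file whose private_data holds
 * a reference on a struct pid. The f_op comparison below is how such
 * files are recognized.
 */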
struct pid *pidfd_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return ERR_PTR(-EBADF);
}

static int pidfd_release(struct inode *inode, struct file *file)
{
	struct pid *pid = file->private_data;

	file->private_data = NULL;
	put_pid(pid);
	return 0;
}

#ifdef CONFIG_PROC_FS
/**
 * pidfd_show_fdinfo - print information about a pidfd
 * @m: proc fdinfo file
 * @f: file referencing a pidfd
 *
 * Pid:
 * This function will print the pid that a given pidfd refers to in the
 * pid namespace of the procfs instance.
 * If the pid namespace of the process is not a descendant of the pid
 * namespace of the procfs instance 0 will be shown as its pid. This is
 * similar to calling getppid() on a process whose parent is outside of
 * its pid namespace.
 *
 * NSpid:
 * If pid namespaces are supported then this function will also print
 * the pid a given pidfd refers to for all descendant pid namespaces
 * starting from the current pid namespace of the instance, i.e. the
 * Pid field and the first entry in the NSpid field will be identical.
 * If the pid namespace of the process is not a descendant of the pid
 * namespace of the procfs instance 0 will be shown as its first NSpid
 * entry and no others will be shown.
 * Note that this differs from the Pid and NSpid fields in
 * /proc/<pid>/status where Pid and NSpid are always shown relative to
 * the pid namespace of the procfs instance. The difference becomes
 * obvious when sending around a pidfd between pid namespaces from a
 * different branch of the tree, i.e. where no ancestral relation is
1741 * present between the pid namespaces:
1742 * - create two new pid namespaces ns1 and ns2 in the initial pid
1743 * namespace (also take care to create new mount namespaces in the
1744 * new pid namespace and mount procfs)
1745 * - create a process with a pidfd in ns1
1746 * - send pidfd from ns1 to ns2
1747 * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
1748 * have exactly one entry, which is 0
1749 */
static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct pid *pid = f->private_data;
	struct pid_namespace *ns;
	pid_t nr = -1;

	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
		ns = proc_pid_ns(file_inode(m->file)->i_sb);
		nr = pid_nr_ns(pid, ns);
	}

	seq_put_decimal_ll(m, "Pid:\t", nr);

#ifdef CONFIG_PID_NS
	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
	if (nr > 0) {
		int i;

		/* If nr is non-zero it means that 'pid' is valid and that
		 * ns, i.e. the pid namespace associated with the procfs
		 * instance, is in the pid namespace hierarchy of pid.
		 * Start at one below the already printed level.
		 */
		for (i = ns->level + 1; i <= pid->level; i++)
			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
	}
#endif
	seq_putc(m, '\n');
}
#endif
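
/*
 * Illustrative only, not part of the kernel sources: a minimal userspace
 * sketch of consuming the fdinfo output produced above. It assumes a pidfd
 * obtained via pidfd_open(2) or clone(2) with CLONE_PIDFD; names and error
 * handling are simplified.
 *
 *	#include <stdio.h>
 *
 *	static void dump_pidfd_info(int pidfd)
 *	{
 *		char path[64], line[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	(prints the Pid: and NSpid: lines)
 *		fclose(f);
 *	}
 */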

/*
 * Poll support for process exit notification.
 */
static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
	struct pid *pid = file->private_data;
	__poll_t poll_flags = 0;

	poll_wait(file, &pid->wait_pidfd, pts);

	/*
	 * Inform pollers only when the whole thread group exits.
	 * If the thread group leader exits before all other threads in the
	 * group, then poll(2) should block, similar to the wait(2) family.
	 */
	if (thread_group_exited(pid))
		poll_flags = EPOLLIN | EPOLLRDNORM;

	return poll_flags;
}

const struct file_operations pidfd_fops = {
	.release = pidfd_release,
	.poll = pidfd_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = pidfd_show_fdinfo,
#endif
};
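
/*
 * Illustrative only, not part of the kernel sources: a hypothetical
 * userspace sketch of exit notification through pidfd_poll() above.
 * Assumes "pidfd" came from clone(2) with CLONE_PIDFD or pidfd_open(2).
 *
 *	#include <poll.h>
 *
 *	static int wait_for_exit(int pidfd)
 *	{
 *		struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *		return (pfd.revents & POLLIN) ? 0 : -1;	(POLLIN: thread group exited)
 *	}
 */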

static void __delayed_free_task(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	free_task(tsk);
}

static __always_inline void delayed_free_task(struct task_struct *tsk)
{
	if (IS_ENABLED(CONFIG_MEMCG))
		call_rcu(&tsk->rcu, __delayed_free_task);
	else
		free_task(tsk);
}

static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
{
	/* Skip if kernel thread */
	if (!tsk->mm)
		return;

	/* Skip if spawning a thread or using vfork */
	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
		return;

	/* We need to synchronize with __set_oom_adj */
	mutex_lock(&oom_adj_mutex);
	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
	/* Update the values in case they were changed after copy_signal */
	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
	mutex_unlock(&oom_adj_mutex);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
					struct pid *pid,
					int trace,
					int node,
					struct kernel_clone_args *args)
{
	int pidfd = -1, retval;
	struct task_struct *p;
	struct multiprocess_signals delayed;
	struct file *pidfile = NULL;
	u64 clone_flags = args->flags;
	struct nsproxy *nsp = current->nsproxy;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	/*
	 * If the new process will be in a different time namespace
	 * do not allow it to share VM or a thread group with the forking task.
	 */
	if (clone_flags & (CLONE_THREAD | CLONE_VM)) {
		if (nsp->time_ns != nsp->time_ns_for_children)
			return ERR_PTR(-EINVAL);
	}

	if (clone_flags & CLONE_PIDFD) {
		/*
		 * - CLONE_DETACHED is blocked so that we can potentially
		 *   reuse it later for CLONE_PIDFD.
		 * - CLONE_THREAD is blocked until someone really needs it.
		 */
		if (clone_flags & (CLONE_DETACHED | CLONE_THREAD))
			return ERR_PTR(-EINVAL);
	}

	/*
	 * Force any signals received before this point to be delivered
	 * before the fork happens.  Collect up signals sent to multiple
	 * processes that happen during the fork and delay them so that
	 * they appear to happen after the fork.
	 */
	sigemptyset(&delayed.signal);
	INIT_HLIST_NODE(&delayed.node);

	spin_lock_irq(&current->sighand->siglock);
	if (!(clone_flags & CLONE_THREAD))
		hlist_add_head(&delayed.node, &current->signal->multiprocess);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	retval = -ERESTARTNOINTR;
	if (signal_pending(current))
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	/*
	 * This _must_ happen before we call free_task(), i.e. before we jump
	 * to any of the bad_fork_* labels. This is to avoid freeing
	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
	 * kernel threads (PF_KTHREAD).
	 */
	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

	lockdep_assert_irqs_enabled();
#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt; the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (data_race(nr_threads >= max_threads))
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#ifdef CONFIG_IO_URING
	p->io_uring = NULL;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

#ifdef CONFIG_PSI
	p->psi_flags = 0;
#endif

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cputimers_init(&p->posix_cputimers);

	p->io_context = NULL;
	audit_set_context(p, NULL);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
	p->irqtrace.hardirq_disable_ip = _THIS_IP_;
	p->irqtrace.softirq_enable_ip = _THIS_IP_;
	p->softirqs_enabled = 1;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io = 0;
	p->sequential_io_avg = 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls);
	if (retval)
		goto bad_fork_cleanup_io;

	stackleak_task_init(p);

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
				args->set_tid_size);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

	/*
	 * This has to happen after we've potentially unshared the file
	 * descriptor table (so that the pidfd doesn't leak into the child
	 * if the fd table isn't shared).
	 */
	if (clone_flags & CLONE_PIDFD) {
		retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
		if (retval < 0)
			goto bad_fork_free_pid;

		pidfd = retval;

		pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
					      O_RDWR | O_CLOEXEC);
		if (IS_ERR(pidfile)) {
			put_unused_fd(pidfd);
			retval = PTR_ERR(pidfile);
			goto bad_fork_free_pid;
		}
		get_pid(pid);	/* held by pidfile now */

		retval = put_user(pidfd, args->pidfd);
		if (retval)
			goto bad_fork_put_pidfd;
	}

#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
	futex_init_task(p);

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_tsk_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;
	clear_posix_cputimers_work(p);

	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. It should be noted that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p, args);
	if (retval)
		goto bad_fork_put_pidfd;

	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
	 * not want user-space to be able to predict the process start-time by
	 * stalling fork(2) after we recorded the start_time but before it is
	 * visible to the system.
	 */

	p->start_time = ktime_get_ns();
	p->start_boottime = ktime_get_boottime_ns();

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
		if (clone_flags & CLONE_THREAD)
			p->exit_signal = -1;
		else
			p->exit_signal = current->group_leader->exit_signal;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
		p->exit_signal = args->exit_signal;
	}

	klp_copy_process(p);

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	rseq_fork(p, clone_flags);

	/* Don't start children in a dying pid namespace */
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	/* Let kill terminate clone/fork in the middle */
	if (fatal_signal_pending(current)) {
		retval = -EINTR;
		goto bad_fork_cancel_cgroup;
	}

	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}
	total_forks++;
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	sched_post_fork(p, args);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;

bad_fork_cancel_cgroup:
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	p->state = TASK_DEAD;
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}

struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags = CLONE_VM,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}

struct mm_struct *copy_init_mm(void)
{
	return dup_mm(NULL, &init_mm);
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((args->flags & CLONE_PIDFD) &&
	    (args->flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}
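
/*
 * Illustrative only, not part of the kernel sources: the userspace view of
 * the contract above. kernel_clone() returns a positive child PID in the
 * caller's pid namespace on success or a negative errno, which libc turns
 * into -1 with errno set. A minimal fork(2) round trip:
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	static int run_child(void)
 *	{
 *		pid_t pid = fork();	(dispatches into kernel_clone())
 *
 *		if (pid < 0)
 *			return -1;	(copy_process() failed)
 *		if (pid == 0)
 *			_exit(0);	(child)
 *		return waitpid(pid, NULL, 0) == pid ? 0 : -1;
 *	}
 */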

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}
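
/*
 * Illustrative only, not part of the kernel sources: a sketch of using the
 * helper above. New code should normally prefer kthread_create() or
 * kthread_run(); the worker function and data below are made-up names.
 *
 *	static int my_worker(void *data)
 *	{
 *		pr_info("worker running with %p\n", data);
 *		return 0;	(becomes the thread's exit code)
 *	}
 *
 *	pid_t nr = kernel_thread(my_worker, my_data, CLONE_FS | CLONE_FILES);
 *	if (nr < 0)
 *		pr_err("could not create worker: %d\n", (int)nr);
 */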

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return kernel_clone(&args);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return kernel_clone(&args);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	return kernel_clone(&args);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE3

noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t usize)
{
	int err;
	struct clone_args args;
	pid_t *kset_tid = kargs->set_tid;

	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
		     CLONE_ARGS_SIZE_VER0);
	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
		     CLONE_ARGS_SIZE_VER1);
	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
		     CLONE_ARGS_SIZE_VER2);
	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
		return -EINVAL;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
		return -EINVAL;

	if (unlikely(!args.set_tid && args.set_tid_size > 0))
		return -EINVAL;

	if (unlikely(args.set_tid && args.set_tid_size == 0))
		return -EINVAL;

	/*
	 * Verify that the higher 32 bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	if ((args.flags & CLONE_INTO_CGROUP) &&
	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
		.set_tid_size	= args.set_tid_size,
		.cgroup		= args.cgroup,
	};

	if (args.set_tid &&
		copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			(kargs->set_tid_size * sizeof(pid_t))))
		return -EFAULT;

	kargs->set_tid = kset_tid;

	return 0;
}
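
/*
 * Illustrative only, not part of the kernel sources: what the size checks
 * above mean for callers. Passing CLONE_ARGS_SIZE_VER0 as the size is
 * still valid on a newer kernel because copy_struct_from_user() treats the
 * fields userspace did not pass (set_tid, set_tid_size, cgroup) as zero:
 *
 *	struct clone_args args = {
 *		.flags       = CLONE_PIDFD,
 *		.pidfd       = (__u64)(uintptr_t)&pidfd,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid = syscall(__NR_clone3, &args, CLONE_ARGS_SIZE_VER0);
 */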

/**
 * clone3_stack_valid - check and prepare stack
 * @kargs: kernel clone args
 *
 * Verify that the stack arguments userspace gave us are sane.
 * In addition, set the stack direction for userspace since it's easy for us to
 * determine.
 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size > 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}

static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
	/* Verify that no unknown flags are passed along. */
	if (kargs->flags &
	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
		return false;

	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	if (!clone3_stack_valid(kargs))
		return false;

	return true;
}

/**
 * clone3 - create a new process with specific properties
 * @uargs: argument structure
 * @size: size of @uargs
 *
 * clone3() is the extensible successor to clone()/clone2().
 * It takes a struct as argument that is versioned by its size.
 *
 * Return: On success, a positive PID for the child process.
 *         On error, a negative errno number.
 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;
	pid_t set_tid[MAX_PID_NS_LEVEL];

	kargs.set_tid = set_tid;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
}
#endif
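
/*
 * Illustrative only, not part of the kernel sources: a hypothetical
 * clone3(2) call exercising the stack convention checked in
 * clone3_stack_valid() above - @stack is the lowest address of the stack
 * area and @stack_size its size; the kernel adjusts for stack direction:
 *
 *	char *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *	struct clone_args args = {
 *		.flags       = CLONE_VM | CLONE_VFORK,
 *		.exit_signal = SIGCHLD,
 *		.stack       = (__u64)(uintptr_t)stack,
 *		.stack_size  = STACK_SIZE,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 */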

void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
up:
			;
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
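
/*
 * Illustrative only, not part of the kernel sources: a hypothetical
 * visitor for walk_process_tree() above. Returning 0 moves on to the next
 * sibling, a positive value descends into the child just visited, and a
 * negative value aborts the walk:
 *
 *	static int count_visitor(struct task_struct *child, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;	(keep descending through the whole subtree)
 *	}
 *
 *	int count = 0;
 *	walk_process_tree(task, count_visitor, &count);
 */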

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	unsigned int mm_size;

	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare. Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do a useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare the file descriptor table if it is being shared
 */
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
	       struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, max_fds, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, we must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, we must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, we must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing a namespace, we must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
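
/*
 * Illustrative only, not part of the kernel sources: the flag implications
 * resolved in ksys_unshare() as seen from userspace. Requesting only
 * CLONE_NEWUSER implicitly pulls in CLONE_THREAD and CLONE_FS, so a
 * multithreaded caller gets -EINVAL from check_unshare_flags():
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	if (unshare(CLONE_NEWUSER) < 0)	(single-threaded callers only)
 *		perror("unshare");
 */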

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}

int sysctl_max_threads(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
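
/*
 * Illustrative only, not part of the kernel sources: the handler above
 * backs /proc/sys/kernel/threads-max and clamps writes to the range
 * [1, MAX_THREADS], e.g. from a shell:
 *
 *	# cat /proc/sys/kernel/threads-max	(prints the current limit)
 *	# echo 100000 > /proc/sys/kernel/threads-max
 */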