// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise a newly
 * allocated copy made with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
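
/*
 * Usage sketch (illustrative only, not part of this file's API; "attr" and
 * "tmpl" are hypothetical caller-side names):
 *
 *	attr->name = kstrdup_const(tmpl->name, GFP_KERNEL);
 *	if (!attr->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(attr->name);
 *
 * If tmpl->name points into .rodata (e.g. a string literal in a statically
 * defined template), no copy is allocated and kfree_const() is a no-op;
 * otherwise the pair behaves exactly like kstrdup()/kfree().  The pointer
 * must always be released with kfree_const() and never passed to krealloc().
 */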

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
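
/*
 * Usage sketch contrasting the two helpers above (illustrative only; the
 * ioctl-style handler, "cmd" struct and MAX_CMD_LEN limit are hypothetical):
 *
 *	struct fancy_cmd *cmd;
 *
 *	if (arg_len > MAX_CMD_LEN)
 *		return -EINVAL;
 *	cmd = memdup_user(uarg, arg_len);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	...
 *	kfree(cmd);
 *
 * For buffers that may be large or whose size is user-controlled, prefer
 * vmemdup_user(), which may fall back to vmalloc and therefore must be
 * released with kvfree() rather than kfree().
 */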

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
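
/*
 * Usage sketch (illustrative only; a hypothetical procfs/sysfs-style write
 * handler copying a user-supplied string of @count bytes, with
 * my_parse_option() standing in for the caller's own parser):
 *
 *	char *kbuf;
 *
 *	kbuf = memdup_user_nul(ubuf, count);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = my_parse_option(kbuf);
 *	kfree(kbuf);
 *
 * The extra NUL byte appended by memdup_user_nul() lets the copy be handed
 * straight to string parsing helpers such as sscanf() or kstrtoul().
 */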

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
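
/*
 * Worked example for mmap_base() (numbers are illustrative and assume 4K
 * pages, the default STACK_RND_MASK, the default stack_guard_gap of 256
 * pages and an 8MB RLIMIT_STACK):
 *
 *	gap  = 8MB (rlimit) + 1MB (guard gap) + ~8MB (stack randomization)
 *	     = ~17MB, which is then clamped up to MIN_GAP = 128MB
 *	base = PAGE_ALIGN(STACK_TOP - 128MB - rnd)
 *
 * i.e. the top-down mmap area starts at least 128MB below STACK_TOP, lowered
 * further by the per-exec random offset rnd from arch_mmap_rnd().
 */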

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
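
/*
 * Usage sketch (illustrative only; loosely modelled on drivers that pin user
 * pages and charge them against RLIMIT_MEMLOCK, with "npages" hypothetical):
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...pin the pages...
 *	account_locked_vm(current->mm, npages, false);   [on unpin/error]
 *
 * Callers that already hold mmap_lock for writing must use
 * __account_locked_vm() directly instead of this wrapper.
 */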

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful not
 * to fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
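
/*
 * Usage sketch (illustrative only; "nr_entries" and the entry struct are
 * hypothetical).  kvmalloc()/kvmalloc_array() are thin wrappers around
 * kvmalloc_node() with NUMA_NO_NODE:
 *
 *	struct foo_entry *tbl;
 *
 *	tbl = kvmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL | __GFP_ZERO);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 *
 * The allocation is physically contiguous when kmalloc succeeds and vmalloc
 * backed otherwise, so the caller should not assume which path was taken and
 * must free the result with kvfree(), which handles both.
 */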

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
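
/*
 * Usage sketch (illustrative only; "key" and "key_len" are hypothetical
 * caller-side names for a buffer holding key material):
 *
 *	key = kvmalloc(key_len, GFP_KERNEL);
 *	if (!key)
 *		return -ENOMEM;
 *	...use the key...
 *	kvfree_sensitive(key, key_len);
 *
 * Unlike a plain kvfree(), the buffer content is wiped with
 * memzero_explicit() first, so the key material does not linger in freed
 * memory and the clearing cannot be optimized away by the compiler.
 */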

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policies
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing to the strict
	 * OVERCOMMIT_NEVER, we need to reduce the deviation to comply with the
	 * strict "NEVER", and to avoid a possible race condition (even though
	 * users rarely switch the policy to OVERCOMMIT_NEVER), the switch is
	 * done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
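
/*
 * Worked example (illustrative numbers): with the default
 * sysctl_overcommit_ratio of 50, no vm.overcommit_kbytes override, 8GiB of
 * non-hugetlb RAM and 2GiB of swap, the OVERCOMMIT_NEVER limit is
 *
 *	allowed = 8GiB * 50 / 100 + 2GiB = 6GiB
 *
 * expressed in pages.  Setting vm.overcommit_kbytes replaces the ratio-based
 * term with an absolute value.
 */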

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
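
/*
 * Usage sketch (illustrative only; a hypothetical caller formatting a
 * task's command line for a diagnostic message):
 *
 *	char cmdline[128];
 *	int n;
 *
 *	n = get_cmdline(task, cmdline, sizeof(cmdline) - 1);
 *	cmdline[n] = '\0';	[the copy is not guaranteed to be terminated]
 *	pr_info("pid %d: %s\n", task->pid, cmdline);
 *
 * The returned buffer may contain embedded NUL separators between arguments,
 * exactly as found in the task's argument area.
 */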

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}