/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
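
/*
 * Example (illustrative, not part of the original header): these flags
 * live in vm_struct::flags and can be inspected through the area that
 * owns a mapping, e.g. to tell a vmalloc() area from an ioremap() one:
 *
 *	struct vm_struct *area = find_vm_area(addr);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_debug("%p is vmalloc()ed memory\n", addr);
 */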

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always in one of three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
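
/*
 * A minimal usage sketch (illustrative only; "pages", "nr" and the copy
 * are assumed caller-provided): vm_map_ram() gives a transient mapping
 * that must be torn down with vm_unmap_ram() using the same page count:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */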

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
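
/*
 * A minimal allocation sketch (illustrative; "nent" and "struct foo" are
 * made-up names): vzalloc() returns zeroed, virtually contiguous memory,
 * may sleep, and is paired with vfree(); array_size() comes from
 * <linux/overflow.h>, which is included above:
 *
 *	struct foo *tbl = vzalloc(array_size(nent, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */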

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
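
/*
 * Example (illustrative): vmap() builds a long-lived contiguous mapping
 * over caller-owned pages; vunmap() drops only the mapping (the pages
 * too, if VM_MAP_PUT_PAGES was set):
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */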

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
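
/*
 * Sketch of a typical ->mmap handler (illustrative; "buf" is assumed to
 * come from vmalloc_user(), which marks the area VM_USERMAP as this
 * interface requires):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */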

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
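
/*
 * Illustrative arch opt-in (32-bit x86 does something similar for its
 * shared kernel page tables); the exact mask is arch policy:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 * together with an arch_sync_kernel_mappings() implementation that
 * propagates the [start, end) range to all relevant page tables.
 */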

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
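
/*
 * Example (illustrative): reserve a bare range of vmalloc address space
 * with no backing pages, then release it:
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */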

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
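
/*
 * Example (illustrative sketch of how executable-memory users such as
 * BPF employ the helper above): flag an allocation so vfree() restores
 * direct-map permissions and flushes the TLB before the pages are
 * reused:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	if (p)
 *		set_vm_flush_reset_perms(p);
 */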

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
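
/*
 * Example (illustrative; the "foo_*" names are made up): callers that
 * cache vmap space can register for a nudge when vmalloc address space
 * runs short:
 *
 *	static int foo_vmap_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		foo_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_vmap_notify,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_nb);
 */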

#endif /* _LINUX_VMALLOC_H */