/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

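/*
 * Illustrative sketch (not part of this header; error handling trimmed):
 * passing VM_MAP_PUT_PAGES to vmap() hands ownership of the pages and of
 * the page array to the mapping, so a single vfree() later puts the pages
 * and frees the array. The array is assumed here to come from
 * kvmalloc_array().
 *
 *	struct page **pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
 *	... take a reference on each page and store it in pages[] ...
 *	void *addr = vmap(pages, count, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
 *	if (!addr)
 *		goto err;	(on failure the caller still owns everything)
 *	...
 *	vfree(addr);		(puts the pages and frees the array)
 */
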
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

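/*
 * Illustrative sketch (hypothetical value, not part of this header): an
 * architecture that wants ioremap() regions aligned no further than, say,
 * its PMD section size could override the default from its own headers:
 *
 *	#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
 */
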
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because a
	 * vmap_area object is always in exactly one of three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

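/*
 * Illustrative sketch (not part of this header): vm_map_ram() is meant for
 * short-lived, frequently remapped buffers; pair each mapping with a
 * vm_unmap_ram() using the same page count.
 *
 *	void *addr = vm_map_ram(pages, count, NUMA_NO_NODE);
 *	if (!addr)
 *		return -ENOMEM;
 *	... access the virtually contiguous mapping at addr ...
 *	vm_unmap_ram(addr, count);
 */
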
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

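/*
 * Illustrative sketch (hypothetical names, not part of this header): a
 * large, virtually contiguous table that need not be physically contiguous
 * is a typical vzalloc() user; release it with vfree() from sleepable
 * context, or vfree_atomic() when sleeping is not allowed.
 *
 *	struct entry *table = vzalloc(array_size(nr_entries, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	... use table[0..nr_entries-1], which comes back pre-zeroed ...
 *	vfree(table);
 */
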
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

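/*
 * Illustrative sketch (hypothetical driver state, not part of this header):
 * a driver's mmap handler can expose a buffer to userspace with
 * remap_vmalloc_range(), provided the buffer was allocated with userspace
 * mapping in mind (e.g. via vmalloc_user(), which sets VM_USERMAP).
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->shared_buf, vma->vm_pgoff);
 *	}
 */
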
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED
 * values and let generic vmalloc and ioremap code know when
 * arch_sync_kernel_mappings() needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls away when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

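/*
 * Illustrative sketch (hypothetical architecture code, not part of this
 * header): an architecture whose kernel mappings need syncing after PGD
 * updates could opt in from its own headers and provide the hook:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED
 *
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		... propagate PGD entries covering [start, end] to all
 *		    page tables in the system ...
 *	}
 */
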
/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without the guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

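/*
 * Illustrative sketch (not part of this header): allocators of executable
 * or otherwise permission-restricted memory typically mark the area right
 * after allocation, so that a later vfree() resets the direct map and
 * flushes the TLB before the pages can be reused.
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *	if (p)
 *		set_vm_flush_reset_perms(p);
 */
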
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

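/*
 * Illustrative sketch (hypothetical callback names, not part of this
 * header): a subsystem that caches vmap allocations can register for purge
 * notifications and drop its caches under vmalloc address-space pressure.
 *
 *	static int my_vmap_purge_cb(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		... release cached vmap allocations ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_purge_nb = {
 *		.notifier_call = my_vmap_purge_cb,
 *	};
 *	...
 *	register_vmap_purge_notifier(&my_vmap_purge_nb);
 */
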
#endif /* _LINUX_VMALLOC_H */