/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture-specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
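
/*
 * With the fallback definitions above, any flag an architecture does not
 * define becomes 0, so testing it is a compile-time no-op. For example,
 * where MAP_SYNC is 0, "flags & MAP_SYNC" is always 0 and the
 * _calc_vm_trans(flags, MAP_SYNC, VM_SYNC) translation below collapses
 * to 0, so callers need no #ifdefs on individual MAP_* flags.
 */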

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
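
/*
 * Sketch of how core mmap code can apply this mask; the exact hook name
 * is an assumption here (mainline uses file_operations.mmap_supported_flags
 * rather than the ->mmap_validate() mentioned above):
 *
 *	if ((flags & MAP_TYPE) == MAP_SHARED_VALIDATE) {
 *		unsigned long mask = LEGACY_MAP_MASK |
 *				     file->f_op->mmap_supported_flags;
 *
 *		if (flags & ~mask)
 *			return -EOPNOTSUPP;
 *	}
 */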

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;
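
/*
 * Note: sysctl_overcommit_memory holds one of the OVERCOMMIT_GUESS,
 * OVERCOMMIT_ALWAYS or OVERCOMMIT_NEVER policies from uapi/linux/mman.h
 * and is set through /proc/sys/vm/overcommit_memory.
 */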

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
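
/*
 * Typical usage, a sketch modelled on __vm_enough_memory() in mm/util.c
 * ("allowed" below stands in for the commit limit): charge first, then
 * back out if the commit would overshoot:
 *
 *	vm_acct_memory(pages);
 *	if (vm_memory_committed() > allowed) {
 *		vm_unacct_memory(pages);
 *		return -ENOMEM;
 *	}
 *	return 0;
 */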

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
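
/*
 * For illustration, an architecture with an extra protection bit maps it
 * to the matching VM_* flag in its asm/mman.h override, e.g. (shape
 * modelled on powerpc's PROT_SAO handling; details vary per arch):
 *
 *	#define arch_calc_vm_prot_bits(prot, pkey) \
 *		(((prot) & PROT_SAO) ? VM_SAO : 0)
 */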

#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid.
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
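
/*
 * An architecture override simply widens the accepted mask. A minimal
 * sketch, assuming a hypothetical PROT_FOO extension bit:
 *
 *	static inline bool arch_validate_prot(unsigned long prot,
 *					      unsigned long addr)
 *	{
 *		return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
 *				 PROT_SEM | PROT_FOO)) == 0;
 *	}
 *	#define arch_validate_prot arch_validate_prot
 */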

#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif
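
/*
 * Example (a paraphrase of arm64's Memory Tagging Extension check): a
 * VM_MTE mapping is only valid when VM_MTE_ALLOWED was set on the vma:
 *
 *	static inline bool arch_validate_flags(unsigned long vm_flags)
 *	{
 *		if (!system_supports_mte())
 *			return true;
 *		return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
 *	}
 */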

/*
 * Optimisation macro. It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	: ((x) & (bit1)) / ((bit1) / (bit2))))
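
/*
 * Worked example: with bit1 == 0x10 and bit2 == 0x40 we have
 * bit1 <= bit2, so the macro expands to ((x) & 0x10) * (0x40 / 0x10),
 * i.e. the tested bit is multiplied by 4, which the compiler folds into
 * a left shift by 2. With bit1 > bit2 it divides instead, i.e. a right
 * shift. Either way the result is branch-free, unlike
 * (x & bit1) ? bit2 : 0. The leading (!(bit1) || !(bit2)) guard makes
 * the whole expression constant-fold to 0 whenever a flag was defaulted
 * to 0 at the top of this file.
 */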

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}
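
/*
 * For example, calc_vm_prot_bits(PROT_READ | PROT_WRITE, 0) evaluates to
 * VM_READ | VM_WRITE (plus whatever arch_calc_vm_prot_bits() adds):
 * each _calc_vm_trans() translates one PROT_* bit into its VM_*
 * counterpart independently.
 */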

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
	       _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
	       _calc_vm_trans(flags, MAP_LOCKED,    VM_LOCKED) |
	       _calc_vm_trans(flags, MAP_SYNC,      VM_SYNC) |
	       arch_calc_vm_flag_bits(flags);
}
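
/*
 * A sketch of how the two helpers meet on the mmap() path (modelled on
 * do_mmap() in mm/mmap.c; exact surroundings differ across versions):
 *
 *	vm_flags = calc_vm_prot_bits(prot, pkey) |
 *		   calc_vm_flag_bits(flags) |
 *		   mm->def_flags |
 *		   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 */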

unsigned long vm_commit_limit(void);
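
/*
 * vm_commit_limit() (defined in mm/util.c) returns the commit ceiling
 * used in OVERCOMMIT_NEVER mode; roughly, when sysctl_overcommit_kbytes
 * is unset:
 *
 *	limit = (total RAM - hugetlb pages) * sysctl_overcommit_ratio / 100
 *		+ total swap pages
 *
 * This is a paraphrase for orientation, not the authoritative
 * definition.
 */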
#endif /* _LINUX_MMAN_H */