/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
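
/*
 * Illustrative arithmetic (an assumption about a typical 64-bit build,
 * where BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5):
 *
 *	SWP_TYPE_SHIFT  = 63 - 5 = 58
 *	SWP_OFFSET_MASK = (1UL << 58) - 1
 *
 * so the type lives in bits 58..62 of the value and the offset occupies
 * the low 58 bits.
 */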

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
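
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a (type, offset) pair round-trips losslessly through the helpers above.
 *
 *	swp_entry_t entry = swp_entry(1, 0x2a);
 *	unsigned type = swp_type(entry);	// 1
 *	pgoff_t offset = swp_offset(entry);	// 0x2a
 */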

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
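
/*
 * Sketch of the intended pattern (illustrative; `pte' is an assumed page
 * table entry): check is_swap_pte() before converting, since
 * pte_to_swp_entry() is only meaningful for a non-none, non-present pte.
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *		...
 *	}
 */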

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
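
/*
 * Illustrative round trip (not part of this header): a swap entry stored
 * in the page cache becomes an XArray value entry, and because the packed
 * value fits in BITS_PER_XA_VALUE bits the conversion is lossless.
 *
 *	void *val = swp_to_radix_entry(entry);
 *	swp_entry_t back = radix_to_swp_entry(val);	// back.val == entry.val
 */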

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */
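
/*
 * A minimal sketch (illustrative, assuming CONFIG_DEVICE_PRIVATE=y; `page'
 * is an assumed device-private page): create a writable device-private
 * entry, then downgrade it to read-only.
 *
 *	swp_entry_t entry = make_device_private_entry(page, true);
 *	if (is_write_device_private_entry(entry))
 *		make_device_private_entry_read(&entry);
 *	// device_private_entry_to_page(entry) == page
 */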

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif
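
/*
 * A minimal sketch (illustrative, assuming CONFIG_MIGRATION=y; `page' is
 * an assumed locked page, as make_migration_entry() requires): encode a
 * write-migration entry, downgrade it, and recover the page.
 *
 *	swp_entry_t entry = make_migration_entry(page, 1);
 *	if (is_write_migration_entry(entry))
 *		make_migration_entry_read(&entry);
 *	// migration_entry_to_page(entry) == page, still locked
 */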

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
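
/*
 * Illustrative check (assuming CONFIG_ARCH_ENABLE_THP_MIGRATION; `pmd' is
 * an assumed pmd value): a non-present pmd may encode a migration entry,
 * which round-trips through the swp_entry_t helpers just like a pte.
 *
 *	if (is_pmd_migration_entry(pmd)) {
 *		swp_entry_t entry = pmd_to_swp_entry(pmd);
 *		struct page *page = migration_entry_to_page(entry);
 *		...
 *	}
 */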

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif
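
/*
 * A minimal sketch (illustrative, assuming CONFIG_MEMORY_FAILURE=y; `page'
 * is an assumed locked, poisoned page): encode the pfn behind the
 * SWP_HWPOISON type and bump the global counter.
 *
 *	swp_entry_t entry = make_hwpoison_entry(page);
 *	num_poisoned_pages_inc();
 *	// is_hwpoison_entry(entry) is true
 */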

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
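
/*
 * Illustrative distinction (not part of this header): the special entry
 * types above (migration, device-private, hwpoison) are numbered at or
 * above MAX_SWAPFILES, so non_swap_entry() separates them from entries
 * that reference real swap slots.
 *
 *	swp_entry_t swap = swp_entry(0, 0x10);	// ordinary swap entry
 *	// non_swap_entry(swap) == 0
 *	// non_swap_entry(make_hwpoison_entry(page)) != 0
 */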

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */