/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

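/*
 * Illustrative layout, not guaranteed by this header: on 64-bit, with
 * BITS_PER_XA_VALUE == 63 (one bit is lost to XArray value tagging) and
 * MAX_SWAPFILES_SHIFT == 5, SWP_TYPE_SHIFT is 58, so `offset' occupies
 * bits 0-57 of the value and `type' the bits above them.
 */
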
/* Clear all flags, keeping only the swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}

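/*
 * Round-trip sketch (illustrative only): for any `type' below
 * (1 << MAX_SWAPFILES_SHIFT) and any `offset' that fits in
 * SWP_OFFSET_MASK,
 *
 *	swp_entry_t e = swp_entry(type, offset);
 *
 * satisfies swp_type(e) == type and swp_offset(e) == offset, i.e. the
 * two accessors above exactly invert swp_entry().
 */
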
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

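/*
 * Note that "swap entry" here covers every swap-format pte: genuine
 * swap entries as well as migration, device-private and hwpoison
 * entries, all of which are !pte_none() && !pte_present().
 */
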
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

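/*
 * The soft-dirty and uffd-wp bits are carried in the pte itself rather
 * than in the swp_entry_t, so they are stripped first; two ptes naming
 * the same swap slot thus yield identical swp_entry_t values.
 */
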
/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

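/*
 * The two helpers below convert between a swp_entry_t and the tagged
 * XArray value entry used to file it in a page-cache tree (e.g. by
 * shmem).  xa_mk_value() marks the word so the XArray can tell it
 * apart from a page pointer; that tag bit is why BITS_PER_XA_VALUE is
 * one less than BITS_PER_LONG.
 */
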
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}

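/*
 * Device private entries stand in for the ptes of pages that have been
 * migrated to device memory (ZONE_DEVICE).  The offset field carries the
 * pfn of the device page; the type records whether the original mapping
 * was writable (SWP_DEVICE_WRITE) or not (SWP_DEVICE_READ).
 */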
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */

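/*
 * Migration entries temporarily replace the ptes of a page while it is
 * being migrated.  A thread faulting on one waits in
 * migration_entry_wait() until migration completes and the pte has been
 * restored.  As with device private entries, the offset field holds the
 * pfn and the type encodes writability.
 */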
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

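/*
 * On architectures that can migrate THPs without first splitting them,
 * a migration entry may also be installed at pmd level; the helpers
 * below mirror the pte-level conversions for that case.
 */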
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
	defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
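
/*
 * The special entry types above (migration, device private, hwpoison)
 * are carved out of type values at or beyond MAX_SWAPFILES, so
 * non_swap_entry() cheaply separates them from real swap entries.
 * Illustrative sketch:
 *
 *	if (!non_swap_entry(entry))
 *		... entry indexes a real swapfile ...
 *	else if (is_migration_entry(entry))
 *		... wait for migration to finish ...
 */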

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */