/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif

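/*
 * Illustrative sketch (not part of the kernel API): with 64K pages and
 * 52-bit PAs, PA bits [47:16] live in the pte's low address field and PA
 * bits [51:48] are folded down by 36 into the PTE_ADDR_HIGH field. For an
 * assumed physical address of 0x000f000080000000:
 *
 *	pteval_t val = __phys_to_pte_val(0x000f000080000000UL);  // 0x8000f000
 *	phys_addr_t pa = __pte_to_phys(__pte(val));              // original PA
 *
 * The exact field positions come from PTE_ADDR_LOW/PTE_ADDR_HIGH in
 * pgtable-hwdef.h; the address above is only an example.
 */
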
#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

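/*
 * Worked example (illustrative only, assuming 4K pages so CONT_PTE_SIZE is
 * 16 * 4K = 64K): pte_cont_addr_end(0x41000, 0x80000) rounds the address up
 * to the next 64K boundary and returns 0x50000, while
 * pte_cont_addr_end(0x41000, 0x48000) returns the end address 0x48000, since
 * the boundary lies beyond it. The "- 1" in the comparison keeps the macro
 * correct even when (end) wraps around to 0.
 */
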
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |   1           0          0
 *   0      1        |   1           1          0
 *   1      0        |   1           0          1
 *   1      1        |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

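/*
 * Worked example of the table above (illustrative only): a clean, writable
 * user pte starts as PTE_WRITE=1, PTE_RDONLY=1, PTE_DIRTY=0. On the first
 * write, hardware DBM clears PTE_RDONLY (without DBM, the fault handler
 * calls pte_mkdirty(), which sets PTE_DIRTY and clears PTE_RDONLY), so
 * pte_dirty() becomes true. pte_wrprotect() later folds the hardware state
 * into the software bit by calling pte_mkdirty() before setting PTE_RDONLY,
 * so the dirty information survives write-protection (e.g. at fork() time).
 */
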
static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

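/*
 * Typical usage sketch (illustrative, not taken from this file): a fault
 * handler installing a freshly allocated page would do something like
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	if (vma->vm_flags & VM_WRITE)
 *		pte = pte_mkwrite(pte_mkdirty(pte));
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *
 * letting set_pte_at() handle I-cache/D-cache maintenance for executable
 * user pages, MTE tag synchronisation and any barriers required by set_pte().
 */
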
/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

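/*
 * Illustrative usage sketch (not from this file): a driver mapping a
 * write-combined framebuffer would typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * which switches the memory-attribute index to MT_NORMAL_NC and forces
 * PXN/UXN while leaving the permission bits of the original prot untouched.
 */
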
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	       ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

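/*
 * Worked example (illustrative only): __swp_entry(3, 0x1234) builds the value
 * (3 << 2) | (0x1234 << 8) = 0x12340c, i.e. type 3 in bits [7:2] and offset
 * 0x1234 in bits [57:8], with bits [1:0] clear so the entry is never mistaken
 * for a valid/present pte and bit 58 clear so it is not PROT_NONE.
 * __swp_type() and __swp_offset() simply reverse the shifts and masks.
 */
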
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000954/*
955 * On AArch64, the cache coherency is handled via the set_pte_at() function.
956 */
957static inline void update_mmu_cache(struct vm_area_struct *vma,
958 unsigned long addr, pte_t *ptep)
959{
960 /*
961 * We don't do anything here, so there's a very small chance of
962 * us retaking a user fault which we just fixed up. The alternative
963 * is doing a dsb(ishst), but that penalises the fastpath.
964 */
965}
966
967#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
968
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000969#ifdef CONFIG_ARM64_PA_BITS_52
970#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
971#else
972#define phys_to_ttbr(addr) (addr)
973#endif
974
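/*
 * Illustrative note (not part of the API): with 52-bit PAs the TTBR cannot
 * hold PA bits [51:48] in their natural position, so phys_to_ttbr() folds
 * them down into bits [5:2] of the TTBR value; the 64-byte alignment
 * required of the table base leaves those bits free. For example, an
 * assumed table at 0x000f000000010000 contributes 0xf << 2 = 0x3c in the
 * low bits of the TTBR.
 */
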
/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */