/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START (MODULES_END)
#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS 0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte) \
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys) (phys)
#endif
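
/*
 * Illustrative note (editorial, hedged): 52-bit physical addressing here
 * assumes the 64K-granule layout, where a pte carries PA[47:16] in place
 * (PTE_ADDR_LOW) and PA[51:48] in pte bits [15:12] (PTE_ADDR_HIGH), hence
 * the shift by 36. For example, a physical address of 0x000f000000000000
 * (only PA[51:48] set) round-trips as pte address bits 0x000000000000f000.
 */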

#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) \
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})

#define pmd_cont_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
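
/*
 * Editorial note (hedged): CONT_PTE_SIZE and CONT_PMD_SIZE, defined in
 * <asm/pgtable-hwdef.h>, are the spans covered by one block of contiguous
 * entries; with 4K pages this is typically 16 ptes (64K) and 16 pmds (32M),
 * and with 64K pages 32 ptes (2M) and 32 pmds (16G).
 */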

#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte) \
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and a kernel mapping; otherwise TLB
	 * maintenance or update_mmu_cache() provide the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |      1          0          0
 *   0      1      |      1          1          0
 *   1      0      |      1          0          1
 *   1      1      |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
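
/*
 * Editorial note (follows from the table above, not part of the original
 * comment): pte_wrprotect() and ptep_set_wrprotect() rely on this encoding;
 * before clearing PTE_WRITE they fold a hardware-dirty state (PTE_WRITE set,
 * PTE_RDONLY clear) into the software PTE_DIRTY bit, so the dirty information
 * is not lost when the mapping becomes read-only.
 */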

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE 4
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
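
/*
 * Editorial note (hedged): HPAGE_SIZE follows PMD_SHIFT, so the default huge
 * page size is 2M with 4K pages, 32M with 16K pages and 512M with 64K pages;
 * other hugetlb sizes are handled by the arm64 hugetlbpage code.
 */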

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd) pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd) pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud) pte_young(pud_pte(pud))
#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud) pte_write(pud_pte(pud))

#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
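
/*
 * Editorial note (hedged): pgprot_dmacoherent() currently selects the same
 * MT_NORMAL_NC | PTE_PXN | PTE_UXN attributes as pgprot_writecombine(); it is
 * presumably kept as a separate helper because the two serve different
 * purposes (non-coherent DMA buffers vs. write-combining mappings) and may
 * need to diverge.
 */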

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd) (!pmd_val(pmd))

#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
			 PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
			 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
			PUD_TYPE_SECT)
#define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
			PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}
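
/*
 * Editorial note (hedged): entries that live in swapper_pg_dir are updated
 * via set_swapper_pgd() in the helpers below because the swapper page tables
 * may be mapped read-only; that helper is expected to perform the store
 * through a writable alias (such as the fixmap) rather than writing the
 * entry in place.
 */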

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
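
/*
 * Illustrative example (editorial; assumes 4K pages, so PAGE_SHIFT is 12 and
 * PTRS_PER_PTE is 512): pte_index() extracts VA bits [20:12], e.g. for an
 * address ending in 0x12345678 it yields (0x12345678 >> 12) & 0x1ff = 0x145,
 * the slot within the last-level table.
 */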

#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))

#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap() clear_fixmap(FIX_PTE)

#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)

#define pud_page(pud) pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr) NULL
#define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr) ((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) (!(pgd_val(pgd) & 2))
#define pgd_present(pgd) (pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, pgd);
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap() clear_fixmap(FIX_PUD)

#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr) NULL
#define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr) ((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
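
/*
 * Editorial note: the helpers below modify live ptes with a cmpxchg loop
 * rather than a plain store so that a concurrent update of the entry (for
 * example a hardware access/dirty bit update racing with this CPU) is
 * detected and the modification is retried against the fresh value.
 */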
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
#define __SWP_OFFSET_BITS 50
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
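
/*
 * Illustrative example (editorial, derived from the encoding above): a swap
 * entry with type 3 and offset 0x1000 is built as (3 << 2) | (0x1000 << 8)
 * = 0x10000c, leaving bits 0-1 (present) and bit 58 (PTE_PROT_NONE) clear so
 * the entry can never be mistaken for a valid or PROT_NONE mapping.
 */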

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr) (addr)
#endif
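
/*
 * Editorial note (hedged): with 52-bit physical addresses the TTBR BADDR
 * field cannot hold PA[51:48] in their natural position, so those bits are
 * encoded in the low bits of TTBR_ELx (bits [5:2]); the (addr >> 46) term
 * above moves bit 48 down to bit 2 before masking with TTBR_BADDR_MASK_52.
 */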

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */