Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
#ifndef _PGTABLE_NOPUD_H
#define _PGTABLE_NOPUD_H

#ifndef __ASSEMBLY__

#include <asm-generic/pgtable-nop4d.h>

/*
 * Signals to generic mm code that the pud level of the page-table
 * hierarchy is folded away on this configuration.
 */
#define __PAGETABLE_PUD_FOLDED 1

/*
 * Having the pud type consist of a p4d gets the size right, and allows
 * us to conceptually access the p4d entry that this pud is folded into
 * without casting.
 */
typedef struct { p4d_t p4d; } pud_t;

/* A folded pud covers exactly the range of the p4d entry it lives in. */
#define PUD_SHIFT P4D_SHIFT
/* One entry per (folded) pud table. */
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
| 22 | |
/*
 * The "p4d_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the p4d entry)
 */
static inline int p4d_none(p4d_t p4d) { return 0; }  /* folded entry is never empty */
static inline int p4d_bad(p4d_t p4d) { return 0; }   /* ... and never corrupt */
static inline int p4d_present(p4d_t p4d) { return 1; } /* ... and always present */
static inline void p4d_clear(p4d_t *p4d) { }         /* nothing to clear at this level */
/* Report pud errors in terms of the p4d entry the pud is folded into. */
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))

/* The pud lives inside the p4d entry itself, so there is nothing to wire up. */
#define p4d_populate(mm, p4d, pud) do { } while (0)
#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
/*
 * (puds are folded into p4ds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 * The cast relies on pud_t being a struct wrapping exactly one p4d_t.
 */
#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
| 41 | |
| 42 | static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) |
| 43 | { |
| 44 | return (pud_t *)p4d; |
| 45 | } |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 46 | #define pud_offset pud_offset |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 47 | |
/* Read and construct pud values by delegating to the wrapped p4d. */
#define pud_val(x) (p4d_val((x).p4d))
#define __pud(x) ((pud_t) { __p4d(x) })

/* p4d page accessors forward to the pud ones via the folded pud type. */
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
| 53 | |
/*
 * allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the p4d, so has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define pud_free_tlb(tlb, x, a) do { } while (0)

#undef pud_addr_end
/*
 * A folded pud spans its entire p4d range, so a range walk never needs
 * to stop at a pud boundary: just report the end of the range.
 */
#define pud_addr_end(addr, end) (end)
| 64 | |
| 65 | #endif /* __ASSEMBLY__ */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 66 | #endif /* _PGTABLE_NOPUD_H */ |