/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/5level-fixup.h>
#endif

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
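
/*
 * Worked example (editorial illustration, not used by the code): with the
 * default CONFIG_PAGE_SIZE_4KB configuration (PAGE_SHIFT = 12, PTE_ORDER =
 * PMD_ORDER = 0, PGD_ORDER = 1, pud folded) the arithmetic above gives
 *
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21  ->  PMD_SIZE   = 2 MB
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30  ->  PGDIR_SIZE = 1 GB
 *
 * and the two-page pgd holds (2 * 4096) / 8 = 1024 entries, for
 * 1024 * 1 GB = 2^40 bytes of virtual address space per task.
 */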

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
# define PGD_ORDER		0
# define PUD_ORDER		0
# else
# define PGD_ORDER		1
# define PUD_ORDER		aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
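
/*
 * Worked example (editorial illustration): with CONFIG_PAGE_SIZE_64KB and
 * all orders 0, each of the two levels holds 65536 / 8 = 8192 entries, so
 * an address decomposes into 13 (pgd) + 13 (pte) + 16 (page offset) bits,
 * matching the 42 bits of virtual address space quoted above.
 */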

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
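
/*
 * Worked example (editorial illustration): with the default 4kB pages the
 * page tables span 1024 * 1 * 512 * 512 * 4096 = 2^40 bytes (PTRS_PER_PUD
 * is 1 when the pud is folded).  On a CPU with cpu_vmbits == 40 the min()
 * picks 2^40, so the vmalloc window runs from MAP_BASE + 2 pages up to
 * MAP_BASE + 2^40 - 2^32, the last 4GB staying out of the window.
 */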

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves; for 3-level the
 * definitions are below and for 2-level they are supplied by
 * <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long)invalid_pud_table;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long)invalid_pud_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
}

#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return pgd_val(pgd);
}

#define pgd_phys(pgd)		virt_to_phys((void *)pgd_val(pgd))
#define pgd_page(pgd)		(pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
{
	*pgd = pgdval;
}

#endif
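
/*
 * Illustrative sketch (editorial, compiled out): how the 4-level helpers
 * above compose when stepping from a pgd slot down to a pud slot.  The
 * helper name is hypothetical and not part of the kernel API.
 */
#if 0
static inline pud_t *example_pud_slot(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* index the pgd with addr */

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;	/* slot still points at invalid_pud_table */

	return pud_offset(pgd, addr);	/* virtual address of the pud slot */
}
#endif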

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return pmd_val(pmd) & _PAGE_PRESENT;
#endif

	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte)		((void)(pte))
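
/*
 * Illustrative sketch (editorial, compiled out): a full top-to-bottom walk
 * for a kernel virtual address using the accessors above.  The helper name
 * is hypothetical; with folded levels the intermediate steps collapse into
 * no-ops supplied by the asm-generic headers.
 */
#if 0
static inline pte_t *example_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* slot in swapper_pg_dir */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* pointer to the pte itself */
}
#endif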

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
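
/*
 * Worked example (editorial illustration): __swp_entry(3, 0x1234) yields
 * val = (3 << 16) | (0x1234 << 24) = 0x1234030000; __swp_type() masks
 * bits 16..23 back out as 3 and __swp_offset() shifts bits 24..63 back
 * down to 0x1234.  The low 16 bits stay zero, leaving the pte's status
 * bits clear.
 */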

#endif /* _ASM_PGTABLE_64_H */