/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))
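
/*
 * For example (assuming RV64 with 4 KiB pages and 8-byte table entries,
 * which this header does not itself enforce): both work out to 512, so
 * each table level translates 9 bits of the virtual address.
 */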

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS  0

/* Page protection bits */
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE       __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ       __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE      __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC       __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC  __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ |      \
                                 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY           PAGE_READ
#define PAGE_COPY_EXEC      PAGE_EXEC
#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
#define PAGE_SHARED         PAGE_WRITE
#define PAGE_SHARED_EXEC    PAGE_WRITE_EXEC

#define _PAGE_KERNEL        (_PAGE_READ \
                            | _PAGE_WRITE \
                            | _PAGE_PRESENT \
                            | _PAGE_ACCESSED \
                            | _PAGE_DIRTY)

#define PAGE_KERNEL         __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC    __pgprot(_PAGE_KERNEL | _PAGE_EXEC)

#define PAGE_TABLE          __pgprot(_PAGE_TABLE)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000  PAGE_NONE
#define __P001  PAGE_READ
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_EXEC
#define __P101  PAGE_READ_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000  PAGE_NONE
#define __S001  PAGE_READ
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXEC
#define __S101  PAGE_READ_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
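
/*
 * Illustration (generic mm behaviour, not an API defined here): an
 * mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE, ...) region has
 * xwr == 011 and therefore starts out as __P011 == PAGE_COPY, which
 * omits _PAGE_WRITE; the first store faults and the copy-on-write path
 * duplicates the page before mapping it writable.  The same protection
 * with MAP_SHARED selects __S011 == PAGE_SHARED and is writable from
 * the start.
 */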

#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END      (PAGE_OFFSET - 1)
#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
#define PCI_IO_SIZE      SZ_16M

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
        (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE    BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END     (VMALLOC_START - 1)
#define VMEMMAP_START   (VMALLOC_START - VMEMMAP_SIZE)

#define vmemmap         ((struct page *)VMEMMAP_START)
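
/*
 * Worked example (assuming Sv39, i.e. CONFIG_VA_BITS == 39, with
 * PAGE_SHIFT == 12 and STRUCT_PAGE_MAX_SHIFT == 6, so struct page is
 * at most 64 bytes): VMEMMAP_SHIFT == 39 - 12 - 1 + 6 == 32, giving a
 * 4 GiB VMEMMAP_SIZE -- one 64-byte struct page for each of the 2^26
 * pages in the 256 GiB half of the virtual address space being mapped.
 */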

#define PCI_IO_END       VMEMMAP_START
#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP      PCI_IO_START

#ifdef CONFIG_64BIT
#define FIXADDR_SIZE     PMD_SIZE
#else
#define FIXADDR_SIZE     PGDIR_SIZE
#endif
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
        return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
        return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
        return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
        return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
        return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, (addr))
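
/*
 * Example (assuming Sv39, where PGDIR_SHIFT == 30 and PTRS_PER_PGD ==
 * 512): pgd_index() selects bits 38..30 of the address, so for
 * addr == 0x40000000 it returns 1 and pgd_offset(mm, addr) is
 * mm->pgd + 1.
 */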

static inline struct page *pmd_page(pmd_t pmd)
{
        return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)      pfn_pte(page_to_pfn(page), prot)

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)       pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)                  ((void)(pte))

static inline int pte_present(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
        return pte_present(pte)
                && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
        return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
{
        /*
         * The kernel assumes that TLBs don't cache invalid entries, but
         * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
         * cache flush; it is necessary even after writing invalid entries.
         * Relying on flush_tlb_fix_spurious_fault would suffice, but
         * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
         */
        local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
        unsigned long addr, pte_t *ptep, pte_t pteval)
{
        if (pte_present(pteval) && pte_exec(pteval))
                flush_icache_pte(pteval);

        set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
        unsigned long addr, pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        if (!pte_same(*ptep, entry))
                set_pte_at(vma->vm_mm, address, ptep, entry);
        /*
         * update_mmu_cache will unconditionally execute, handling both
         * the case that the PTE changed and the spurious fault case.
         */
        return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pte_t *ptep)
{
        if (!pte_young(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pte_t *ptep)
{
        atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /*
         * This comment is borrowed from x86, but applies equally to RISC-V:
         *
         * Clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *      bit            0:       _PAGE_PRESENT (zero)
 *      bit            1:       _PAGE_PROT_NONE (zero)
 *      bits      2 to 6:       swap type
 *      bits 7 to XLEN-1:       swap offset
 */
#define __SWP_TYPE_SHIFT        2
#define __SWP_TYPE_BITS         5
#define __SWP_TYPE_MASK         ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()   \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)   (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
        { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
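
/*
 * Example: __swp_entry(2, 0x1000) yields val == (2 << 2) | (0x1000 << 7)
 * == 0x80008, from which __swp_type() and __swp_offset() recover 2 and
 * 0x1000.  Bits 0 and 1 remain clear, so the entry is neither present
 * nor PROT_NONE and the fault path recognises it as a swap entry.
 */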

#define kern_addr_valid(addr)   (1) /* FIXME */

extern void *dtb_early_va;
extern void setup_bootmem(void);
extern void paging_init(void);

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif
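
/*
 * Checking the RV64 arithmetic under Sv39: PGDIR_SIZE == 1 GiB and
 * PTRS_PER_PGD == 512, so TASK_SIZE == (1UL << 30) * 512 / 2 ==
 * 0x4000000000 -- user space occupies the lower half of the 39-bit
 * virtual address space, as the comment above states.
 */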

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */