/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * We simulate an x86-style page table for the Linux mm code.
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

static inline spinlock_t *pgd_spinlock(pgd_t *);

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory. For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
 * require a hash or multi-level tree lookup, or something of that
 * sort), but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error. Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory-mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the N class
 * systems, only one PxTLB inter-processor broadcast can be active at any one
 * time on the Merced bus.
 *
 * PTE updates are protected by the per-pgd spinlock (see pgd_spinlock() below).
 */
extern spinlock_t pa_tlb_flush_lock;
extern spinlock_t pa_swapper_pg_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes	(0)
#endif

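/* purge_tlb_start()/purge_tlb_end() bracket a TLB purge: they take the
 * global pa_tlb_flush_lock only when the broadcasts must be serialized,
 * and otherwise just disable local interrupts. */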
#define purge_tlb_start(flags)	do { \
	if (pa_serialize_tlb_flushes) \
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags); \
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes) \
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

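/* mtsp() below selects the mm's address space; pdtlb() and pitlb() then
 * purge the data and instruction TLB entries for addr in that space. */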
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) \
	do { \
		*(pteptr) = (pteval); \
	} while (0)

#define set_pte_at(mm, addr, ptep, pteval) \
	do { \
		unsigned long flags; \
		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); \
		set_pte(ptep, pteval); \
		purge_tlb_entries(mm, addr); \
		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); \
	} while (0)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PMD_ORDER	1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
#else
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower Directory,
 * because PTE_SHIFT is used lower down to mean the shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
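/* For example, assuming a 64-bit kernel with 4 kB pages and 8-byte PTEs
 * (BITS_PER_PTE_ENTRY == 3), PMD_SHIFT is 12 + 9 == 21, so the 64 MB of
 * KERNEL_INITIAL_ORDER == 26 needs PT_INITIAL == 1 << (26 - 21) == 32
 * initial page-table pages. */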

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0UL

/* NB: The TLB miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (one example: bits 25-31 */
/* are moved together in one instruction). */

#define _PAGE_READ_BIT		31	/* (0x001) read access allowed */
#define _PAGE_WRITE_BIT		30	/* (0x002) write access allowed */
#define _PAGE_EXEC_BIT		29	/* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT	28	/* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT		27	/* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT		26	/* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT	25	/* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT	24	/* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT	23	/* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT	22	/* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT		21	/* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT		20	/* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32-bit word above, so the */
/* following macro is OK for both 32 and 64 bit. */

#define xlate_pabit(x) (31 - (x))
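/* e.g. _PAGE_READ_BIT is 31, so xlate_pabit(_PAGE_READ_BIT) == 0 and
 * _PAGE_READ below becomes 1 << 0 == 0x001, matching the hex values
 * noted in the comments above. */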

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12
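/* i.e. a PTE holds (pfn << 12) combined with the protection bits below;
 * see pfn_pte() and pte_pfn() further down. */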

#define _PAGE_READ	(1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE	(1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC	(1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY	(1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB	(1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY	(1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP	(1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL	(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a pointer (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44 bits of physical address space
 * depending on the 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_ATTACHED_BIT	30
#define _PxD_VALID_BIT		29

#define PxD_FLAG_PRESENT	(1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED	(1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID		(1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK		(0xf)
#define PxD_FLAG_SHIFT		(4)
#define PxD_VALUE_SHIFT		(PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
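/* e.g. with 4 kB pages, PxD_VALUE_SHIFT == 12 - 4 == 8: an entry stores
 * (physical address >> 8), keeping the PxD_FLAG_* bits in the low nibble,
 * which is how a 32-bit entry can address 40 bits of physical space. */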

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable; I don't know whether that's
   correct or not. The stack is mapped this way, though, so this is
   necessary in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)


/*
 * We could have an execute-only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write-only
 * pages.
 */

	/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	__P000 /* copy on write */
#define __P011	__P001 /* copy on write */
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	__P100 /* copy on write */
#define __P111	__P101 /* copy on write */

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
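
/* The __Pxxx entries are the protections used for private (copy-on-write)
 * mappings and the __Sxxx entries those for shared mappings; the xwr key
 * above indexes execute/write/read. */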

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	(pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if CONFIG_PGTABLE_LEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
	else
#endif
		set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_page_vaddr(pud))

/* For 64-bit we have three-level tables */

#define pud_none(x)	(!pud_val(x))
#define pud_bad(x)	(!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)	(pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud) {
#if CONFIG_PGTABLE_LEVELS == 3
	if (pud_flag(*pud) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pud; cannot
		 * free it */
		return;
#endif
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)	(pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)	(__pte(pte_val(pte) | \
			 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)	(0)
#define pte_mkhuge(pte)	(pte)
#endif


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr, pgprot) \
({ \
	pte_t __pte; \
 \
	pte_val(__pte) = ((((addr) >> PAGE_SHIFT) << PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
 \
	__pte; \
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page. On parisc we don't have highmem. */

#define pte_pfn(x)	(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table. */

extern void paging_init(void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty	PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 6) & 0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					  ((offset & 0x7) << 6) | \
					  ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
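/* This layout keeps _PAGE_PRESENT (bit 9, 0x200) clear in a swap PTE:
 * the 5-bit swap type sits in bits 0-4, the low three offset bits in
 * bits 6-8, and the remaining offset bits from bit 11 upwards. */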
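/* The per-mm PTE lock lives at a fixed offset inside the oversized pgd
 * allocation (see PGD_ALLOC_ORDER above); swapper_pg_dir is statically
 * allocated, so it uses the dedicated pa_swapper_pg_lock instead. */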
static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
{
	if (unlikely(pgd == swapper_pg_dir))
		return &pa_swapper_pg_lock;
	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
}


static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	unsigned long flags;

	if (!pte_young(*ptep))
		return 0;

	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
	pte = *ptep;
	if (!pte_young(pte)) {
		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
		return 0;
	}
	set_pte(ptep, pte_mkold(pte));
	purge_tlb_entries(vma->vm_mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	unsigned long flags;

	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
	old_pte = *ptep;
	set_pte(ptep, __pte(0));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
	set_pte(ptep, pte_wrprotect(*ptep));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}

#define pte_same(A, B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT	_PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT	_PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT	_PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */