/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

extern spinlock_t pa_tlb_lock;

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        mtsp(mm->context, 1);
        pdtlb(addr);
        if (unlikely(split_tlb))
                pitlb(addr);
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) \
        do { \
                *(pteptr) = (pteval); \
        } while (0)

#define pte_inserted(x) \
        ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
         == (_PAGE_PRESENT|_PAGE_ACCESSED))

#define set_pte_at(mm, addr, ptep, pteval) \
        do { \
                pte_t old_pte; \
                unsigned long flags; \
                spin_lock_irqsave(&pa_tlb_lock, flags); \
                old_pte = *ptep; \
                if (pte_inserted(old_pte)) \
                        purge_tlb_entries(mm, addr); \
                set_pte(ptep, pteval); \
                spin_unlock_irqrestore(&pa_tlb_lock, flags); \
        } while (0)
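/*
 * A minimal, illustrative sketch (not compiled) of how generic mm code
 * typically establishes a mapping with set_pte_at(); "vma", "page",
 * "addr" and "ptep" are hypothetical locals, not part of this header.
 */
#if 0
        pte_t entry = mk_pte(page, vma->vm_page_prot);  /* build the PTE */
        set_pte_at(vma->vm_mm, addr, ptep, pte_mkyoung(entry));
#endif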

#endif /* !__ASSEMBLY__ */

#include <asm/page.h>

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PMD_ORDER	1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
#else
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER	PGD_ORDER
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean the shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)
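/*
 * Worked example (illustrative): with 4kB pages (PAGE_SHIFT == 12) and,
 * assuming 8-byte PTEs on 64-bit (i.e. BITS_PER_PTE_ENTRY == 3 in
 * asm/page.h), BITS_PER_PTE = 12 - 3 = 9, so one PTE page holds
 * PTRS_PER_PTE = 512 entries and maps 512 * 4kB = 2MB of virtual space.
 */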

/* Definitions for 2nd level */
#define pgtable_cache_init()	do { } while (0)

#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED	1
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)	/* all initial PTEs fit into one page */
#endif
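/*
 * Worked example (illustrative, under the same 64-bit/4kB assumptions as
 * above): PMD_SHIFT = 12 + 9 = 21, so each initial PTE page maps 2MB.
 * With KERNEL_INITIAL_ORDER == 26 (64MB), PT_INITIAL = 1 << (26 - 21) =
 * 32 pages, and 32 * 2MB = 64MB, matching KERNEL_INITIAL_SIZE.
 */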

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0UL

/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/* following macro is ok for both 32 and 64 bit. */

#define xlate_pabit(x) (31 - (x))

/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)
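/*
 * Worked example (illustrative): xlate_pabit() converts PA-RISC bit
 * numbers (bit 0 is the most significant bit of a 32-bit word) into
 * shift amounts, e.g. xlate_pabit(31) == 0, so _PAGE_READ below is
 * 1 << 0 == 0x001, matching the hex values in the comments above.
 * Likewise PTE_SHIFT = xlate_pabit(20) = 11.
 */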

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_ATTACHED_BIT	30
#define _PxD_VALID_BIT		29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
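/*
 * Worked example (illustrative): PxD_VALUE_SHIFT = 12 - 4 = 8 with 4kB
 * pages. A page-aligned physical address has its low 12 bits clear, so
 * storing (addr >> 8) leaves the low 4 bits of the entry free for the
 * PxD flags, and a 32-bit entry shifted back left by 8 can address up
 * to 32 + 8 = 40 bits of physical space, as the comment above says.
 */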

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY | _PAGE_READ)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX


extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if CONFIG_PGTABLE_LEVELS == 3
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                /* This is the entry pointing to the permanent pmd
                 * attached to the pgd; cannot clear it */
                __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
        else
#endif
                __pmd_val_set(*pmd, 0);
}


#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64 bit we have three level tables */

#define pgd_none(x)	(!pgd_val(x))
#define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if CONFIG_PGTABLE_LEVELS == 3
        if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
                /* This is the permanent pmd attached to the pgd; cannot
                 * free it */
                return;
#endif
        __pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t * pgdp)	{ }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
                         (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)   (0)
#define pte_mkhuge(pte) (pte)
#endif


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({ \
        pte_t __pte; \
 \
        pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
 \
        __pte; \
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
        return pte;
}
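/*
 * Note (illustrative): pfn_pte() and pte_pfn() below are inverses for
 * the PFN field, i.e. pte_pfn(pfn_pte(pfn, prot)) == pfn, because all
 * of the protection bits above live below PFN_PTE_SHIFT.
 */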

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
        ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#define pte_unmap_nested(pte) do { } while (0)
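/*
 * A minimal, illustrative sketch (not compiled) of a software page-table
 * walk using the helpers above; "addr" is a hypothetical kernel virtual
 * address, and real code must also validate each level (pgd_none(),
 * pmd_none(), pte_present()) before descending.
 */
#if 0
        pgd_t *pgd = pgd_offset_k(addr);
        pmd_t *pmd = pmd_offset(pgd, addr);
        pte_t *pte = pte_offset_kernel(pmd, addr);
        unsigned long pfn = pte_pfn(*pte);
#endif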

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
                                            (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
                                            ((offset &  0x7) << 6) | \
                                            ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
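/*
 * Layout note (illustrative): a swap entry keeps the 5-bit swap type in
 * bits 0-4 and splits the offset so that PTE bits which must stay clear
 * in a non-present PTE (such as _PAGE_PRESENT at bit 9) are skipped:
 * offset bits 0-2 land in entry bits 6-8 and the remaining offset bits
 * go to bit 11 upward, so
 * __swp_offset(__swp_entry(type, offset)) == offset round-trips.
 */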

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        pte_t pte;
        unsigned long flags;

        if (!pte_young(*ptep))
                return 0;

        spin_lock_irqsave(&pa_tlb_lock, flags);
        pte = *ptep;
        if (!pte_young(pte)) {
                spin_unlock_irqrestore(&pa_tlb_lock, flags);
                return 0;
        }
        purge_tlb_entries(vma->vm_mm, addr);
        set_pte(ptep, pte_mkold(pte));
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
        return 1;
}
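/*
 * Note (illustrative): ptep_test_and_clear_young() above uses a
 * double-checked pattern: it first tests pte_young() without the lock
 * to avoid taking pa_tlb_lock for already-old PTEs, then re-tests under
 * the lock before purging the TLB and clearing the accessed bit.
 */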

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t old_pte;
        unsigned long flags;

        spin_lock_irqsave(&pa_tlb_lock, flags);
        old_pte = *ptep;
        if (pte_inserted(old_pte))
                purge_tlb_entries(mm, addr);
        set_pte(ptep, __pte(0));
        spin_unlock_irqrestore(&pa_tlb_lock, flags);

        return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long flags;
        spin_lock_irqsave(&pa_tlb_lock, flags);
        purge_tlb_entries(mm, addr);
        set_pte(ptep, pte_wrprotect(*ptep));
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
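/*
 * A minimal, illustrative sketch (not compiled) of a hypothetical driver
 * mmap() handler that uses pgprot_noncached() to map device memory
 * uncached; "device_pfn" is an assumed, driver-provided frame number.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, device_pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
#endif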

/* We provide our own get_unmapped_area to ensure cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */