/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (weigand@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
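
/*
 * The counters above feed /proc/meminfo. A rough sketch of the consumer,
 * arch_report_meminfo() in arch/s390/mm/pageattr.c (exact formatting
 * assumed here, for illustration only):
 *
 *	void arch_report_meminfo(struct seq_file *m)
 *	{
 *		seq_printf(m, "DirectMap4k:    %8lu kB\n",
 *			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
 *		seq_printf(m, "DirectMap1M:    %8lu kB\n",
 *			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
 *		seq_printf(m, "DirectMap2G:    %8lu kB\n",
 *			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
 *	}
 */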

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	    P-table origin			      |	     TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	    S-table origin			     |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	  region table origin			     |	     DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	   */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	   */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		   */
#define _ASCE_REAL_SPACE	0x20	/* real space control		   */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		   */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	   */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	   */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	   */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		   */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		   */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	   */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	   */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	   */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		   */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	   */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask  */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	   */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	   */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	   */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		   */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	   */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address   */
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL /* page table origin	   */
#define _SEGMENT_ENTRY_PROTECT		0x200	/* segment protection bit  */
#define _SEGMENT_ENTRY_NOEXEC		0x100	/* segment no-execute bit  */
#define _SEGMENT_ENTRY_INVALID		0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

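/*
 * Illustrative sketch only (the helper name is made up): how a virtual
 * address decomposes into the per-level table indexes with the shift
 * and index masks defined above.
 */
static inline void example_vaddr_split(unsigned long addr,
				       unsigned long idx[5])
{
	idx[0] = (addr & _REGION1_INDEX) >> _REGION1_SHIFT; /* pgd */
	idx[1] = (addr & _REGION2_INDEX) >> _REGION2_SHIFT; /* p4d */
	idx[2] = (addr & _REGION3_INDEX) >> _REGION3_SHIFT; /* pud */
	idx[3] = (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; /* pmd */
	idx[4] = (addr & _PAGE_INDEX) >> _PAGE_SHIFT;	    /* pte */
}
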
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}
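
/*
 * Illustrative sketch only (the helper name is made up): the common case
 * of invalidating one pte for all CPUs, with no NODAT or guest-ASCE
 * options, compiles to the short IPTE form above.
 */
static inline void example_ipte_global(unsigned long address, pte_t *ptep)
{
	__ptep_ipte(address, ptep, 0, 0, IPTE_GLOBAL);
}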

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}
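
/*
 * Illustrative sketch only (the helper name is made up): the common-code
 * sequence described above ptep_xchg_direct()/ptep_xchg_lazy(). It is
 * correct on s390 only because ptep_get_and_clear() has already flushed
 * the TLB; the flush_tlb_range() that common code issues afterwards is
 * a nop here.
 */
static inline void example_change_prot(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       pgprot_t newprot)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);	/* flushes the TLB */

	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);
}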

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr)	((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
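
/*
 * Illustrative sketch only (the helper name is made up): walking from an
 * mm down to the pte of a mapped user address with the offset helpers
 * above. Real walkers must also check *_none()/*_bad() and the large
 * page bits at each level before descending.
 */
static inline pte_t *example_pte_lookup(struct mm_struct *mm,
					unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset(pmd, addr);
}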

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
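
/*
 * Illustrative sketch only (the helper name is made up): a swap entry
 * encoded with the format above survives the type/offset round trip and
 * is recognized by pte_swap(), since (pte & 0x201) == 0x200.
 */
static inline void example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
	BUG_ON(!pte_swap(__swp_entry_to_pte(entry)));
}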

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get_unmapped_area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */