/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

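/*
 * Region and segment tables (CRSTs) have _CRST_ENTRIES (2048) entries
 * of 8 bytes each, i.e. 16KB, hence the order-2 (four page) allocation.
 */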
#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
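/*
 * The _pgste variants allocate page tables with the page status table
 * extensions (PGSTEs) attached, as needed to back KVM guests;
 * page_table_allocate_pgste is the sysctl switch that forces PGSTE
 * allocation for all processes.
 */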

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

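/*
 * s390 folds unused upper page table levels at run time: an mm
 * typically starts out with a three-level layout, and levels are only
 * added or removed via crst_table_upgrade()/crst_table_downgrade().
 * The empty-entry value for a fresh pgd therefore depends on how many
 * levels are currently folded.
 */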
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm_pud_folded(mm))
		return _REGION3_ENTRY_EMPTY;
	if (mm_p4d_folded(mm))
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}

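/*
 * crst_table_upgrade() stacks additional table levels on top of the
 * current pgd until at least @limit bytes of address space are
 * reachable; crst_table_downgrade() drops back to a two-level layout
 * (used e.g. when switching to a compat address space).
 */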
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		crst_table_free(mm, (unsigned long *) p4d);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (!mm_pud_folded(mm))
		crst_table_free(mm, (unsigned long *) pud);
}

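/*
 * Segment (pmd) tables are the lowest level allocated as a CRST;
 * pgtable_pmd_page_ctor() additionally initializes the struct page for
 * the split pmd locks and can fail, in which case the freshly allocated
 * table has to be released again.
 */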
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

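/*
 * The populate helpers link a lower-level table into an upper-level
 * entry: the physical address of the lower table combined with the
 * type bits of the corresponding region level.
 */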
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

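/*
 * With a two-level address space (asce_limit == _REGION3_SIZE, i.e. a
 * 31-bit compat task) the pgd doubles as the segment table, so it also
 * needs the pmd page constructor/destructor for the split pmd locks.
 */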
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == _REGION3_SIZE) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
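/*
 * -sizeof(pte_t)*PTRS_PER_PTE is -2048 and thus masks off the low 11
 * bits: a page table is 2KB (256 entries of 8 bytes each), so the pte
 * table address extracted from a pmd entry is 2KB aligned.
 */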

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
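/*
 * pte tables are handed out as 2KB fragments by page_table_alloc(),
 * which packs two of them into each 4KB page and tracks which halves
 * are in use.
 */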

extern void rcu_table_freelist_finish(void);

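/*
 * Allocators for the kernel address space (identity mapping and
 * vmemmap); unlike the helpers above they do not take an mm and are
 * used at boot and for memory hotplug.
 */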
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

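/*
 * base_asce_alloc() builds a stand-alone set of translation tables
 * that identity-map the range starting at @addr and returns the ASCE
 * designating them; base_asce_free() releases the complete table tree.
 */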
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */