/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
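
/*
 * Note: without CONFIG_PARAVIRT_XXL the stubs above are empty static
 * inlines, so the paravirt hooks in the populate helpers below compile
 * away to nothing on bare-metal builds.
 */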

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
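
/*
 * Illustrative sketch only (not used by this header): because the two
 * 4k halves differ only in bit 12 of their address, the user half of a
 * PTI pgd can be reached from the kernel half with something like
 *
 *	pgd_t *user_pgdp = (pgd_t *)((unsigned long)pgdp | (1UL << PAGE_SHIFT));
 *
 * The real helpers for this live in asm/pgtable.h
 * (kernel_to_user_pgdp()/user_to_kernel_pgdp()).
 */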

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

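/*
 * __HAVE_ARCH_PTE_ALLOC_ONE (defined above) suppresses the generic
 * pte_alloc_one() from asm-generic/pgalloc.h; the x86 override lives in
 * arch/x86/mm/pgtable.c so that user page table pages can be allocated
 * with __userpte_alloc_gfp.
 */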
extern pgtable_t pte_alloc_one(struct mm_struct *);

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
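
/*
 * The extra address argument exists only to match the generic
 * mmu_gather interface; x86 does not need it.
 */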

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
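
/*
 * The _safe variants below use the set_*_safe() setters, which complain
 * if a present entry would be silently changed; early memory init code
 * uses them to catch accidental remapping of live page tables.
 */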

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
					    pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
76
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000077static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
78 struct page *pte)
79{
80 unsigned long pfn = page_to_pfn(pte);
81
82 paravirt_alloc_pte(mm, pfn);
83 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
84}
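
/*
 * Both populate helpers build the same kind of entry: for a pte page at
 * pfn 0x1234, for example, the pmd entry becomes
 * (0x1234UL << PAGE_SHIFT) | _PAGE_TABLE, i.e. the physical address of
 * the page table plus the present/write/user/accessed/dirty bits.
 */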

#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
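
/*
 * __GFP_ACCOUNT is dropped for init_mm so kernel page tables are never
 * charged to a memory cgroup; pgtable_pmd_page_ctor() initialises the
 * page's page-table metadata (e.g. the split pmd lock, when configured)
 * and can fail, in which case the page is freed again.
 */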

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

#ifdef CONFIG_X86_PAE
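/*
 * With PAE the top-level entries (the PDPT) are cached by the CPU when
 * %cr3 is loaded, so updating them needs extra care; the PAE version is
 * therefore out of line (see arch/x86/mm/pgtable.c) rather than a
 * simple set_pud() like the !PAE variant below.
 */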
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else /* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif /* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (pud_t *)get_zeroed_page(gfp);
}
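
/*
 * Unlike pmd pages, pud pages have no ctor/dtor pairing (there is no
 * split lock at this level), so a plain zeroed page suffices.
 */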

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}
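
/*
 * With 4-level paging (pgtable_l5_enabled() false) the p4d level is
 * folded into the pgd, so there is no separate entry to write and the
 * p4d populate/free helpers become no-ops.
 */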

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */