/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PUD_ALLOC_ONE
#include <asm-generic/pgalloc.h>

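/*
 * A PMD entry here holds the kernel-virtual address of the next-level PTE
 * table: pmd_populate_kernel() is handed the table pointer directly, while
 * pmd_populate() gets the backing struct page.
 */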
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
        pte_t *pte)
{
        set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
        pgtable_t pte)
{
        set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifndef __PAGETABLE_PMD_FOLDED

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern pgd_t *pgd_alloc(struct mm_struct *mm);

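/*
 * Free a PTE page through the mmu_gather: drop its page-table state first,
 * then queue the page itself so it is released once the TLB has been flushed.
 */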
#define __pte_free_tlb(tlb, pte, address)               \
do {                                                    \
        pgtable_pte_page_dtor(pte);                     \
        tlb_remove_page((tlb), pte);                    \
} while (0)

#ifndef __PAGETABLE_PMD_FOLDED

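/*
 * PMD tables are 1 << PMD_ORDER pages.  pgtable_pmd_page_ctor() prepares the
 * page for use as a page table and can fail, in which case the pages are
 * given back; every entry of a new table points at invalid_pte_table.
 */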
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;
        struct page *pg;

        pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
        if (!pg)
                return NULL;

        if (!pgtable_pmd_page_ctor(pg)) {
                __free_pages(pg, PMD_ORDER);
                return NULL;
        }

        pmd = (pmd_t *)page_address(pg);
        pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
}

#define __pmd_free_tlb(tlb, x, addr)    pmd_free((tlb)->mm, x)

#endif

#ifndef __PAGETABLE_PUD_FOLDED

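/*
 * PUD tables come straight from the page allocator; a new table has every
 * entry pointing at invalid_pmd_table.
 */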
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pud_t *pud;

        pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
        if (pud)
                pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
        return pud;
}

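/* Point a P4D entry at a PUD table. */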
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
        set_p4d(p4d, __p4d((unsigned long)pud));
}

#define __pud_free_tlb(tlb, x, addr)    pud_free((tlb)->mm, x)

#endif /* __PAGETABLE_PUD_FOLDED */

extern void pagetable_init(void);

#endif /* _ASM_PGALLOC_H */