/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

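/*
 * Note (added commentary, not upstream): defining the __HAVE_ARCH_*
 * macros above before pulling in <asm-generic/pgalloc.h> makes the
 * generic header skip its default pmd_alloc_one()/pmd_free()/pgd_free()
 * so the parisc-specific versions in this file are used instead.
 */
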
/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd. This means that we can
 * subtract a constant offset to get to it. The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory. We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
					PxD_FLAG_VALID |
					PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
		 * a signal that this pmd may not be freed */
		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
	}
	spin_lock_init(pgd_spinlock(actual_pgd));
	return actual_pgd;
}
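
/*
 * Note (added commentary, not upstream): pgd_alloc() returns
 * actual_pgd = pgd + PTRS_PER_PGD, so the pmd attached above always
 * sits at a fixed negative offset from the pgd handed to the rest of
 * the kernel. A hypothetical helper spelling that out:
 *
 *	static inline pmd_t *pgd_attached_pmd(pgd_t *actual_pgd)
 *	{
 *		return (pmd_t *)(actual_pgd - PTRS_PER_PGD);
 *	}
 *
 * which is exactly the adjustment pgd_free() below undoes before
 * returning the pages.
 */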

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}
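
/*
 * Sketch (added commentary, assuming the PxD_FLAG_MASK/PxD_VALUE_SHIFT
 * definitions from <asm/page.h>): an entry written as above keeps the
 * flag bits in the low bits and stores the physical address of the
 * next-level table shifted right by PxD_VALUE_SHIFT. Reading it back
 * is the mirror operation, roughly:
 *
 *	unsigned long phys = (pud_val(*pud) & ~(unsigned long)PxD_FLAG_MASK)
 *					<< PxD_VALUE_SHIFT;
 *	pmd_t *pmd = (pmd_t *)__va(phys);
 *
 * (illustrative only; real page-table walkers use the accessors in
 * <asm/pgtable.h>).
 */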

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
}
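
/*
 * Note (added commentary, a reasonable reading rather than upstream
 * documentation): the __HAVE_ARCH_PMD_ALLOC_ONE/__HAVE_ARCH_PMD_FREE
 * overrides exist because a parisc pmd spans 1 << PMD_ORDER pages and
 * needs the attached-pmd handling below, neither of which the generic
 * single-page helpers in <asm-generic/pgalloc.h> provide.
 */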

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}
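
/*
 * Clarifying note (added commentary, not upstream): the ATTACHED case
 * above covers the pmd that pgd_alloc() carved out of the pgd allocation
 * itself. Generic mm teardown still calls pmd_free() on it, but its
 * memory is only returned when pgd_free() releases the whole
 * PGD_ALLOC_ORDER block, so all this function may do is rebalance the
 * nr_pmds accounting.
 */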

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
				PxD_FLAG_VALID |
				PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
	else
#endif
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
				+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

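/*
 * Usage note (added commentary, not upstream): pmd_populate() is the
 * struct-page flavour of pmd_populate_kernel(); it converts the pte
 * page to its kernel virtual address with page_address() before writing
 * the pmd entry, e.g. (hypothetical caller):
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *	if (new)
 *		pmd_populate(mm, pmd, new);
 */
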
#endif