/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

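/*
 * Tell <asm-generic/pgalloc.h> that this architecture supplies its own
 * pmd_alloc_one(), pmd_free() and pgd_free() (defined below), so the
 * generic fallback versions of those helpers are not emitted.
 */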
#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

/* Allocate the top level pgd (page directory) */
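/*
 * The pgd occupies 1 << PGD_ORDER pages and is zero-filled so every entry
 * starts out empty.  This is normally reached from the generic mm setup
 * (mm_alloc_pgd()) when a new address space is created.
 */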
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (unlikely(pgd == NULL))
		return NULL;

	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

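/*
 * A pud entry does not store a pointer directly: it holds the
 * PxD_FLAG_PRESENT and PxD_FLAG_VALID bits plus the physical address of
 * the pmd table shifted right by PxD_VALUE_SHIFT, as constructed by the
 * set_pud() call below.
 */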
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}

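/*
 * Allocate a pmd table (1 << PMD_ORDER pages).  GFP_PGTABLE_KERNEL, at
 * least as defined in <asm-generic/pgalloc.h>, includes __GFP_ZERO, so
 * the explicit memset() below is additional (arguably redundant) zeroing.
 */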
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
	if (likely(pmd))
		memset((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_pages((unsigned long)pmd, PMD_ORDER);
}
#endif

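/*
 * Wire a pte table into a pmd entry.  The encoding matches pud_populate()
 * above: present/valid flags plus the pte table's physical address shifted
 * right by PxD_VALUE_SHIFT.  Here 'pte' is a kernel virtual address.
 */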
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
		+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}

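/*
 * pmd_populate() is handed the pte table as a struct page (the user page
 * table case) and converts it to a kernel virtual address with
 * page_address() before reusing pmd_populate_kernel().
 */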
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif