/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
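
/*
 * A minimal illustration, not part of this header: the generic mm code
 * (mm/memory.c) drives these hooks with an allocate-then-populate
 * pattern roughly like the sketch below when a middle-level table is
 * missing; if another thread populated the entry first, the freshly
 * allocated table is simply handed back.  The locking shown and the
 * "pgd" and "address" variables are placeholders, not exact kernel code.
 *
 *	pmd_t *new = pmd_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (pgd_present(*pgd))
 *		pmd_free(mm, new);
 *	else
 *		pgd_populate(mm, pgd, new);
 *	spin_unlock(&mm->page_table_lock);
 */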

/*
 * Install a user pte page in a pmd entry: page_to_pa() gives the
 * physical address of the pte page, and adding PAGE_OFFSET turns that
 * into the kernel direct-mapped virtual address that pmd_set() expects.
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_set(pgd, pmd);
}

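/*
 * pgd_alloc() is defined out of line in the Alpha mm code rather than
 * as a trivial page allocation here, presumably because a new pgd needs
 * more setup (e.g. its self-mapping entry) than a bare page allocation
 * can provide; pgd_free() below is the symmetric plain free_page().
 */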
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

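/*
 * GFP_PGTABLE_USER includes __GFP_ZERO (plus __GFP_ACCOUNT), so the new
 * pmd table is returned already cleared; the address argument is unused
 * here and presumably only kept to match the common prototype.
 */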
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#endif /* _ALPHA_PGALLOC_H */