/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

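/*
 * Usage sketch (illustration only, not part of this header): the
 * generic mm code pairs each *_alloc_one() below with the matching
 * *_populate() call on the parent entry, roughly:
 *
 *	pud_t *pud = pud_alloc_one(mm, addr);
 *	if (pud)
 *		pgd_populate(mm, pgd_entry, pud);
 *
 * The real callers live in the generic mm code (mm/memory.c).
 */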

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

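/*
 * The PTE-level allocators (pte_alloc_one() and friends) come from the
 * generic implementation; only pgd/pud/pmd handling is ia64-specific.
 */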
#include <asm-generic/pgalloc.h>

#include <asm/mmu_context.h>

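/*
 * The PGD occupies a single zeroed page; __GFP_ZERO ensures every
 * entry starts out empty (pgd_none()).
 */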
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#if CONFIG_PGTABLE_LEVELS == 4
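/*
 * With four levels the PUD is a real table: a PGD entry holds the
 * physical address of its PUD page.  On three-level configurations the
 * PUD is folded into the PGD and none of this is needed.
 */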
static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd_entry, pud_t *pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	free_page((unsigned long)pud);
}
#define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_LEVELS == 4 */

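/*
 * Mid-level entries are plain physical addresses: no extra flag bits
 * are encoded, so each *_populate() is a single store.
 */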
static inline void
pud_populate(struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)

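/*
 * User PTE pages arrive as a pgtable_t (a struct page *), so the
 * physical address comes from page_to_phys(); kernel page tables are
 * plain pointers into the direct map, hence __pa() in the _kernel
 * variant below.
 */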
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd_entry, pgtable_t pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

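/*
 * Like __pud_free_tlb()/__pmd_free_tlb() above, the PTE hook releases
 * the page immediately via the plain free routine; nothing is batched
 * or deferred here.
 */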
#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)

#endif /* _ASM_IA64_PGALLOC_H */