/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/asm-xtensa/pgalloc.h
 *
 * Copyright (C) 2001-2007 Tensilica Inc.
 */

#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H

#ifdef CONFIG_MMU
#include <linux/highmem.h>
#include <linux/slab.h>

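/*
 * Defining these two macros tells <asm-generic/pgalloc.h> not to provide
 * its own pte_alloc_one_kernel()/pte_alloc_one(); the generic header then
 * only supplies the __pte_alloc_one*() helpers used by the versions below.
 */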
#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_populate_kernel(mm, pmdp, ptep) \
	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
#define pmd_pgtable(pmd) pmd_page(pmd)

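/* The page directory is a zeroed block of 2^PGD_ORDER pages. */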
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}

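/*
 * The generic pte allocators below hand back zero-filled pages, but the
 * empty ("none") PTE encoding on this architecture is not the all-zero
 * pattern, so every slot of a freshly allocated page table is written
 * with pte_clear() before the table is put to use.
 */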
static inline void ptes_clear(pte_t *ptep)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		pte_clear(NULL, 0, ptep + i);
}

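/* Kernel page-table page: allocate via the generic helper, then initialize. */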
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *ptep;

	ptep = (pte_t *)__pte_alloc_one_kernel(mm);
	if (!ptep)
		return NULL;
	ptes_clear(ptep);
	return ptep;
}

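/*
 * User page-table page: the generic helper returns a struct page rather
 * than a kernel pointer, so the entries are cleared via page_address().
 */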
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page;

	page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
	if (!page)
		return NULL;
	ptes_clear(page_address(page));
	return page;
}

#endif /* CONFIG_MMU */

#endif /* _XTENSA_PGALLOC_H */