// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/slab.h>

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

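/*
 * Slab constructor for new pgds: zero the user entries and copy the
 * kernel mappings in from the reference page table, swapper_pg_dir,
 * so that every new address space shares the kernel's upper half.
 */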
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

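/*
 * Create the slab caches backing page table allocations.  A pgd object
 * holds PTRS_PER_PGD entries of 1 << PTE_MAGNITUDE bytes each; on
 * three-level configurations a matching cache is set up for pmds.
 */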
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

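/*
 * Allocate a page directory for a new mm.  The cache constructor has
 * already zeroed the user entries and filled in the kernel mappings.
 */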
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}

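/* Return a page directory to the cache. */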
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

#if PAGETABLE_LEVELS > 2
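/* Hook a freshly allocated pmd into its parent pud entry. */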
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

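/*
 * Allocate a zeroed pmd; the generic page table code installs it in
 * the parent pud via pud_populate().
 */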
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
}

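/* Return a pmd to the cache. */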
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */