/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

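/*
 * Initialize a page worth of PGD entries: every user slot is pointed at
 * the shared invalid_pte_table so unmapped addresses fault cleanly. The
 * loop is unrolled eight entries per iteration.
 */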
void pgd_init(unsigned long page)
{
	unsigned long *p = (unsigned long *) page;
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
		p[i + 0] = (unsigned long) invalid_pte_table;
		p[i + 1] = (unsigned long) invalid_pte_table;
		p[i + 2] = (unsigned long) invalid_pte_table;
		p[i + 3] = (unsigned long) invalid_pte_table;
		p[i + 4] = (unsigned long) invalid_pte_table;
		p[i + 5] = (unsigned long) invalid_pte_table;
		p[i + 6] = (unsigned long) invalid_pte_table;
		p[i + 7] = (unsigned long) invalid_pte_table;
	}
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
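/*
 * Construct a huge-page PMD: shift the page frame number into the PFN
 * field and OR in the protection bits.
 */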
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);

	return pmd;
}

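/*
 * Install a huge-page PMD and flush the entire TLB so no stale
 * translation for the range survives.
 */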
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	flush_tlb_all();
}
#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */

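/*
 * Set up the kernel page tables at boot: reset swapper_pg_dir, map the
 * fixmap range and, with CONFIG_HIGHMEM, the permanent kmap window.
 */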
void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
#endif

	/* Initialize the entire pgd. */
	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir
		 + sizeof(pgd_t) * USER_PTRS_PER_PGD);

	pgd_base = swapper_pg_dir;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

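	/*
	 * Walk the freshly created tables down to the PTE level and cache
	 * the page table backing the PKMAP area in pkmap_page_table.
	 */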
	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
#endif
}