// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

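/* Section boundary symbols provided by the kernel linker script. */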
extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

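/*
 * Wire the statically allocated PTE page above into the kernel PMD
 * entries covering the fixmap region, so that early_ioremap() can be
 * used before the normal page-table allocators are available.
 */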
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

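/*
 * Allocate a zeroed, naturally aligned page-table fragment from memblock.
 * Only for use before the slab allocator is up; panics on failure.
 */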
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

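/*
 * Early counterpart of pte_alloc_kernel(): populate an empty PMD entry
 * from memblock and return the PTE for va.
 */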
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

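/*
 * Map a single kernel page: make virtual address va point at physical
 * address pa with protection prot, allocating a PTE page if needed.
 */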
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

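/*
 * Map all of lowmem into the kernel's linear mapping: let the MMU code
 * cover what it can with block mappings, then map the rest page by page.
 */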
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

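/*
 * Change the protection of a single kernel page without flushing the TLB.
 * Block-mapped pages are left untouched.
 */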
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}

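/*
 * Remove execute permission from the init text once it is no longer
 * needed; block-mapped kernels delegate to the MMU-specific helper.
 */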
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
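/*
 * Make the kernel text read-only/executable and .rodata read-only once
 * boot-time initialisation is complete.
 */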
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
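/*
 * DEBUG_PAGEALLOC hook: map or unmap lowmem pages so that accesses to
 * freed pages fault immediately.
 */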
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */