// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

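/*
 * Size of the IMMR area (the 8xx on-chip peripheral registers), derived
 * from the number of fixmap slots reserved for it.
 */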
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

#define LARGE_PAGE_SIZE_8M	(1 << 23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* Pin up to the first three 8MB pages after the IMMR in the DTLB table */
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
		unsigned long addr = 0;
		unsigned long mem = total_lowmem;

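		/*
		 * Each iteration loads one pinned 8M entry into the DTLB:
		 * MD_CTR selects the entry index, MD_EPN and MD_TWC set the
		 * effective address and page size, and the write to MD_RPN
		 * commits the physical address and protection flags.
		 */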
		for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
			mtspr(SPRN_MD_CTR, ctr | (i << 8));
			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
			mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
			mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
			addr += LARGE_PAGE_SIZE_8M;
			mem -= LARGE_PAGE_SIZE_8M;
		}
	}
}

static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}

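/*
 * Patch the 16-bit immediate at a patch site with the upper half of the
 * virtual address of 'mapped', so the TLB miss handlers can compare a
 * faulting address against the top of the block-mapped region.
 */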
static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}

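/*
 * Rewrite the immediate field of the instruction at a patch site with the
 * upper 16 bits of simm.
 */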
static void mmu_patch_addis(s32 *site, long simm)
{
	unsigned int instr = *(unsigned int *)patch_site_addr(site);

	instr &= 0xffff0000;
	instr |= ((unsigned long)simm) >> 16;
	patch_instruction_site(site, instr);
}

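/*
 * Block-map as much of the linear RAM mapping as possible with 8M pages
 * and patch the TLB miss handlers with the resulting limit.  When large
 * TLB mappings are disabled (__map_without_ltlbs), only the IMMR area is
 * mapped and the limit is set to zero.
 */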
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
			patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
	} else {
		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
					    _ALIGN(__pa(_einittext), 8 << 20));
	}

	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);

	/*
	 * If the size of RAM is not an exact power of two, we may not have
	 * covered RAM in its entirety with 8 MiB pages.  Consequently,
	 * restrict the top end of RAM currently allocatable so that memblock
	 * allocations of PTEs covering the "tail" with normal-sized pages
	 * (or allocations for other reasons) do not go outside the mapped
	 * range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}

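/*
 * Restrict the executable limit used by the ITLB miss handler to _etext,
 * so that the init text is no longer mapped executable once init memory
 * is freed.
 */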
void mmu_mark_initmem_nx(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
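/*
 * Patch the DTLB miss handler so that addresses below _sinittext (kernel
 * text and rodata) are mapped read-only.
 */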
void mmu_mark_rodata_ro(void)
{
	if (CONFIG_DATA_SHIFT < 23)
		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
				-__pa(((unsigned long)_sinittext) &
				      ~(LARGE_PAGE_SIZE_8M - 1)));
	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the case where the first MEMBLOCK does
	 * not map physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
}

/*
 * Set up to use a given MMU context.
 * id is the context number, pgd is the PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register to the new
 * "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain the base address of the level 1 table
	 * minus the lower part of the kernel PGDIR base address, so that all
	 * accesses to the level 1 table are done relative to the lower part
	 * of the kernel PGDIR base address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context.  M_CASID holds the hardware CASID (0-15); the -1
	 * maps the allocated context ID onto that range.
	 */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

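/*
 * Invalidate the whole instruction cache.
 */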
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif