// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

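/*
 * Non-zero once mem_init() has finished and the page allocator is up;
 * other code (e.g. the ioremap path) checks this to decide how memory
 * may be allocated.
 */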
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear the zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

	/* We use only ZONE_NORMAL: all of low memory, counted in pages */
	zones_size[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_size);
}

/* These mark extents of read-only kernel pages, as laid out in
 * vmlinux.lds.S.
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	struct memblock_region *region;

	v = PAGE_OFFSET;

	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

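		/*
		 * Walk the region one pgd entry at a time: allocate a PTE
		 * page for each pmd and fill it until either the region
		 * or the PTE page runs out.
		 */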
		while (p < e) {
			int j;
			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

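			/*
			 * With the pud and pmd levels folded, as they are
			 * in a two-level configuration, pud_offset() and
			 * pmd_offset() both hand back the pgd entry, so
			 * all three pointers must be equal here.
			 */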
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);
			}

			/* Allocate one page to hold the PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs, mapping
			 * the kernel's read-only section RO and everything
			 * else RW.
			 */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       region->base, region->base + region->size);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* Make sure the current pgd table points to something sane (even
	 * if it is most probably not used until the next switch_mm); the
	 * TLB miss handlers look up page tables through current_pgd[].
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* Self-modifying code ;)
	 * Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

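		/*
		 * The words written at the exception vectors become OpenRISC
		 * l.j instructions: l.j encodes opcode 0x00 in its top six
		 * bits, so the bare PC-relative word offset to a handler
		 * above the vector is itself a valid jump instruction.
		 */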
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated.
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}


	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate the instruction-cache blocks holding the patched
	 * vectors; a write to SPR_ICBIR invalidates the icache block
	 * containing the given address.
	 */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* New TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in TLB by
	 * flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}
void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* Clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* This will put all low memory onto the freelists. */
	memblock_free_all();

	mem_init_print_info(NULL);

	printk(KERN_INFO "mem_init_done ...........................................\n");
	mem_init_done = 1;
}