// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */
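
/*
 * Allocation and initialisation of the per-CPU pacas, the PowerPC
 * per-processor control blocks, together with the optional lppaca and
 * SLB shadow buffers that hang off them.
 */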

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

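/*
 * Allocate early per-CPU data of @size bytes, @align aligned, below @limit,
 * preferring memory local to @cpu's node for secondary CPUs. The boot CPU
 * is handled specially (see below), and failure is fatal: there is no way
 * to continue booting without a paca.
 */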
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				    unsigned long limit, int cpu)
{
	void *ptr;
	int nid;

	/*
	 * The boot_cpuid paca is allocated very early, before cpu_to_node()
	 * is up. Set bottom-up mode, because the boot CPU should be on
	 * node 0, which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = NUMA_NO_NODE;
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				     limit, nid);
	if (!ptr)
		panic("cannot allocate paca data");

	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return ptr;
}

#ifdef CONFIG_PPC_PSERIES

#define LPPACA_SIZE 0x400

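/*
 * In a secure guest (SVM), the lppacas must live in memory that has been
 * explicitly shared with the hypervisor via the ultravisor, so they are
 * all carved out of a single page-aligned region that is allocated and
 * shared once with uv_share_page().
 */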
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
					unsigned long limit, int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		memblock_set_bottom_up(true);

		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes
	 * at this point. Filling the region exactly is fine, hence ">" and
	 * not ">=".
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned, and must
 * not cross a 4kB boundary. A 1kB size and 1kB alignment satisfy both
 * requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
	BUILD_BUG_ON(sizeof(struct lppaca) != 640);

	*lppaca = (struct lppaca) {
		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
		.size = cpu_to_be16(LPPACA_SIZE),
		.fpregs_in_use = 1,
		.slb_count = cpu_to_be16(64),
		.vmxregs_in_use = 0,
		.page_ins = 0, };
}

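/*
 * Bare-metal (HV mode) kernels have no hypervisor above them and so get
 * no lppaca; secure guests take the shared-allocation path above so that
 * the hypervisor can still access the structure.
 */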
static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
	struct lppaca *lp;

	BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return NULL;

	if (is_secure_guest())
		lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
	else
		lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

	init_lppaca(lp);

	return lp;
}
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64

/*
 * Three persistent SLBs are allocated here. The buffer will be zero
 * initially, so all entries will be invalid until we actually write
 * them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
	struct slb_shadow *s;

	if (cpu != boot_cpuid) {
		/*
		 * The boot CPU comes here before early_radix_enabled()
		 * is parsed (e.g., for disable_radix), so always allocate
		 * for it; free_unused_pacas() fixes this up later.
		 */
		if (early_radix_enabled())
			return NULL;
	}

	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}

#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * The paca array has one entry per logical processor. Each entry holds a
 * pointer to an lppaca, which contains the information shared between the
 * hypervisor and Linux.
 *
 * On systems with hardware multi-threading, there are two threads per
 * physical processor, and the paca array must contain an entry for each
 * thread. The VPD areas therefore report a maximum number of logical
 * processors equal to twice the maximum number of physical processors,
 * while the processor VPD array needs only one entry per physical
 * processor (not per thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

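/*
 * Fill in the boot-time defaults for one paca. Pointers to the optional
 * per-CPU structures (lppaca, SLB shadow) are cleared here and hooked up
 * later by allocate_paca().
 */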
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
	new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif
}

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Set up r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we set up both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied.
	 */
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

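/*
 * Allocate the array of paca pointers for the worst case, nr_cpu_ids
 * entries; free_unused_pacas() trims it once the real CPU count is known.
 * The array is poisoned with 0x88 so that a stray access through an
 * unallocated entry fails loudly rather than quietly reading zeroes.
 */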
void __init allocate_paca_ptrs(void)
{
	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
	if (!paca_ptrs)
		panic("Failed to allocate %d bytes for paca pointers\n",
		      paca_ptrs_size);

	memset(paca_ptrs, 0x88, paca_ptrs_size);
}

void __init allocate_paca(int cpu)
{
	u64 limit;
	struct paca_struct *paca;

	BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
			       limit, cpu);
	paca_ptrs[cpu] = paca;

	initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
	paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
	paca_struct_size += sizeof(struct paca_struct);
}

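/*
 * Called once the final CPU count is known: trim the pointer array down
 * from the boot-time worst case, and drop the boot CPU's SLB shadow if
 * radix ended up enabled (see new_slb_shadow()).
 */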
void __init free_unused_pacas(void)
{
	int new_ptrs_size;

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size)
		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
			      paca_ptrs_size - new_ptrs_size);

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
	if (early_radix_enabled()) {
		/* Ugly fixup, see new_slb_shadow() */
		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
			      sizeof(struct slb_shadow));
		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
	}
#endif

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
	       paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}

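/*
 * Cache the MMU context details of @mm in the current CPU's paca, so the
 * low-level SLB miss and slice handling code can consult them without
 * having to reach into the mm_struct.
 */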
void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
	mm_context_t *context = &mm->context;

	get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
	VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
	get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
	memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
	       LOW_SLICE_ARRAY_SZ);
	memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
	       TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
	get_paca()->mm_ctx_user_psize = context->user_psize;
	get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
	return;
#endif
}