/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *	rf = flags
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, rf, size)				\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
/*									\
 * PowerMac takes SLB faults before the feature fixups have been	\
 * applied, so make the 65-bit variant the default part of the		\
 * feature section.							\
 */									\
BEGIN_MMU_FTR_SECTION							\
	srdi	rx,rt,VSID_BITS_65_##size;				\
	clrldi	rt,rt,(64-VSID_BITS_65_##size);				\
	add	rt,rt,rx;						\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_65_##size;				\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
MMU_FTR_SECTION_ELSE							\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
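
/*
 * Illustration only: the scramble macro above is roughly equivalent to the
 * C sketch below, which computes (protovsid * mult) % modulus for a modulus
 * of the form 2^bits - 1, so the "%" reduces to folding the high bits back
 * into the low bits.  This is a sketch with generic parameters, not the
 * kernel's actual helper:
 *
 *	static unsigned long vsid_scramble(unsigned long protovsid,
 *					   unsigned long mult, int bits)
 *	{
 *		unsigned long modulus = (1UL << bits) - 1;
 *		unsigned long x = protovsid * mult;
 *
 *		x = (x >> bits) + (x & modulus);	// fold high into low
 *		x += (x + 1) >> bits;			// map x == modulus to 0
 *		return x & modulus;
 *	}
 *
 * Note that the asm above skips the final mask; the rldimi that merges the
 * VSID into the flags word discards the stray high bits instead.
 */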


/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * r3 is preserved.
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * Check whether the address falls within the range of the first
	 * context, or whether we may need to handle multiple contexts. For
	 * the first context we allocate the SLB entry via the fast path
	 * below. For larger addresses we branch out to C code to see whether
	 * additional contexts have been allocated.
	 * The test here is:
	 *	(ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
	 */
	rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
	bne-	8f
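
	/*
	 * Illustration only: the test above corresponds roughly to this C
	 * sketch (REGION_MASK is the top-nibble region mask, 0xf << 60):
	 *
	 *	static int ea_needs_extra_context(unsigned long ea)
	 *	{
	 *		// any bit set between MAX_EA_BITS_PER_CONTEXT and the
	 *		// region nibble means the EA is outside the first context
	 *		return (ea & ~(0xfUL << 60)) >=
	 *			(1UL << MAX_EA_BITS_PER_CONTEXT);
	 *	}
	 *
	 * The rldicr. form rotates the region nibble down to the low end and
	 * keeps only the bits above MAX_EA_BITS_PER_CONTEXT, so CR0 is
	 * non-zero exactly when the sketch above would return true.
	 */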

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/*
	 * Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
	.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T
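
	/*
	 * Illustration only: for kernel regions the context id follows
	 * directly from the top nibble of the EA, as the comment above says.
	 * A C sketch (KERNEL_REGION_CONTEXT_OFFSET assumed to be 0xc - 1):
	 *
	 *	// region 0xc (linear map) -> 1, 0xd -> 2, 0xe -> 3, 0xf -> 4
	 *	static unsigned long kernel_region_context(unsigned long ea)
	 *	{
	 *		return (ea >> 60) - (0xc - 1);
	 *	}
	 */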

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	cmpldi	cr0,r9,0xf
	bne	1f
	/* Check virtual memmap region. To be patched at kernel boot */
	.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/*
	 * r10 contains the ESID, which is the original faulting EA shifted
	 * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
	 * which is 0xd00038000. That can't be used as an immediate, even if we
	 * ignored the 0xd, so we have to load it into a register, and we only
	 * have one register free. So we must load all of (H_VMALLOC_END >> 28)
	 * into a register and compare ESID against that.
	 */
	lis	r11,(H_VMALLOC_END >> 32)@h	// r11 = 0xffffffffd0000000
	ori	r11,r11,(H_VMALLOC_END >> 32)@l	// r11 = 0xffffffffd0003800
	// Rotate left 4, then mask with 0xffffffff0
	rldic	r11,r11,4,28			// r11 = 0xd00038000
	cmpld	r10,r11				// if r10 >= r11
	bge	5f				// goto io_mapping
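
	/*
	 * Illustration only, a C sketch of the check above (H_VMALLOC_END is
	 * the boundary between the vmalloc and ioremap areas of the 0xd
	 * region; SID_SHIFT is 28):
	 *
	 *	static int is_io_mapping(unsigned long ea)
	 *	{
	 *		return (ea >> SID_SHIFT) >= (H_VMALLOC_END >> SID_SHIFT);
	 *	}
	 *
	 * The lis/ori/rldic sequence exists only to materialise the 36-bit
	 * constant 0xd00038000 without needing a second scratch register.
	 */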

	/*
	 * vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines.
	 */
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
	.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T

0:	/*
	 * For userspace addresses, make sure this is region 0.
	 */
	cmpdi	r9, 0
	bne-	8f
	/*
	 * For user space, make sure we are within the allowed address limit.
	 */
	ld	r11,PACA_SLB_ADDR_LIMIT(r13)
	cmpld	r3,r11
	bge-	8f

	/*
	 * When using slices, we extract the psize from the slice bitmaps
	 * and then need to get the sllp encoding from the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient, especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value and avoid the array lookup. We
	 * cannot really do dynamic patching, unfortunately, as processes
	 * might flip between the 4k and 64k standard page sizes.
	 */
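
	/*
	 * Illustration only: a rough C sketch of the lookup below.  lpsizes
	 * and hpsizes stand for the PACA low/high slice psize arrays that
	 * the asm reaches via PACALOWSLICESPSIZE/PACAHIGHSLICEPSIZE; each
	 * byte holds two 4-bit psize values:
	 *
	 *	static unsigned long user_sllp(const unsigned char *lpsizes,
	 *				       const unsigned char *hpsizes,
	 *				       unsigned long esid)
	 *	{
	 *		unsigned char byte;
	 *		unsigned int shift;
	 *
	 *		if (esid < 16) {	// below SLICE_LOW_TOP
	 *			byte = lpsizes[esid >> 1];
	 *			shift = (esid & 1) * 4;
	 *		} else {
	 *			byte = hpsizes[esid >>
	 *				(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)];
	 *			shift = ((esid >>
	 *				(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 1) * 4;
	 *		}
	 *		return mmu_psize_defs[(byte >> shift) & 0xf].sllp |
	 *			SLB_VSID_USER;
	 *	}
	 */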
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)	/* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,1		/* index */
	addi	r9,r11,PACALOWSLICESPSIZE
	lbzx	r9,r13,r9		/* r9 is lpsizes[r11] */
	rldicl	r11,r10,0,63		/* r11 = r10 & 0x1 */
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	.Lslb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load

8:	/* invalid EA - return an error indication */
	crset	4*cr0+eq		/* indicate failure */
	blr

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
.Lslb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */

	mr	r9,r3

	/* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
	.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r9,r10,0,36		/* r9 = EA[0:35] | entry */
	oris	r10,r9,SLB_ESID_V@h	/* r10 = r9 | SLB_ESID_V */
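
	/*
	 * Illustration only: slot selection and the ESID word above, as a C
	 * sketch for the 256MB case (mmu_slb_size stands for the value
	 * patched into slb_compare_rr_to_size; the 1T path masks down to
	 * SID_SHIFT_1T instead):
	 *
	 *	unsigned long index = paca->stab_rr + 1;
	 *
	 *	if (index >= mmu_slb_size)
	 *		index = SLB_NUM_BOLTED;	// wrap, skipping bolted slots
	 *	paca->stab_rr = index;
	 *
	 *	unsigned long esid_data = (ea & ~((1UL << SID_SHIFT) - 1)) |
	 *				  index | SLB_ESID_V;
	 *	// vsid_data (r11) already carries the scrambled VSID and flags
	 */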

	/* r9 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r9,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r9,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r9,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r9,r9,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r9,SLB_CACHE_ENTRIES+1
2:
	sth	r9,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
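
	/*
	 * Illustration only: the cache update above as a C sketch
	 * (slb_cache_ptr and slb_cache[] mirror the PACA fields reached via
	 * PACASLBCACHEPTR/PACASLBCACHE):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES) {
	 *		// remember the ESID so a later flush can target it
	 *		paca->slb_cache[paca->slb_cache_ptr++] =
	 *			esid_data >> SID_SHIFT;
	 *	} else {
	 *		// overflow: force the next flush to wipe the whole SLB
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *	}
	 */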

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
.Lslb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,1T)

	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r9,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b
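
	/*
	 * Illustration only: the 1T variant above as a C sketch
	 * (scramble_1t() stands for ASM_VSID_SCRAMBLE with the 1T constants;
	 * flags are the SLB_VSID_* bits chosen earlier):
	 *
	 *	unsigned long esid_1t = ea >> SID_SHIFT_1T;
	 *	unsigned long protovsid = (context << ESID_BITS_1T) | esid_1t;
	 *	unsigned long vsid_data = flags |
	 *		(scramble_1t(protovsid) << SLB_VSID_SHIFT_1T) |
	 *		((unsigned long)MMU_SEGSIZE_1T << SLB_VSID_SSIZE_SHIFT);
	 */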


_ASM_NOKPROBE_SYMBOL(slb_allocate)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
#endif