// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>

#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		raw_local_irq_enable();
	} else {
		unsigned long pstate;

		raw_local_irq_enable();

		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
		 * the cpu sleep hypervisor call.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
			sun4v_cpu_yield();
			/* If resumed by cpu_poke then we need to explicitly
			 * call scheduler_ipi().
			 */
			scheduler_poke();
		}

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif

#ifdef CONFIG_COMPAT
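/* Dump the locals and ins of the 32-bit register window that a compat
 * task spilled at its stack pointer (%o6).  The window is fetched from
 * user space, so a fault simply aborts the dump.
 */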
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned int)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs(USER_DS);
	if (copy_from_user(&r_w, rw, sizeof(r_w))) {
		set_fs(old_fs);
		return;
	}

	set_fs(old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif

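/* Dump the register window saved at the trap-time stack pointer.  For
 * privileged (kernel) traps the window is read directly off the kernel
 * stack; for 64-bit user tasks it is copied in from user space; 32-bit
 * user tasks are handed off to show_regwindow32().
 */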
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs(USER_DS);
			if (copy_from_user(&r_w, rw, sizeof(r_w))) {
				set_fs(old_fs);
				return;
			}
			rwk = &r_w;
			set_fs(old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

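/* Full register dump for debugging (oops, sysrq, etc.): trap state,
 * globals, outs, the saved register window and a kernel stack backtrace.
 */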
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}

union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

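/* Record this CPU's trap state and return addresses into its slot of
 * global_cpu_snapshot[].  For traps taken in kernel mode, also walk up
 * to two stack frames so the caller and its caller can be reported.
 */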
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

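/* Capture and print a register snapshot for every CPU in *mask.
 * smp_fetch_global_regs() asks the other CPUs to fill in their slots;
 * each slot is then only polled briefly (see __global_reg_poll above)
 * so a wedged CPU cannot hang the dump.
 */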
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	trigger_all_cpu_backtrace();
}

static const struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};

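/* Record this CPU's performance-counter state.  Niagara-4 and later
 * sun4v chips expose four PCR/PIC pairs; older chips have just one.
 */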
static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

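/* Like the global register dump above, but for the PMU: snapshot and
 * print the PCR/PIC registers of every online CPU.
 */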
static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static const struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif

/* Free current thread data structures etc. */
void exit_thread(struct task_struct *tsk)
{
	struct thread_info *t = task_thread_info(tsk);

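	/* utraps[0] is a reference count: the user trap table can be
	 * shared by threads created with clone() (see copy_thread, which
	 * increments it), so only free it when the last user goes away.
	 */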
	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree(t->utraps);
		else
			t->utraps[0]--;
	}
}

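/* Reset per-thread state when a task execs a new image: switch the TSB
 * to the new mm, drop any buffered user register windows and clear the
 * saved FPU state.
 */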
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}

/* Set up the initial stack frame for a clone() child that was given its
 * own stack: duplicate the parent's topmost user frame at the new
 * location and fix up the saved frame pointer.  It's a bit more tricky
 * when 64-bit tasks are involved, since their stack pointers are biased.
 */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

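/* Try to push any register windows buffered in thread_info out to the
 * user stack.  Windows that cannot be written (e.g. the stack page is
 * not resident) stay buffered; fault_in_user_windows() below is the
 * variant that must not fail.
 */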
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

static void stack_unaligned(unsigned long sp)
{
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0);
}

static const char uwfault32[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
static const char uwfault64[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";

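/* Write all buffered register windows back to the user stack, faulting
 * pages in as needed.  Unlike synchronize_user_stack(), failure here is
 * fatal: the task is sent SIGSEGV with the unwritten windows still
 * buffered.
 */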
void fault_in_user_windows(struct pt_regs *regs)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp, orig_sp;

			orig_sp = sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize))) {
				if (show_unhandled_signals)
					printk_ratelimited(is_compat_task() ?
							   uwfault32 : uwfault64,
							   current->comm, current->pid,
							   sp, orig_sp,
							   regs->tpc,
							   regs->u_regs[UREG_I7]);
				goto barf;
			}
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	force_sig(SIGSEGV);
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 *   Parent --> %o0 == child's pid, %o1 == 0
 *   Child  --> %o0 == parent's pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = tls;

	return 0;
}

/* TIF_MCDPER in thread info flags for current task is updated lazily upon
 * a context switch.  Update this flag in current task's thread flags
 * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		__asm__ __volatile__(
			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_thread_flag(TIF_MCDPER);
		else
			clear_thread_flag(TIF_MCDPER);
	}

	*dst = *src;
	return 0;
}

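/* Figure out where a sleeping task is blocked: walk the chain of saved
 * frame pointers on its kernel stack and return the first return
 * address that lies outside the scheduler, giving up after 16 frames.
 */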
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}