// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

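/*
 * Without per-task canaries, a single global canary is shared by every
 * task and checked on function return by the compiler's stack-protector
 * instrumentation.
 */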
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

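/*
 * dsb(sy) completes all outstanding memory accesses before wfi() parks
 * the CPU in a low-power state until a wakeup event (e.g. an interrupt)
 * arrives.
 */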
static void noinstr __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}

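/*
 * Mask IRQs at PSTATE so that nothing is actually taken while the PMR is
 * opened up; opening the PMR lets pending interrupts reach the core and
 * wake the WFI below (see the comment above cpu_do_idle()).
 */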
static void noinstr __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void noinstr cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
	raw_local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

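/*
 * Decode PSTATE.BTYPE (the Branch Target Identification state field) into
 * a two-character string for print_pstate(); e.g. PSR_BTYPE_JC prints as
 * "BTYPE=jc".
 */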
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

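	/*
	 * Dump the general-purpose registers two per output line, counting
	 * down from x{top_reg} (x29 native, x12 compat) to x0.
	 */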
	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
	flush_mte_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's sve_state
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF_SVE and sve_state in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;

		spectre_v4_enable_task_mitigation(p);

		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;

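		/*
		 * For a kernel thread, stack_start is the function to run
		 * and stk_sz its sole argument; ret_from_fork picks them up
		 * from x19/x20 and branches there instead of returning to
		 * userspace.
		 */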
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

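	/*
	 * Compat tasks get their TLS via tpidrro_el0. For native tasks the
	 * register is only zeroed when KPTI is off: with KPTI the entry
	 * trampoline uses tpidrro_el0 as scratch and clears it on the way
	 * back to EL0, so it must be left alone here.
	 */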
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

475/*
David Brazdil0f672f62019-12-10 10:32:29 +0000476 * Force SSBS state on context-switch, since it may be lost after migrating
477 * from a CPU which treats the bit as RES0 in a heterogeneous system.
478 */
479static void ssbs_thread_switch(struct task_struct *next)
480{
David Brazdil0f672f62019-12-10 10:32:29 +0000481 /*
482 * Nothing to do for kernel threads, but 'regs' may be junk
483 * (e.g. idle task) so check the flags and bail early.
484 */
485 if (unlikely(next->flags & PF_KTHREAD))
486 return;
487
Olivier Deprez0e641232021-09-23 10:07:05 +0200488 /*
489 * If all CPUs implement the SSBS extension, then we just need to
490 * context-switch the PSTATE field.
491 */
Olivier Deprez157378f2022-04-04 15:47:50 +0200492 if (cpus_have_const_cap(ARM64_SSBS))
Olivier Deprez0e641232021-09-23 10:07:05 +0200493 return;
494
Olivier Deprez157378f2022-04-04 15:47:50 +0200495 spectre_v4_enable_task_mitigation(next);
David Brazdil0f672f62019-12-10 10:32:29 +0000496}
497
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Ensure access is disabled when switching to a 32bit task, ensure
 * access is enabled when switching to a 64bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
		return;

	if (is_compat_thread(task_thread_info(next)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void erratum_1418040_new_exec(void)
{
	preempt_disable();
	erratum_1418040_thread_switch(current);
	preempt_enable();
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

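/*
 * Walk the task's saved call stack (bounded to 16 frames) and report the
 * first PC outside the scheduler, i.e. where the task is blocked ("wchan").
 */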
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}

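/*
 * Randomize sp downward by up to one page (when the task allows address
 * space randomization), then round down to the 16-byte alignment the
 * AAPCS64 ABI requires of the stack pointer.
 */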
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

	ptrauth_thread_init_user(current);
	erratum_1418040_new_exec();

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

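/*
 * Reached via prctl(). An illustrative userspace call to opt in to the
 * tagged address ABI would be:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 */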
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

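/*
 * With Branch Target Identification, PROT_BTI tags an executable mapping
 * so that indirect branches into it must land on a BTI (or compatible)
 * instruction.
 */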
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif