Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 4c8b13b..478491f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -7,12 +7,12 @@
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_insn.o = -pg
-CFLAGS_REMOVE_return_address.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
# Object file lists.
-arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
+obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o cpu_ops.o insn.o \
@@ -27,39 +27,45 @@
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
-arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
+obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
-arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
-arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
-arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
-arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
-arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-arm64-obj-$(CONFIG_KGDB) += kgdb.o
-arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
+ifneq ($(CONFIG_COMPAT_VDSO), y)
+obj-$(CONFIG_COMPAT) += sigreturn32.o
+endif
+obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
efi-rt-wrapper.o
-arm64-obj-$(CONFIG_PCI) += pci.o
-arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
-arm64-obj-$(CONFIG_ACPI) += acpi.o
-arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
-arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
-arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
-arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
-arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
+obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
cpu-reset.o
-arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
+obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
-arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
-arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
-arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_CRASH_CORE) += crash_core.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
-obj-y += $(arm64-obj-y) vdso/ probes/
-obj-m += $(arm64-obj-m)
+obj-y += vdso/ probes/
+obj-$(CONFIG_COMPAT_VDSO) += vdso32/
head-y := head.o
extra-y += $(head-y) vmlinux.lds
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index ed46dc1..3a58e9d 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM64 Specific Low-Level ACPI Boot Support
*
@@ -7,16 +8,11 @@
* Author: Hanjun Guo <hanjun.guo@linaro.org>
* Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
* Author: Naresh Bhat <naresh.bhat@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
@@ -28,8 +24,10 @@
#include <linux/smp.h>
#include <linux/serial_core.h>
+#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/pgtable.h>
#include <asm/smp_plat.h>
@@ -154,10 +152,14 @@
*/
if (table->revision < 5 ||
(table->revision == 5 && fadt->minor_revision < 1)) {
- pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
table->revision, fadt->minor_revision);
- ret = -EINVAL;
- goto out;
+
+ if (!fadt->arm_boot_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
}
if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
@@ -257,3 +259,32 @@
return __pgprot(PROT_NORMAL_NC);
return __pgprot(PROT_DEVICE_nGnRnE);
}
+
+/*
+ * Claim Synchronous External Aborts as a firmware first notification.
+ *
+ * Used by KVM and the arch do_sea handler.
+ * @regs may be NULL when called from process context.
+ */
+int apei_claim_sea(struct pt_regs *regs)
+{
+ int err = -ENOENT;
+ unsigned long current_flags;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return err;
+
+ current_flags = arch_local_save_flags();
+
+ /*
+ * SEA can interrupt SError, mask it and describe this as an NMI so
+ * that APEI defers the handling.
+ */
+ local_daif_restore(DAIF_ERRCTX);
+ nmi_enter();
+ err = ghes_notify_sea();
+ nmi_exit();
+ local_daif_restore(current_flags);
+
+ return err;
+}
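
A note on apei_claim_sea(): it masks SError via DAIF and brackets the GHES
call with nmi_enter()/nmi_exit() so that APEI treats the notification as an
NMI and defers the heavy lifting. A minimal sketch of a caller, assuming a
do_sea()-style fault handler (handler name and signature are illustrative,
not part of this patch):

    static int do_sea(unsigned long addr, unsigned int esr,
                      struct pt_regs *regs)
    {
            /*
             * Let APEI/GHES claim the Synchronous External Abort first;
             * 0 means firmware owned and handled the error.
             */
            if (apei_claim_sea(regs) == 0)
                    return 0;

            /* Otherwise take the normal fatal-fault path. */
            return 1;
    }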
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index 4f4f181..7ff8000 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -18,7 +18,6 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
@@ -46,7 +45,7 @@
return -EINVAL;
}
-static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
+static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_gicc_affinity *pa;
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index 98a20e5..e7c941d 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM64 ACPI Parking Protocol implementation
*
* Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
* Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
#include <linux/mm.h>
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d6039..d1757ef 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* alternative runtime patching
* inspired by the x86 version
*
* Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "alternatives: " fmt
@@ -32,13 +21,23 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
};
+bool alternative_is_applied(u16 cpufeature)
+{
+ if (WARN_ON(cpufeature >= ARM64_NCAPS))
+ return false;
+
+ return test_bit(cpufeature, applied_alternatives);
+}
+
/*
* Check if the target PC is within an alternative block.
*/
@@ -145,7 +144,8 @@
} while (cur += d_size, cur < end);
}
-static void __apply_alternatives(void *alt_region, bool is_module)
+static void __apply_alternatives(void *alt_region, bool is_module,
+ unsigned long *feature_mask)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -155,6 +155,9 @@
for (alt = region->begin; alt < region->end; alt++) {
int nr_inst;
+ if (!test_bit(alt->cpufeature, feature_mask))
+ continue;
+
/* Use ARM64_CB_PATCH as an unconditional patch */
if (alt->cpufeature < ARM64_CB_PATCH &&
!cpus_have_cap(alt->cpufeature))
@@ -192,6 +195,12 @@
dsb(ish);
__flush_icache_all();
isb();
+
+ /* Ignore ARM64_CB bit from feature mask */
+ bitmap_or(applied_alternatives, applied_alternatives,
+ feature_mask, ARM64_NCAPS);
+ bitmap_and(applied_alternatives, applied_alternatives,
+ cpu_hwcaps, ARM64_NCAPS);
}
}
@@ -208,14 +217,19 @@
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(alternatives_applied))
+ while (!READ_ONCE(all_alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(alternatives_applied);
- __apply_alternatives(&region, false);
+ DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+
+ bitmap_complement(remaining_capabilities, boot_capabilities,
+ ARM64_NPATCHABLE);
+
+ BUG_ON(all_alternatives_applied);
+ __apply_alternatives(&region, false, remaining_capabilities);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(alternatives_applied, 1);
+ WRITE_ONCE(all_alternatives_applied, 1);
}
return 0;
@@ -227,6 +241,24 @@
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
+ /* If called on non-boot cpu things could go wrong */
+ WARN_ON(smp_processor_id() != 0);
+
+ __apply_alternatives(&region, false, &boot_capabilities[0]);
+}
+
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
@@ -234,7 +266,10 @@
.begin = start,
.end = start + length,
};
+ DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
- __apply_alternatives(&region, true);
+ bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+
+ __apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif
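
The reworked patching above splits alternatives into two passes: boot-scope
capabilities are patched early on the boot CPU via apply_boot_alternatives()
(using the boot_capabilities mask), and the complement is patched later
under stop_machine() once all CPUs are online. A sketch of the intended
ordering (call sites paraphrased, not part of this hunk):

    /* Boot CPU, early in setup, after boot-CPU feature detection: */
    apply_boot_alternatives();

    /* Later, once secondary CPUs are up and features are finalised: */
    apply_alternatives_all();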
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
deleted file mode 100644
index d894a20..0000000
--- a/arch/arm64/kernel/arm64ksyms.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Based on arch/arm/kernel/armksyms.c
- *
- * Copyright (C) 2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/cryptohash.h>
-#include <linux/delay.h>
-#include <linux/in6.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/arm-smccc.h>
-#include <linux/kprobes.h>
-
-#include <asm/checksum.h>
-
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
- /* user mem (segment) */
-EXPORT_SYMBOL(__arch_copy_from_user);
-EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__arch_clear_user);
-EXPORT_SYMBOL(__arch_copy_in_user);
-
- /* physical memory */
-EXPORT_SYMBOL(memstart_addr);
-
- /* string / mem functions */
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
-
- /* atomic bitops */
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(change_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-NOKPROBE_SYMBOL(_mcount);
-#endif
-
- /* arm-smccc */
-EXPORT_SYMBOL(__arm_smccc_smc);
-EXPORT_SYMBOL(__arm_smccc_hvc);
-
- /* tishift.S */
-extern long long __ashlti3(long long a, int b);
-EXPORT_SYMBOL(__ashlti3);
-extern long long __ashrti3(long long a, int b);
-EXPORT_SYMBOL(__ashrti3);
-extern long long __lshrti3(long long a, int b);
-EXPORT_SYMBOL(__lshrti3);
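
Deleting arm64ksyms.c follows the upstream move of each export next to its
definition. For assembly routines this relies on the asm-capable
EXPORT_SYMBOL from <asm-generic/export.h>; a representative fragment
(illustrative, paraphrasing the lib/*.S files rather than quoting them):

    #include <asm-generic/export.h>

    ENTRY(memset)
            /* ... implementation ... */
    ENDPROC(memset)
    EXPORT_SYMBOL(memset)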
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 92be1d1..ca158be 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/cpu.h>
@@ -177,6 +174,9 @@
struct insn_emulation *insn;
insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return;
+
insn->ops = ops;
insn->min = INSN_UNDEF;
@@ -236,6 +236,8 @@
insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
GFP_KERNEL);
+ if (!insns_sysctl)
+ return;
raw_spin_lock_irqsave(&insn_emulation_lock, flags);
list_for_each_entry(insn, &insn_emulation, node) {
@@ -402,7 +404,7 @@
/* Check access in reasonable access range for both SWP and SWPB */
user_ptr = (const void __user *)(unsigned long)(address & ~3);
- if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
+ if (!access_ok(user_ptr, 4)) {
pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
address);
goto fault;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 323aeb5..2146857 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/asm-offsets.c
*
* Copyright (C) 1995-2003 Russell King
* 2001-2002 Keith Owens
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm_sdei.h>
@@ -29,9 +18,9 @@
#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/signal32.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
-#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
#include <linux/arm-smccc.h>
@@ -46,17 +35,16 @@
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif
DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+ DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
- DEFINE(S_X1, offsetof(struct pt_regs, regs[1]));
DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
- DEFINE(S_X3, offsetof(struct pt_regs, regs[3]));
DEFINE(S_X4, offsetof(struct pt_regs, regs[4]));
- DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
- DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
@@ -70,17 +58,19 @@
DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
-#ifdef CONFIG_COMPAT
- DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp));
-#endif
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc));
- DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_COMPAT
+ DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
+ DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0));
+ BLANK();
+#endif
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
BLANK();
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
@@ -90,46 +80,11 @@
BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
- DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
BLANK();
- DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
- DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
- DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
- DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
- DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- BLANK();
- DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
- DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
- DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
- DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
- DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
- DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
- DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
- DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
- DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
- DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
- DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
- DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
- DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
- DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
- DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
- DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall));
- BLANK();
- DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec));
- DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec));
- DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
- DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
- BLANK();
- DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
- DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
- BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
@@ -137,15 +92,18 @@
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
+ DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
+ DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+ DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
+ DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
- DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
- DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
- DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
#endif
#ifdef CONFIG_CPU_PM
- DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
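
For context, asm-offsets.c exists only to generate assembler-visible
constants: DEFINE() from <linux/kbuild.h> expands to a marker that the
build scrapes into include/generated/asm-offsets.h. A minimal sketch of
the pattern (illustrative, mirroring the file's structure):

    #include <linux/kbuild.h>
    #include <linux/sched.h>

    int main(void)
    {
            /* Emits "#define TSK_STACK <offset>" for use from assembly. */
            DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
            return 0;
    }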
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 0bf0a83..7fa6828 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM64 cacheinfo support
*
* Copyright (C) 2015 ARM Ltd.
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
@@ -28,6 +17,15 @@
#define CLIDR_CTYPE(clidr, level) \
(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+int cache_line_size(void)
+{
+ if (coherency_max_size != 0)
+ return coherency_max_size;
+
+ return cache_line_size_of_cpu();
+}
+EXPORT_SYMBOL_GPL(cache_line_size);
+
static inline enum cache_type get_cache_type(int level)
{
u64 clidr;
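
The new cache_line_size() prefers the firmware-described coherency_max_size
(from ACPI/DT cacheinfo) and only falls back to the CPU's own line size.
The fallback derives it from CTR_EL0.CWG; a sketch consistent with the
arm64 cache header (assumed, not part of this hunk):

    static inline int cache_line_size_of_cpu(void)
    {
            u32 cwg = cache_type_cwg();     /* CTR_EL0.CWG field */

            return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
    }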
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 8021b46..6ea337d 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* CPU reset routines
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Huawei Futurewei Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
@@ -22,11 +19,11 @@
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
* cpu_soft_restart.
*
- * @el2_switch: Flag to indicate a swich to EL2 is needed.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
* @entry: Location to jump to for soft reset.
- * arg0: First argument passed to @entry.
- * arg1: Second argument passed to @entry.
- * arg2: Third argument passed to @entry.
+ * arg0: First argument passed to @entry. (relocation list)
+ * arg1: Second argument passed to @entry. (physical kernel entry)
+ * arg2: Third argument passed to @entry. (physical dtb address)
*
* Put the CPU into the same state as it would be if it had been reset, and
* branch to what would be the reset vector. It must be executed with the
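
The clarified argument list matches how kexec drives this path: the
relocation code is entered with the relocation list, the physical kernel
entry point and the physical DTB address. An illustrative call site
(identifier names assumed from machine_kexec.c):

    cpu_soft_restart(reboot_code_buffer_phys, kimage->head,
                     kimage->start, kimage->arch.dtb_mem);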
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index fad90e4..ed50e95 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* CPU reset routines
*
* Copyright (C) 2015 Huawei Futurewei Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARM64_CPU_RESET_H
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index dec1089..93f34b4 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -1,27 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Contains CPU specific errata definitions
*
* Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -68,26 +59,47 @@
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u64 mask = CTR_CACHE_MINLINE_MASK;
-
- /* Skip matching the min line sizes for cache type check */
- if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
- mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
+ u64 ctr_raw, ctr_real;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & mask);
+
+ /*
+ * We want to make sure that all the CPUs in the system expose
+ * a consistent CTR_EL0 so that applications behave correctly
+ * with migration.
+ *
+ * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
+ *
+ * 1) It is safe if the system doesn't support IDC, as the CPU
+ * anyway reports IDC = 0, consistent with the rest.
+ *
+ * 2) If the system has IDC, it is still safe as we trap CTR_EL0
+ * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
+ *
+ * So, we need to make sure either the raw CTR_EL0 or the effective
+ * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+ */
+ ctr_raw = read_cpuid_cachetype() & mask;
+ ctr_real = read_cpuid_effective_cachetype() & mask;
+
+ return (ctr_real != sys) && (ctr_raw != sys);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+
+ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+ if ((read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -109,14 +121,23 @@
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
{
- static DEFINE_SPINLOCK(bp_lock);
+ static DEFINE_RAW_SPINLOCK(bp_lock);
int cpu, slot = -1;
- spin_lock(&bp_lock);
+ /*
+ * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+ * we're a guest. Skip the hyp-vectors work.
+ */
+ if (!hyp_vecs_start) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
+ raw_spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
@@ -132,13 +153,13 @@
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
- spin_unlock(&bp_lock);
+ raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
@@ -146,23 +167,6 @@
}
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
- bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- u64 pfr0;
-
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
- pfr0 = read_cpuid(ID_AA64PFR0_EL1);
- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
- return;
-
- __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
@@ -189,60 +193,83 @@
: "=&r" (tmp));
}
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ * 0: No workaround required
+ * 1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
{
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
u32 midr = read_cpuid_id();
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
- return;
+ return -1;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_hvc_arch_workaround_1;
+ /* This is a guest, no need to patch KVM vectors */
+ smccc_start = NULL;
+ smccc_end = NULL;
+ break;
+ default:
+ return -1;
+ }
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+ default:
+ return -1;
+ }
break;
default:
- return;
+ return -1;
}
if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
cb = qcom_link_stack_sanitization;
- install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+ if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+ install_bp_hardening_cb(cb, smccc_start, smccc_end);
- return;
+ return 1;
}
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
static const struct ssbd_options {
const char *str;
@@ -312,6 +339,19 @@
void arm64_set_ssbd_mitigation(bool state)
{
+ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+ pr_info_once("SSBD disabled by kernel configuration\n");
+ return;
+ }
+
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (state)
+ asm volatile(SET_PSTATE_SSBS(0));
+ else
+ asm volatile(SET_PSTATE_SSBS(1));
+ return;
+ }
+
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
@@ -333,11 +373,28 @@
struct arm_smccc_res res;
bool required = true;
s32 val;
+ bool this_cpu_safe = false;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (cpu_mitigations_off())
+ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+ /* delay setting __ssb_safe until we get a firmware response */
+ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+ this_cpu_safe = true;
+
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (!this_cpu_safe)
+ __ssb_safe = false;
+ required = false;
+ goto out_printmsg;
+ }
+
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -354,6 +411,8 @@
default:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -362,14 +421,18 @@
switch (val) {
case SMCCC_RET_NOT_SUPPORTED:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
+ /* machines with mixed mitigation requirements must not return this */
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
return false;
case SMCCC_RET_SUCCESS:
+ __ssb_safe = false;
required = true;
break;
@@ -379,12 +442,13 @@
default:
WARN_ON(1);
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
- pr_info_once("%s disabled from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(false);
required = false;
break;
@@ -397,7 +461,6 @@
break;
case ARM64_SSBD_FORCE_ENABLE:
- pr_info_once("%s forced from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(true);
required = true;
break;
@@ -407,9 +470,50 @@
break;
}
+out_printmsg:
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+ pr_info_once("%s disabled from command-line\n", entry->desc);
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+ pr_info_once("%s forced from command-line\n", entry->desc);
+ break;
+ }
+
return required;
}
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ {},
+};
+
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ /* Cortex-A76 r0p0 - r3p1 */
+ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
+static void __maybe_unused
+cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+}
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
@@ -448,58 +552,103 @@
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
+
+int get_spectre_v2_workaround_state(void)
+{
+ if (__spectrev2_safe)
+ return ARM64_BP_HARDEN_NOT_REQUIRED;
+
+ if (!__hardenbp_enab)
+ return ARM64_BP_HARDEN_UNKNOWN;
+
+ return ARM64_BP_HARDEN_WA_NEEDED;
+}
+
/*
- * Generic helper for handling capabilties with multiple (match,enable) pairs
- * of call backs, sharing the same capability bit.
- * Iterate over each entry to see if at least one matches.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+ */
+static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ { /* sentinel */ }
+};
+
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
*/
static bool __maybe_unused
-multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
- const struct arm64_cpu_capabilities *caps;
+ int need_wa;
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, scope))
- return true;
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return false;
+ /* If the CPU has CSV2 set, we're safe */
+ if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+ ID_AA64PFR0_CSV2_SHIFT))
+ return false;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return false;
+
+ /* Fallback to firmware detection */
+ need_wa = detect_harden_bp_fw();
+ if (!need_wa)
+ return false;
+
+ __spectrev2_safe = false;
+
+ if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+ pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ /* forced off */
+ if (__nospectre_v2 || cpu_mitigations_off()) {
+ pr_info_once("spectrev2 mitigation disabled by command line option\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ if (need_wa < 0) {
+ pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+ __hardenbp_enab = false;
+ }
+
+ return (need_wa > 0);
}
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static void __maybe_unused
-multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
- const struct arm64_cpu_capabilities *caps;
-
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
- caps->cpu_enable)
- caps->cpu_enable(caps);
-}
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-
-/*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
- */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
- MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
{},
};
-#endif
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ int i;
+
+ if (!is_affected_midr_range_list(entry, scope) ||
+ !is_hyp_mode_available())
+ return false;
+
+ for_each_possible_cpu(i) {
+ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+ return true;
+ }
+
+ return false;
+}
#ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -511,24 +660,124 @@
#endif
-const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ },
+#endif
+ {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+const struct midr_range cavium_erratum_27456_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+ /* Cavium ThunderX, T81 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+static const struct midr_range cavium_erratum_30115_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
+ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+ MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
+ /* Cavium ThunderX, T83 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
defined(CONFIG_ARM64_ERRATUM_824069)
- {
- /* Cortex-A53 r0p[012] */
- .desc = "ARM errata 826319, 827319, 824069",
- .capability = ARM64_WORKAROUND_CLEAN_CACHE,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
- .cpu_enable = cpu_enable_cache_maint_trap,
- },
+ /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
-#ifdef CONFIG_ARM64_ERRATUM_819472
+#ifdef CONFIG_ARM64_ERRATUM_819472
+ /* Cortex-A53 r0p[01] : ARM errata 819472 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+#endif
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+ /* Cortex-A76 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Neoverse-N1 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+ /* Cortex-A53 r0p[01234] */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ /* Brahma-B53 r0p[0] */
+ MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
{
- /* Cortex-A53 r0p[01] */
- .desc = "ARM errata 819472",
+ /* Cortex-A53 r0p[01234] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ MIDR_FIXED(0x4, BIT(8)),
+ },
+ {
+ /* Brahma-B53 r0p[0] */
+ .matches = is_affected_midr_range,
+ ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ },
+ {},
+};
+#endif
+
+const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ {
+ .desc = "ARM errata 826319, 827319, 824069, 819472",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+ ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
.cpu_enable = cpu_enable_cache_maint_trap,
},
#endif
@@ -554,19 +803,18 @@
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
{
- /* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 843419",
.capability = ARM64_WORKAROUND_843419,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
- MIDR_FIXED(0x4, BIT(8)),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = erratum_843419_list,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
{
- /* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 845719",
.capability = ARM64_WORKAROUND_845719,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -579,51 +827,20 @@
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
{
- /* Cavium ThunderX, T88 pass 1.x - 2.1 */
.desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456,
- ERRATA_MIDR_RANGE(MIDR_THUNDERX,
- 0, 0,
- 1, 1),
- },
- {
- /* Cavium ThunderX, T81 pass 1.0 */
- .desc = "Cavium erratum 27456",
- .capability = ARM64_WORKAROUND_CAVIUM_27456,
- ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
{
- /* Cavium ThunderX, T88 pass 1.x - 2.2 */
.desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_RANGE(MIDR_THUNDERX,
- 0, 0,
- 1, 2),
- },
- {
- /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
- .desc = "Cavium erratum 30115",
- .capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
- },
- {
- /* Cavium ThunderX, T83 pass 1.0 */
- .desc = "Cavium erratum 30115",
- .capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
},
#endif
{
- .desc = "Mismatched cache line size",
- .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
- .matches = has_mismatched_cache_type,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .cpu_enable = cpu_enable_trap_ctr_access,
- },
- {
- .desc = "Mismatched cache type",
+ .desc = "Mismatched cache type (CTR_EL0)",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
@@ -631,23 +848,20 @@
},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
{
- .desc = "Qualcomm Technologies Falkor erratum 1003",
- .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
- ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
- },
- {
- .desc = "Qualcomm Technologies Kryo erratum 1003",
+ .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .midr_range.model = MIDR_QCOM_KRYO,
- .matches = is_kryo_midr,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = qcom_erratum_1003_list,
},
#endif
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
{
- .desc = "Qualcomm Technologies Falkor erratum 1009",
+ .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
.capability = ARM64_WORKAROUND_REPEAT_TLBI,
- ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = arm64_repeat_tlbi_list,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
@@ -658,13 +872,11 @@
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
},
#endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
- .cpu_enable = enable_smccc_arch_workaround_1,
- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = check_branch_predictor,
},
-#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
.desc = "EL2 vector hardening",
@@ -672,14 +884,86 @@
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypass Disable",
.capability = ARM64_SSBD,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_ssbd_mitigation,
+ .midr_range_list = arm64_ssb_cpus,
+ },
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+ {
+ /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1165522",
+ .capability = ARM64_WORKAROUND_1165522,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
+#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ .matches = needs_tx2_tvm_workaround,
+ },
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
},
#endif
{
}
};
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ switch (get_spectre_v2_workaround_state()) {
+ case ARM64_BP_HARDEN_NOT_REQUIRED:
+ return sprintf(buf, "Not affected\n");
+ case ARM64_BP_HARDEN_WA_NEEDED:
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+ case ARM64_BP_HARDEN_UNKNOWN:
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (__ssb_safe)
+ return sprintf(buf, "Not affected\n");
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_KERNEL:
+ case ARM64_SSBD_FORCE_ENABLE:
+ if (IS_ENABLED(CONFIG_ARM64_SSBD))
+ return sprintf(buf,
+ "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ }
+
+ return sprintf(buf, "Vulnerable\n");
+}
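
The cpu_show_*() hooks added above back the generic sysfs vulnerability
files under /sys/devices/system/cpu/vulnerabilities/. A small userspace
sketch of consuming one of them (the program is illustrative; the sysfs
path is the standard location):

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("spectre_v2: %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }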
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index ea00124..7e07072 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU kernel entry/exit control
*
* Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
@@ -85,6 +74,7 @@
pr_err("%pOF: missing enable-method property\n",
dn);
}
+ of_node_put(dn);
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
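
The added of_node_put() fixes a device-node refcount leak: of_get_cpu_node()
returns its node with an elevated refcount that the caller must drop once
done. The general pattern (illustrative):

    struct device_node *dn = of_get_cpu_node(cpu, NULL);

    if (dn) {
            /* ... read properties such as "enable-method" ... */
            of_node_put(dn);        /* balance the reference from the get */
    }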
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 93f69d8..80f459a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1,29 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Contains CPU feature definitions
*
* Copyright (C) 2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "CPU features: " fmt
#include <linux/bsearch.h>
#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -34,8 +25,8 @@
#include <asm/traps.h>
#include <asm/virt.h>
-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT \
@@ -51,6 +42,10 @@
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+
+/* Need also bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
/*
* Flag to indicate if we have computed the system wide
@@ -117,6 +112,7 @@
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
/*
* NOTE: Any changes to the visibility of features should be kept in
@@ -139,9 +135,19 @@
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -164,10 +170,37 @@
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ /*
+ * We already refuse to boot CPUs that don't support our configured
+ * page size, so we can only detect mismatches for a page size other
+ * than the one we're currently using. Unfortunately, SoCs like this
+ * exist in the wild so, even though we don't like it, we'll have to go
+ * along with it and treat them as non-strict.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
@@ -206,8 +239,8 @@
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -371,8 +404,8 @@
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
- ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -449,6 +482,10 @@
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
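
The new FTR_HIGHER_OR_ZERO_SAFE policy treats a zero field as "unknown" and lets it win the arbitration. A minimal standalone sketch of that rule (hypothetical names, not the kernel helper itself):

#include <stdio.h>

/* Zero means "value unknown" and must win; otherwise fall through
 * to the FTR_HIGHER_SAFE behaviour and take the higher value. */
static long higher_or_zero_safe(long cur, long new)
{
	if (!cur || !new)
		return 0;
	return new > cur ? new : cur;
}

int main(void)
{
	/* e.g. CTR_EL0.CWG: one CPU reports 4, another reports 0 */
	printf("%ld\n", higher_or_zero_safe(4, 0));	/* 0: unknown wins */
	printf("%ld\n", higher_or_zero_safe(4, 5));	/* 5: higher is safe */
	return 0;
}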
@@ -511,6 +548,29 @@
}
extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+ for (; caps->matches; caps++) {
+ if (WARN(caps->capability >= ARM64_NCAPS,
+ "Invalid capability %d\n", caps->capability))
+ continue;
+ if (WARN(cpu_hwcaps_ptrs[caps->capability],
+ "Duplicate entry for capability %d\n",
+ caps->capability))
+ continue;
+ cpu_hwcaps_ptrs[caps->capability] = caps;
+ }
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+ init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+ init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
static void __init setup_boot_cpu_capabilities(void);
void __init init_cpu_features(struct cpuinfo_arm64 *info)
@@ -557,6 +617,12 @@
}
/*
+ * Initialize the indirect array of CPU hwcaps capabilities pointers
+ * before we handle the boot CPU below.
+ */
+ init_cpu_hwcaps_indirect_list();
+
+ /*
* Detect and enable early CPU capabilities based on the boot CPU,
* after we have initialised the CPU feature infrastructure.
*/
@@ -657,7 +723,6 @@
/*
* EL3 is not our concern.
- * ID_AA64PFR1 is currently RES0.
*/
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -772,6 +837,7 @@
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
@@ -834,7 +900,7 @@
u32 midr = read_cpuid_id();
/* Cavium ThunderX pass 1.x and 2.x */
- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+ return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
@@ -855,11 +921,23 @@
if (scope == SCOPE_SYSTEM)
ctr = arm64_ftr_reg_ctrel0.sys_val;
else
- ctr = read_cpuid_cachetype();
+ ctr = read_cpuid_effective_cachetype();
return ctr & BIT(CTR_IDC_SHIFT);
}
+static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
+ * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
+ * to the CTR_EL0 on this CPU and emulate it with the real/safe
+ * value.
+ */
+ if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+}
+
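
Once cpu_emulate_effective_ctr() clears SCTLR_EL1.UCT, EL0 reads of CTR_EL0 trap to the kernel and are emulated with the system-safe value. An illustrative user-space probe (aarch64 only):

#include <stdio.h>

int main(void)
{
	unsigned long ctr;

	/* Traps to EL1 when SCTLR_EL1.UCT is clear; the kernel then
	 * supplies the system-wide safe CTR_EL0 value. */
	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
	printf("CTR_EL0 (effective): %#lx\n", ctr);
	return 0;
}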
static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -873,7 +951,21 @@
return ctr & BIT(CTR_DIC_SHIFT);
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __maybe_unused
+has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /*
+	 * Kdump isn't guaranteed to power-off all secondary CPUs; with CNP
+	 * enabled, we may share TLB entries with a CPU stuck in the
+	 * crashed kernel.
+ */
+ if (is_kdump_kernel())
+ return false;
+
+ return has_cpuid_feature(entry, scope);
+}
+
+static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -883,9 +975,26 @@
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};
- char const *str = "command line option";
+ char const *str = "kpti command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+ /* Defer to CPU feature registers */
+ if (has_cpuid_feature(entry, scope))
+ meltdown_safe = true;
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -897,6 +1006,24 @@
__kpti_forced = -1;
}
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ if (!__kpti_forced) {
+ str = "KASLR";
+ __kpti_forced = 1;
+ }
+ }
+
+ if (cpu_mitigations_off() && !__kpti_forced) {
+ str = "mitigations=off";
+ __kpti_forced = -1;
+ }
+
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+ return false;
+ }
+
/* Forced? */
if (__kpti_forced) {
pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -904,18 +1031,10 @@
return __kpti_forced > 0;
}
- /* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return true;
-
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
- return false;
-
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return !meltdown_safe;
}
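
unmap_kernel_at_el0() now resolves in a fixed order: CPU safety first, then KASLR, then mitigations=off, then any explicit kpti= override. A condensed model of that precedence (a sketch that omits the config gate and the ThunderX quirk, both handled in the real function):

/* forced mirrors __kpti_forced: 0 = not forced, >0 = on, <0 = off. */
static int kpti_required(int meltdown_safe, int kaslr_on,
			 int mitigations_off, int forced)
{
	if (kaslr_on && !forced)
		forced = 1;		/* useful for KASLR robustness */
	if (mitigations_off && !forced)
		forced = -1;		/* "mitigations=off" */
	if (forced)
		return forced > 0;
	return !meltdown_safe;		/* fall back to CPU detection */
}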
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
@@ -926,7 +1045,12 @@
static bool kpti_applied = false;
int cpu = smp_processor_id();
- if (kpti_applied)
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (kpti_applied || kaslr_offset() > 0)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -940,6 +1064,12 @@
return;
}
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
static int __init parse_kpti(char *str)
{
@@ -953,7 +1083,6 @@
return 0;
}
early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
@@ -1036,7 +1165,7 @@
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
* do anything here.
*/
- if (!alternatives_applied)
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
#endif
@@ -1049,11 +1178,99 @@
WARN_ON(val & (7 << 27 | 7 << 21));
}
+#ifdef CONFIG_ARM64_SSBD
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+ if (user_mode(regs))
+ return 1;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+ .instr_mask = ~(1U << PSTATE_Imm_shift),
+ .instr_val = 0xd500401f | PSTATE_SSBS,
+ .fn = ssbs_emulation_handler,
+};
+
+static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+{
+ static bool undef_hook_registered = false;
+ static DEFINE_RAW_SPINLOCK(hook_lock);
+
+ raw_spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+ raw_spin_unlock(&hook_lock);
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ arm64_set_ssbd_mitigation(false);
+ } else {
+ arm64_set_ssbd_mitigation(true);
+ }
+}
+#endif /* CONFIG_ARM64_SSBD */
+
+#ifdef CONFIG_ARM64_PAN
+static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+}
+#endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_RAS_EXTN
+static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+}
+#endif /* CONFIG_ARM64_RAS_EXTN */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+ return strtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
+}
+#endif
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
@@ -1132,7 +1349,6 @@
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1148,7 +1364,6 @@
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -1166,6 +1381,16 @@
.field_pos = ID_AA64ISAR1_DPB_SHIFT,
.min_field_value = 1,
},
+ {
+ .desc = "Data cache clean to Point of Deep Persistence",
+ .capability = ARM64_HAS_DCPODP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 2,
+ },
#endif
#ifdef CONFIG_ARM64_SVE
{
@@ -1198,6 +1423,7 @@
.capability = ARM64_HAS_CACHE_IDC,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_idc,
+ .cpu_enable = cpu_emulate_effective_ctr,
},
{
.desc = "Instruction cache invalidation not required for I/D coherence",
@@ -1236,50 +1462,208 @@
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
+ {
+ .desc = "CRC32 instructions",
+ .capability = ARM64_HAS_CRC32,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "Speculative Store Bypassing Safe (SSBS)",
+ .capability = ARM64_SSBS,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+ .cpu_enable = cpu_enable_ssbs,
+ },
+#endif
+#ifdef CONFIG_ARM64_CNP
+ {
+ .desc = "Common not Private translations",
+ .capability = ARM64_HAS_CNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_useable_cnp,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_CNP_SHIFT,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_cnp,
+ },
+#endif
+ {
+ .desc = "Speculation barrier (SB)",
+ .capability = ARM64_HAS_SB,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_SB_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PTR_AUTH
+ {
+ .desc = "Address authentication (architected algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Address authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Generic authentication (architected algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "Generic authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+ .matches = has_cpuid_feature,
+ },
+#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ {
+ /*
+ * Depends on having GICv3
+ */
+ .desc = "IRQ priority masking",
+ .capability = ARM64_HAS_IRQ_PRIO_MASKING,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = can_use_gic_priorities,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#endif
{},
};
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
- { \
- .desc = #cap, \
- .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
- .matches = has_cpuid_feature, \
- .sys_reg = reg, \
- .field_pos = field, \
- .sign = s, \
- .min_field_value = min_value, \
- .hwcap_type = cap_type, \
- .hwcap = cap, \
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ .matches = has_cpuid_feature, \
+ .sys_reg = reg, \
+ .field_pos = field, \
+ .sign = s, \
+ .min_field_value = min_value,
+
+#define __HWCAP_CAP(name, cap_type, cap) \
+ .desc = name, \
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
+ .hwcap_type = cap_type, \
+ .hwcap = cap, \
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
+#define HWCAP_MULTI_CAP(list, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = cpucap_multi_entry_cap_matches, \
+ .match_list = list, \
+ }
+
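
HWCAP_MULTI_CAP relies on cpucap_multi_entry_cap_matches(), which succeeds if any entry in match_list matches — roughly (a sketch of the semantics, not the exact kernel body):

static bool multi_entry_matches(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	const struct arm64_cpu_capabilities *caps;

	/* e.g. PACA is advertised for either the architected or the
	 * IMP DEF pointer-authentication algorithm below. */
	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;
	return false;
}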
+#ifdef CONFIG_ARM64_PTR_AUTH
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+ },
+ {},
+};
+
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+ },
+ {},
+};
+#endif
+
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+#endif
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+#ifdef CONFIG_ARM64_PTR_AUTH
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
{},
};
@@ -1299,7 +1683,7 @@
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
- elf_hwcap |= cap->hwcap;
+ cpu_set_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1322,7 +1706,7 @@
switch (cap->hwcap_type) {
case CAP_HWCAP:
- rc = (elf_hwcap & cap->hwcap) != 0;
+ rc = cpu_have_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1343,58 +1727,55 @@
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
- elf_hwcap |= HWCAP_CPUID;
+ cpu_set_named_feature(CPUID);
for (; hwcaps->matches; hwcaps++)
if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
cap_set_elf_hwcap(hwcaps);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
- unsigned int cap)
+static void update_cpu_capabilities(u16 scope_mask)
{
+ int i;
const struct arm64_cpu_capabilities *caps;
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = cap_array; caps->matches; caps++)
- if (caps->capability == cap)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
-}
-
-static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask, const char *info)
-{
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask) ||
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask) ||
+ cpus_have_cap(caps->capability) ||
!caps->matches(caps, cpucap_default_scope(caps)))
continue;
- if (!cpus_have_cap(caps->capability) && caps->desc)
- pr_info("%s %s\n", info, caps->desc);
+ if (caps->desc)
+ pr_info("detected: %s\n", caps->desc);
cpus_set_cap(caps->capability);
+
+ if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+ set_bit(caps->capability, boot_capabilities);
}
}
-static void update_cpu_capabilities(u16 scope_mask)
+/*
+ * Enable all the available capabilities on this CPU. The capabilities
+ * with BOOT_CPU scope are handled separately and hence skipped here.
+ */
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
{
- __update_cpu_capabilities(arm64_errata, scope_mask,
- "enabling workaround for");
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
-}
+ int i;
+ u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
-static int __enable_cpu_capability(void *arg)
-{
- const struct arm64_cpu_capabilities *cap = arg;
+ for_each_available_cap(i) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
- cap->cpu_enable(cap);
+ if (WARN_ON(!cap))
+ continue;
+
+ if (!(cap->type & non_boot_scope))
+ continue;
+
+ if (cap->cpu_enable)
+ cap->cpu_enable(cap);
+ }
return 0;
}
@@ -1402,21 +1783,29 @@
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static void __init enable_cpu_capabilities(u16 scope_mask)
{
- scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- unsigned int num = caps->capability;
+ int i;
+ const struct arm64_cpu_capabilities *caps;
+ bool boot_scope;
- if (!(caps->type & scope_mask) || !cpus_have_cap(num))
+ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+ boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
+
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ unsigned int num;
+
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
+ continue;
+ num = caps->capability;
+ if (!cpus_have_cap(num))
continue;
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
- if (caps->cpu_enable) {
+ if (boot_scope && caps->cpu_enable)
/*
* Capabilities with SCOPE_BOOT_CPU scope are finalised
* before any secondary CPU boots. Thus, each secondary
@@ -1425,25 +1814,19 @@
* the boot CPU, for which the capability must be
* enabled here. This approach avoids costly
* stop_machine() calls for this case.
- *
- * Otherwise, use stop_machine() as it schedules the
- * work allowing us to modify PSTATE, instead of
- * on_each_cpu() which uses an IPI, giving us a PSTATE
- * that disappears when we return.
*/
- if (scope_mask & SCOPE_BOOT_CPU)
- caps->cpu_enable(caps);
- else
- stop_machine(__enable_cpu_capability,
- (void *)caps, cpu_online_mask);
- }
+ caps->cpu_enable(caps);
}
-}
-static void __init enable_cpu_capabilities(u16 scope_mask)
-{
- __enable_cpu_capabilities(arm64_errata, scope_mask);
- __enable_cpu_capabilities(arm64_features, scope_mask);
+ /*
+ * For all non-boot scope capabilities, use stop_machine()
+ * as it schedules the work allowing us to modify PSTATE,
+ * instead of on_each_cpu() which uses an IPI, giving us a
+ * PSTATE that disappears when we return.
+ */
+ if (!boot_scope)
+ stop_machine(cpu_enable_non_boot_scope_capabilities,
+ NULL, cpu_online_mask);
}
/*
@@ -1453,16 +1836,17 @@
*
* Returns "false" on conflicts.
*/
-static bool
-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static bool verify_local_cpu_caps(u16 scope_mask)
{
+ int i;
bool cpu_has_cap, system_has_cap;
+ const struct arm64_cpu_capabilities *caps;
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask))
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
continue;
cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
@@ -1493,7 +1877,7 @@
}
}
- if (caps->matches) {
+ if (i < ARM64_NCAPS) {
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
smp_processor_id(), caps->capability,
caps->desc, system_has_cap, cpu_has_cap);
@@ -1503,12 +1887,6 @@
return true;
}
-static bool verify_local_cpu_caps(u16 scope_mask)
-{
- return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
- __verify_local_cpu_caps(arm64_features, scope_mask);
-}
-
/*
* Check for CPU features that are used in early boot
* based on the Boot CPU value.
@@ -1545,7 +1923,7 @@
unsigned int len = zcr & ZCR_ELx_LEN_MASK;
if (len < safe_len || sve_verify_vq_map()) {
- pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
@@ -1617,12 +1995,45 @@
static_branch_enable(&arm64_const_caps_ready);
}
-extern const struct arm64_cpu_capabilities arm64_errata[];
-
-bool this_cpu_has_cap(unsigned int cap)
+bool this_cpu_has_cap(unsigned int n)
{
- return (__this_cpu_has_cap(arm64_features, cap) ||
- __this_cpu_has_cap(arm64_errata, cap));
+ if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_LOCAL_CPU);
+ }
+
+ return false;
+}
+
+void cpu_set_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ elf_hwcap |= BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_set_feature);
+
+bool cpu_have_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ return elf_hwcap & BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_have_feature);
+
+unsigned long cpu_get_elf_hwcap(void)
+{
+ /*
+ * We currently only populate the first 32 bits of AT_HWCAP. Please
+ * note that for userspace compatibility we guarantee that bits 62
+ * and 63 will always be returned as 0.
+ */
+ return lower_32_bits(elf_hwcap);
+}
+
+unsigned long cpu_get_elf_hwcap2(void)
+{
+ return upper_32_bits(elf_hwcap);
}
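
This split is exactly what userspace observes through the auxiliary vector: cpu_get_elf_hwcap() feeds AT_HWCAP and cpu_get_elf_hwcap2() feeds AT_HWCAP2. An illustrative consumer (assumes a Linux/arm64 toolchain exposing HWCAP_*/HWCAP2_* in <asm/hwcap.h>):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);	/* lower 32 bits */
	unsigned long hwcap2 = getauxval(AT_HWCAP2);	/* the rest */

	printf("sve:  %d\n", !!(hwcap & HWCAP_SVE));
#ifdef HWCAP2_SVE2
	printf("sve2: %d\n", !!(hwcap2 & HWCAP2_SVE2));
#endif
	return 0;
}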
static void __init setup_system_capabilities(void)
@@ -1672,6 +2083,11 @@
return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
+static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
+{
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+}
+
/*
* We emulate only the following system register space.
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
@@ -1733,25 +2149,30 @@
return 0;
}
-static int emulate_mrs(struct pt_regs *regs, u32 insn)
+int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
{
int rc;
- u32 sys_reg, dst;
u64 val;
+ rc = emulate_sys_reg(sys_reg, &val);
+ if (!rc) {
+ pt_regs_write_reg(regs, rt, val);
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
+ return rc;
+}
+
+static int emulate_mrs(struct pt_regs *regs, u32 insn)
+{
+ u32 sys_reg, rt;
+
/*
* sys_reg values are defined as used in mrs/msr instruction.
* shift the imm value to get the encoding.
*/
sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
- rc = emulate_sys_reg(sys_reg, &val);
- if (!rc) {
- dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
- pt_regs_write_reg(regs, dst, val);
- arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
- }
-
- return rc;
+ rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+ return do_emulate_mrs(regs, sys_reg, rt);
}
static struct undef_hook mrs_hook = {
@@ -1770,8 +2191,14 @@
core_initcall(enable_mrs_emulation);
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- /* Firmware may have left a deferred SError in this register. */
- write_sysreg_s(0, SYS_DISR_EL1);
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ return sprintf(buf, "Vulnerable\n");
}
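
cpu_show_meltdown() backs the generic sysfs vulnerabilities interface, so the reported state can be read from /sys/devices/system/cpu/vulnerabilities/meltdown. For example (illustrative):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

	if (f && fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* "Not affected", "Mitigation: PTI", ... */
	if (f)
		fclose(f);
	return 0;
}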
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index f2d1381..e4d6af2 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM64 CPU idle arch support
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -14,6 +11,7 @@
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/psci.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
@@ -49,17 +47,58 @@
#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ /*
+	 * If the PSCI cpu_suspend function hook has not been initialized,
+	 * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+		 * Only bits[31:0] represent a PSCI power_state, while
+		 * bits[63:32] must be 0x0, as per the ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
- return arm_cpuidle_init(cpu);
+ return psci_acpi_cpu_init_idle(cpu);
}
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
+ u32 state = lpi->address;
+
if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
- return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
- lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
else
- return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
}
#endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index e9ab7b3..05933c0 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Record and handle CPU attributes.
*
* Copyright (C) 2014 ARM Ltd.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <asm/arch_timer.h>
#include <asm/cache.h>
@@ -43,7 +33,7 @@
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
-static char *icache_policy_str[] = {
+static const char *icache_policy_str[] = {
[0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN",
[ICACHE_POLICY_VIPT] = "VIPT",
[ICACHE_POLICY_PIPT] = "PIPT",
@@ -81,6 +71,19 @@
"uscat",
"ilrcpc",
"flagm",
+ "ssbs",
+ "sb",
+ "paca",
+ "pacg",
+ "dcpodp",
+ "sve2",
+ "sveaes",
+ "svepmull",
+ "svebitperm",
+ "svesha3",
+ "svesm4",
+ "flagm2",
+ "frint",
NULL
};
@@ -163,7 +166,7 @@
#endif /* CONFIG_COMPAT */
} else {
for (j = 0; hwcap_str[j]; j++)
- if (elf_hwcap & (1 << j))
+ if (cpu_have_feature(j))
seq_printf(m, " %s", hwcap_str[j]);
}
seq_puts(m, "\n");
@@ -324,7 +327,15 @@
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
- info->reg_ctr = read_cpuid_cachetype();
+ /*
+	 * Use the effective value of CTR_EL0 rather than the raw value
+	 * exposed by the CPU. The CTR_EL0.IDC field value must be interpreted
+	 * together with the CLIDR_EL1 fields to avoid triggering false
+	 * warnings when there is a mismatch across the CPUs. Keep track of
+	 * the effective value of CTR_EL0 in our internal records for
+	 * accurate sanity checks and feature enablement.
+ */
+ info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
info->reg_revidr = read_cpuid(REVIDR_EL1);
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index f46d57c..e6e2842 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Routines for doing kexec-based kdump
*
* Copyright (C) 2017 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/crash_dump.h>
@@ -58,7 +55,7 @@
/**
* elfcorehdr_read - read from ELF core header
* @buf: buffer where the data is placed
- * @csize: number of bytes to read
+ * @count: number of bytes to read
* @ppos: address in the memory
*
* This function reads @count bytes from elf core header which exists
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 06ca574..48222a4 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARMv8 single-step debug support and mdscr context switching.
*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*/
@@ -135,6 +124,7 @@
*/
static int clear_os_lock(unsigned int cpu)
{
+ write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;
@@ -163,25 +153,46 @@
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
-/* EL1 Single Step Handler hooks */
-static LIST_HEAD(step_hook);
-static DEFINE_SPINLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(debug_hook_lock);
+static LIST_HEAD(user_step_hook);
+static LIST_HEAD(kernel_step_hook);
-void register_step_hook(struct step_hook *hook)
+static void register_debug_hook(struct list_head *node, struct list_head *list)
{
- spin_lock(&step_hook_lock);
- list_add_rcu(&hook->node, &step_hook);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_add_rcu(node, list);
+	spin_unlock(&debug_hook_lock);
}
-void unregister_step_hook(struct step_hook *hook)
+static void unregister_debug_hook(struct list_head *node)
{
- spin_lock(&step_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_del_rcu(node);
+ spin_unlock(&debug_hook_lock);
synchronize_rcu();
}
+void register_user_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &user_step_hook);
+}
+
+void unregister_user_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_step_hook);
+}
+
+void unregister_kernel_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
/*
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
@@ -191,18 +202,21 @@
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
struct step_hook *hook;
+ struct list_head *list;
int retval = DBG_HOOK_ERROR;
- rcu_read_lock();
+ list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
- list_for_each_entry_rcu(hook, &step_hook, node) {
+ /*
+	 * Since the single-step exception disables interrupts, this function
+	 * is not preemptible, so it is safe to walk the RCU list here.
+ */
+ list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- rcu_read_unlock();
-
return retval;
}
NOKPROBE_SYMBOL(call_step_hook);
@@ -210,13 +224,6 @@
static void send_user_sigtrap(int si_code)
{
struct pt_regs *regs = current_pt_regs();
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)instruction_pointer(regs);
if (WARN_ON(!user_mode(regs)))
return;
@@ -224,10 +231,12 @@
if (interrupts_enabled(regs))
local_irq_enable();
- arm64_force_sig_info(&info, "User debug trap", current);
+ arm64_force_sig_fault(SIGTRAP, si_code,
+ (void __user *)instruction_pointer(regs),
+ "User debug trap");
}
-static int single_step_handler(unsigned long addr, unsigned int esr,
+static int single_step_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
bool handler_found = false;
@@ -239,10 +248,6 @@
if (!reinstall_suspended_bps(regs))
return 0;
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
-#endif
if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
handler_found = true;
@@ -269,61 +274,61 @@
}
NOKPROBE_SYMBOL(single_step_handler);
-/*
- * Breakpoint handler is re-entrant as another breakpoint can
- * hit within breakpoint handler, especically in kprobes.
- * Use reader/writer locks instead of plain spinlock.
- */
-static LIST_HEAD(break_hook);
-static DEFINE_SPINLOCK(break_hook_lock);
+static LIST_HEAD(user_break_hook);
+static LIST_HEAD(kernel_break_hook);
-void register_break_hook(struct break_hook *hook)
+void register_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_add_rcu(&hook->node, &break_hook);
- spin_unlock(&break_hook_lock);
+ register_debug_hook(&hook->node, &user_break_hook);
}
-void unregister_break_hook(struct break_hook *hook)
+void unregister_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&break_hook_lock);
- synchronize_rcu();
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_break_hook(struct break_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_break_hook);
+}
+
+void unregister_kernel_break_hook(struct break_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
struct break_hook *hook;
+ struct list_head *list;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(hook, &break_hook, node)
- if ((esr & hook->esr_mask) == hook->esr_val)
+ list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
+
+ /*
+	 * Since the brk exception disables interrupts, this function
+	 * is not preemptible, so it is safe to walk the RCU list here.
+ */
+ list_for_each_entry_rcu(hook, list, node) {
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
- rcu_read_unlock();
+ }
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);
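
With the comment-based matching above — a hook fires when (comment & ~hook->mask) == hook->imm — a kernel-side user of the new interface would register roughly as follows (hypothetical immediate, not an allocated one):

#define MY_BRK_IMM	0x800	/* made-up BRK immediate for illustration */

static int my_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_info("hit brk #%#x\n", MY_BRK_IMM);
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook my_break_hook = {
	.imm	= MY_BRK_IMM,
	.fn	= my_brk_handler,
};

/* ... register_kernel_break_hook(&my_break_hook); */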
-static int brk_handler(unsigned long addr, unsigned int esr,
+static int brk_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
- bool handler_found = false;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
-#ifdef CONFIG_KPROBES
- if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
- if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
- }
-#endif
- if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
-
- if (!handler_found && user_mode(regs)) {
+ if (user_mode(regs)) {
send_user_sigtrap(TRAP_BRKPT);
- } else if (!handler_found) {
+ } else {
pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 6b9736c..304d5b0 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* EFI entry point.
*
* Copyright (C) 2013, 2014 Red Hat, Inc.
* Author: Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/linkage.h>
#include <linux/init.h>
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
index 613fc30..a7cfacc 100644
--- a/arch/arm64/kernel/efi-header.S
+++ b/arch/arm64/kernel/efi-header.S
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 - 2017 Linaro, Ltd.
* Copyright (C) 2013, 2014 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/pe.h>
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index 05235eb..3fc7110 100644
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 4f9acb5..d0cf596 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 2.4
*
* Copyright (C) 2013, 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/efi.h>
@@ -86,8 +82,7 @@
return 0;
}
-static int __init set_permissions(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
pte_t pte = READ_ONCE(*ptep);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 12d4958..0f24eae 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* FP/SIMD state saving and restoring
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 1175f58..33d003d 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/entry-ftrace.S
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
@@ -79,7 +76,6 @@
.macro mcount_get_lr reg
ldr \reg, [x29]
ldr \reg, [\reg, #8]
- mcount_adjust_addr \reg, \reg
.endm
.macro mcount_get_lr_addr reg
@@ -121,6 +117,8 @@
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
ENDPROC(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
/*
@@ -132,6 +130,8 @@
ENTRY(_mcount)
ret
ENDPROC(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
/*
* void ftrace_caller(unsigned long return_address)
@@ -148,14 +148,12 @@
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
- .global ftrace_call
-ftrace_call: // tracer(pc, lr);
+GLOBAL(ftrace_call) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .global ftrace_graph_call
-ftrace_graph_call: // ftrace_graph_caller();
+GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -169,24 +167,6 @@
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* save return value regs*/
- .macro save_return_regs
- sub sp, sp, #64
- stp x0, x1, [sp]
- stp x2, x3, [sp, #16]
- stp x4, x5, [sp, #32]
- stp x6, x7, [sp, #48]
- .endm
-
- /* restore return value regs*/
- .macro restore_return_regs
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #16]
- ldp x4, x5, [sp, #32]
- ldp x6, x7, [sp, #48]
- add sp, sp, #64
- .endm
-
/*
* void ftrace_graph_caller(void)
*
@@ -197,10 +177,10 @@
* and run return_to_handler() later on its exit.
*/
ENTRY(ftrace_graph_caller)
- mcount_get_lr_addr x0 // pointer to function's saved lr
- mcount_get_pc x1 // function's pc
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
- bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
+ bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
ENDPROC(ftrace_graph_caller)
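
The operand swap above matches the C prototype, which takes the instrumented pc first (assumed from arch/arm64/kernel/ftrace.c in this release):

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);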
@@ -209,15 +189,27 @@
* void return_to_handler(void)
*
* Run ftrace_return_to_handler() before going back to parent.
- * @fp is checked against the value passed by ftrace_graph_caller()
- * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ * @fp is checked against the value passed by ftrace_graph_caller().
*/
ENTRY(return_to_handler)
- save_return_regs
+ /* save return value regs */
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+
mov x0, x29 // parent's fp
	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fp);
mov x30, x0 // restore the original return address
- restore_return_regs
+
+ /* restore return value regs */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8556876..cf3bd29 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1,21 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Low-level exception handling code
*
* Copyright (C) 2012 ARM Ltd.
* Authors: Catalin Marinas <catalin.marinas@arm.com>
* Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
@@ -41,9 +30,9 @@
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
- .macro ct_user_exit
+ .macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_exit
+ bl enter_from_user_mode
#endif
.endm
@@ -185,7 +174,7 @@
.else
add x21, sp, #S_FRAME_SIZE
- get_thread_info tsk
+ get_current_task tsk
/* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
@@ -249,9 +238,16 @@
msr sp_el0, tsk
.endif
+ /* Save pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs_s x20, SYS_ICC_PMR_EL1
+ str x20, [sp, #S_PMR_SAVE]
+alternative_else_nop_endif
+
/*
* Registers that may be useful after this macro is invoked:
*
+ * x20 - ICC_PMR_EL1
* x21 - aborted SP
* x22 - aborted PC
* x23 - aborted PSTATE
@@ -269,6 +265,14 @@
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
+ /* Restore pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ ldr x20, [sp, #S_PMR_SAVE]
+ msr_s SYS_ICC_PMR_EL1, x20
+ /* Ensure priority change is seen by redistributor */
+ dsb sy
+alternative_else_nop_endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@@ -322,6 +326,21 @@
alternative_else_nop_endif
#endif
3:
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if_not ARM64_WORKAROUND_1418040
+ b 4f
+alternative_else_nop_endif
+ /*
+ * if (x22.mode32 == cntkctl_el1.el0vcten)
+ * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
+ */
+ mrs x1, cntkctl_el1
+ eon x0, x1, x22, lsr #3
+ tbz x0, #1, 4f
+ eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x1
+4:
+#endif
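
The eon/tbz sequence is a branchless compare-and-toggle. An equivalent C model (a sketch; bit 1 of CNTKCTL_EL1 is EL0VCTEN, per the ARCH_TIMER_USR_VCT_ACCESS_EN comment):

static inline unsigned long fixup_cntkctl(unsigned long cntkctl, int mode32)
{
	int el0vcten = !!(cntkctl & 2);

	/* Deny the virtual counter to 32-bit EL0 (trap and emulate it),
	 * allow it for 64-bit EL0: toggle whenever the two bits agree. */
	if (mode32 == el0vcten)
		cntkctl ^= 2;
	return cntkctl;
}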
apply_ssbd 0, x0, x1
.endif
@@ -344,25 +363,22 @@
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
- /*
- * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
- * when returning from IPI handler, and when returning to user-space.
- */
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 4f
+ bne 5f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
-4:
+5:
tramp_alias x30, tramp_exit_compat
br x30
#endif
.else
eret
.endif
+ sb
.endm
.macro irq_stack_entry
@@ -395,17 +411,7 @@
mov sp, x19
.endm
-/*
- * These are the registers used in the syscall handler, and allow us to
- * have in theory up to 7 arguments to a function - x0 to x6.
- *
- * x7 is reserved for the system call number in 32-bit mode.
- */
-wsc_nr .req w25 // number of system calls
-xsc_nr .req x25 // number of system calls (zero-extended)
-wscno .req w26 // syscall number
-xscno .req x26 // syscall number (zero-extended)
-stbl .req x27 // syscall table pointer
+/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
@@ -419,6 +425,38 @@
irq_stack_exit
.endm
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+ * Set res to 0 if irqs were unmasked in interrupted context.
+ * Otherwise set res to non-0 value.
+ */
+ .macro test_irqs_unmasked res:req, pmr:req
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ sub \res, \pmr, #GIC_PRIO_IRQON
+alternative_else
+ mov \res, xzr
+alternative_endif
+ .endm
+#endif
+
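
test_irqs_unmasked encodes "were IRQs unmasked?" as a plain subtraction, which the callers test with cbz/cbnz. In C terms (a model, assuming the kernel's GIC_PRIO_IRQON constant):

/* Result is 0 exactly when the saved PMR equals GIC_PRIO_IRQON,
 * i.e. interrupts were unmasked in the interrupted context. */
static inline unsigned long test_irqs_unmasked(unsigned long pmr)
{
	return pmr - GIC_PRIO_IRQON;
}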
+ .macro gic_prio_kentry_setup, tmp:req
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ alternative_else_nop_endif
+#endif
+ .endm
+
+ .macro gic_prio_irq_setup, pmr:req, tmp:req
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ alternative_else_nop_endif
+#endif
+ .endm
+
.text
/*
@@ -548,10 +586,8 @@
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el1_sp_pc
+ b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -568,14 +604,16 @@
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
- clear_address_tag x0, x3
+ untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
kernel_exit 1
-el1_sp_pc:
+el1_pc:
/*
- * Stack or PC alignment exception handling
+ * PC alignment exception handling. We don't handle SP alignment faults,
+ * since we will have hit a recursive exception when trying to push the
+ * initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
@@ -597,6 +635,7 @@
cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
+ gic_prio_kentry_setup tmp=x3
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
@@ -614,7 +653,16 @@
.align 6
el1_irq:
kernel_entry 1
+ gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ test_irqs_unmasked res=x0, pmr=x20
+ cbz x0, 1f
+ bl asm_nmi_enter
+1:
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -622,28 +670,44 @@
irq_handler
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
- cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
- bl el1_preempt
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ /*
+ * DA_F were cleared at start of handling. If anything is set in DAIF,
+	 * we are returning from an NMI, so skip preemption
+ */
+ mrs x0, daif
+ orr x24, x24, x0
+alternative_else_nop_endif
+ cbnz x24, 1f // preempt count != 0 || NMI return path
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+ * When using IRQ priority masking, we can get spurious interrupts while
+ * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
+ * section with interrupts disabled. Skip tracing in those cases.
+ */
+ test_irqs_unmasked res=x0, pmr=x20
+ cbz x0, 1f
+ bl asm_nmi_exit
+1:
#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ test_irqs_unmasked res=x0, pmr=x20
+ cbnz x0, 1f
+#endif
+ bl trace_hardirqs_on
+1:
+#endif
+
kernel_exit 1
ENDPROC(el1_irq)
-#ifdef CONFIG_PREEMPT
-el1_preempt:
- mov x24, lr
-1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
- ret x24
-#endif
-
/*
* EL0 mode handlers.
*/
@@ -665,11 +729,12 @@
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
+ ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el0_sp_pc
+ b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -693,13 +758,13 @@
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
@@ -710,6 +775,7 @@
b.ge el0_dbg
b el0_inv
el0_svc_compat:
+ gic_prio_kentry_setup tmp=x1
mov x0, sp
bl el0_svc_compat_handler
b ret_to_user
@@ -722,6 +788,17 @@
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked
+
+el0_cp15:
+ /*
+ * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
+ */
+ ct_user_exit_irqoff
+ enable_daif
+ mov x0, x25
+ mov x1, sp
+ bl do_cp15instr
+ b ret_to_user
#endif
el0_da:
@@ -729,9 +806,9 @@
* Data abort handling
*/
mrs x26, far_el1
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
- clear_address_tag x0, x26
+ untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
@@ -741,11 +818,12 @@
* Instruction abort handling
*/
mrs x26, far_el1
+ gic_prio_kentry_setup tmp=x0
+ ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -755,8 +833,8 @@
/*
* Floating Point or Advanced SIMD access
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
@@ -765,8 +843,8 @@
/*
* Scalable Vector Extension access
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_sve_acc
@@ -775,22 +853,27 @@
/*
* Floating Point, Advanced SIMD or SVE exception
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
+el0_sp:
+ ldr x26, [sp, #S_SP]
+ b el0_sp_pc
+el0_pc:
+ mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
- mrs x26, far_el1
+ gic_prio_kentry_setup tmp=x0
+ ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -800,8 +883,8 @@
/*
* Undefined instruction
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, sp
bl do_undefinstr
b ret_to_user
@@ -809,8 +892,8 @@
/*
* System instructions, for trapped cache maintenance instructions
*/
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, x25
mov x1, sp
bl do_sysinstr
@@ -820,16 +903,18 @@
* Debug exception handling
*/
tbnz x24, #0, el0_inv // EL0 only
- mrs x0, far_el1
+ mrs x24, far_el1
+ gic_prio_kentry_setup tmp=x3
+ ct_user_exit_irqoff
+ mov x0, x24
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_daif
- ct_user_exit
+ enable_da_f
b ret_to_user
el0_inv:
+ ct_user_exit_irqoff
enable_daif
- ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
@@ -841,12 +926,14 @@
el0_irq:
kernel_entry 0
el0_irq_naked:
+ gic_prio_irq_setup pmr=x20, tmp=x0
+ ct_user_exit_irqoff
enable_da_f
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
@@ -863,6 +950,7 @@
el1_error:
kernel_entry 1
mrs x1, esr_el1
+ gic_prio_kentry_setup tmp=x2
enable_dbg
mov x0, sp
bl do_serror
@@ -872,12 +960,14 @@
el0_error:
kernel_entry 0
el0_error_naked:
- mrs x1, esr_el1
+ mrs x25, esr_el1
+ gic_prio_kentry_setup tmp=x2
+ ct_user_exit_irqoff
enable_dbg
mov x0, sp
+ mov x1, x25
bl do_serror
- enable_daif
- ct_user_exit
+ enable_da_f
b ret_to_user
ENDPROC(el0_error)
@@ -897,6 +987,7 @@
*/
ret_to_user:
disable_daif
+ gic_prio_kentry_setup tmp=x3
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -913,6 +1004,7 @@
*/
.align 6
el0_svc:
+ gic_prio_kentry_setup tmp=x1
mov x0, sp
bl el0_svc_handler
b ret_to_user
@@ -979,7 +1071,9 @@
#else
ldr x30, =vectors
#endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
prfm plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
msr vbar_el1, x30
add x30, x30, #(1b - tramp_vectors)
isb
@@ -994,6 +1088,7 @@
mrs x30, far_el1
.endif
eret
+ sb
.endm
.align 11
@@ -1072,7 +1167,7 @@
cbz x19, 1f // not a kernel thread
mov x0, x20
blr x19
-1: get_thread_info tsk
+1: get_current_task tsk
b ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 58c53bc..37d3912 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* FP/SIMD context switching and fault handling
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
@@ -38,6 +28,7 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
+#include <linux/swab.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
@@ -48,6 +39,7 @@
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
+#include <asm/virt.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -90,7 +82,8 @@
* To prevent this from racing with the manipulation of the task's FPSIMD state
* from task context and thereby corrupting the state, it is necessary to
* protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with local_bh_disable() unless softirqs are already masked.
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run but prevent them from using FPSIMD.
*
* For a certain task, the sequence may look something like this:
* - the task gets scheduled in; if both the task's fpsimd_cpu field
@@ -119,6 +112,8 @@
*/
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
+ void *sve_state;
+ unsigned int sve_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -130,18 +125,77 @@
/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq is encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
static void __percpu *efi_sve_state;
#else /* ! CONFIG_ARM64_SVE */
/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
+{
+ bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+ WARN_ON(busy);
+}
+
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
+{
+ preempt_disable();
+ __get_cpu_fpsimd_context();
+}
+
+static void __put_cpu_fpsimd_context(void)
+{
+ bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+ WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+ __put_cpu_fpsimd_context();
+ preempt_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+ return !preemptible() && __this_cpu_read(fpsimd_context_busy);
+}
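The intended calling pattern, matching the local_bh_disable()/local_bh_enable() conversions later in this file (illustrative only):

    get_cpu_fpsimd_context();  /* disables preemption, claims the CPU's regs */

    fpsimd_save();             /* memory is now up to date w.r.t. the CPU */
    /* ... manipulate current's FPSIMD/SVE state or TIF_FOREIGN_FPSTATE ... */

    put_cpu_fpsimd_context();  /* releases the regs, re-enables preemption */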
+
/*
* Call __sve_free() directly only if you know task can't be scheduled
* or preempted.
@@ -212,12 +266,10 @@
* This function should be called only when the FPSIMD/SVE state in
* thread_struct is known to be up to date, when preparing to enter
* userspace.
- *
- * Softirqs (and preemption) must be disabled.
*/
static void task_fpsimd_load(void)
{
- WARN_ON(!in_softirq() && !irqs_disabled());
+ WARN_ON(!have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(¤t->thread),
@@ -230,19 +282,18 @@
/*
* Ensure FPSIMD/SVE storage in memory for the loaded context is up to
* date with respect to the CPU registers.
- *
- * Softirqs (and preemption) must be disabled.
*/
-void fpsimd_save(void)
+static void fpsimd_save(void)
{
- struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ struct fpsimd_last_state_struct const *last =
+ this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
- WARN_ON(!in_softirq() && !irqs_disabled());
+ WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -252,32 +303,15 @@
return;
}
- sve_save_state(sve_pffr(¤t->thread), &st->fpsr);
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(last->sve_vl),
+ &last->st->fpsr);
} else
- fpsimd_save_state(st);
+ fpsimd_save_state(last->st);
}
}
/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa). This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
- return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
- if (WARN_ON(bit >= SVE_VQ_MAX))
- bit = SVE_VQ_MAX - 1;
-
- return SVE_VQ_MAX - bit;
-}
-
-/*
* All vector length selection from userspace comes through here.
* We're on a slow path, so some sanity-checks are included.
* If things go wrong there's a bug somewhere, but try to fall back to a
@@ -298,8 +332,8 @@
vl = max_vl;
bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
- vq_to_bit(sve_vq_from_vl(vl)));
- return sve_vl_from_vq(bit_to_vq(bit));
+ __vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(__bit_to_vq(bit));
}
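The __vq_to_bit()/__bit_to_vq() helpers that replace the local ones keep the reversed mapping bit = SVE_VQ_MAX - vq, so the forward find_next_bit() scan above lands on the largest supported VQ not exceeding the request. A toy illustration (pretending SVE_VQ_MAX were 16 for brevity):

    /*
     * With a hypothetical SVE_VQ_MAX of 16: __vq_to_bit(16) == 0 and
     * __vq_to_bit(1) == 15. If only VQs {1, 2, 4} are supported
     * (bits 12, 14, 15 set) and VQ 3 is requested, the scan starts at
     * bit __vq_to_bit(3) == 13 and the first set bit found is 14,
     * i.e. VQ 2: the largest supported length not exceeding the request.
     */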
#ifdef CONFIG_SYSCTL
@@ -355,12 +389,42 @@
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ u64 a = swab64(x);
+ u64 b = swab64(x >> 64);
+
+ return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
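Swapping the two 64-bit halves and byte-reversing each is an involution, which is why arm64_le128_to_cpu() can simply alias arm64_cpu_to_le128(). A standalone user-space check of that property (sketch; assumes compiler __uint128_t support):

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of arm64_cpu_to_le128() on a big-endian host. */
    static __uint128_t le128(__uint128_t x)
    {
            uint64_t a = __builtin_bswap64((uint64_t)x);
            uint64_t b = __builtin_bswap64((uint64_t)(x >> 64));

            return ((__uint128_t)a << 64) | b;
    }

    int main(void)
    {
            __uint128_t x = ((__uint128_t)0x0123456789abcdefULL << 64) |
                            0xfedcba9876543210ULL;

            assert(le128(le128(x)) == x);  /* the transform undoes itself */
            return 0;
    }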
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+ unsigned int vq)
+{
+ unsigned int i;
+ __uint128_t *p;
+
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
+}
+
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
*
* Task can be a non-runnable task, or current. In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
* task->thread.sve_state must point to at least sve_state_size(task)
* bytes of allocated kernel memory.
* task->thread.uw.fpsimd_state must be up to date before calling this
@@ -371,15 +435,12 @@
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ __fpsimd_to_sve(sst, fst, vq);
}
/*
@@ -387,7 +448,8 @@
* task->thread.uw.fpsimd_state.
*
* Task can be a non-runnable task, or current. In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
* task->thread.sve_state must point to at least sve_state_size(task)
* bytes of allocated kernel memory.
* task->thread.sve_state must be up to date before calling this function.
@@ -398,14 +460,16 @@
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t const *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(&fst->vregs[i], ZREG(sst, vq, i),
- sizeof(fst->vregs[i]));
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t const *)ZREG(sst, vq, i);
+ fst->vregs[i] = arm64_le128_to_cpu(*p);
+ }
}
#ifdef CONFIG_ARM64_SVE
@@ -493,7 +557,6 @@
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -501,10 +564,7 @@
vq = sve_vq_from_vl(task->thread.sve_vl);
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ __fpsimd_to_sve(sst, fst, vq);
}
int sve_set_vector_length(struct task_struct *task,
@@ -547,10 +607,9 @@
* non-SVE thread.
*/
if (task == current) {
- local_bh_disable();
+ get_cpu_fpsimd_context();
fpsimd_save();
- set_thread_flag(TIF_FOREIGN_FPSTATE);
}
fpsimd_flush_task_state(task);
@@ -558,7 +617,7 @@
sve_to_fpsimd(task);
if (task == current)
- local_bh_enable();
+ put_cpu_fpsimd_context();
/*
* Force reallocation of task SVE state to the correct size
@@ -624,12 +683,6 @@
return sve_prctl_status(0);
}
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
-
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
unsigned int vq, vl;
@@ -644,40 +697,82 @@
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
vl = sve_get_vl();
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
- set_bit(vq_to_bit(vq), map);
+ set_bit(__vq_to_bit(vq), map);
}
}
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
void __init sve_init_vq_map(void)
{
sve_probe_vqs(sve_vq_map);
+ bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}
/*
* If we haven't committed to the set of supported VQs yet, filter out
* those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
*/
void sve_update_vq_map(void)
{
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+ sve_probe_vqs(tmp_map);
+ bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+ bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
int sve_verify_vq_map(void)
{
- int ret = 0;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
- SVE_VQ_MAX);
- if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ sve_probe_vqs(tmp_map);
+
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
smp_processor_id());
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return 0;
+
+ /*
+ * For KVM, it is necessary to ensure that this CPU doesn't
+ * support any vector length that guests may have probed as
+ * unsupported.
+ */
+
+ /* Recover the set of supported VQs: */
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ /* Find VQs supported that are not globally supported: */
+ bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+ /* Find the lowest such VQ, if any: */
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ return 0; /* no mismatches */
+
+ /*
+ * Mismatches above sve_max_virtualisable_vl are fine, since
+ * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+ */
+ if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+ pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ return 0;
}
static void __init sve_efi_setup(void)
@@ -744,6 +839,8 @@
void __init sve_setup(void)
{
u64 zcr;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
if (!system_supports_sve())
return;
@@ -753,8 +850,8 @@
* so sve_vq_map must have at least SVE_VQ_MIN set.
* If something went wrong, at least try to patch it up:
*/
- if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
- set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+ if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -772,11 +869,31 @@
*/
sve_default_vl = find_supported_vector_length(64);
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ /* No non-virtualisable VLs found */
+ sve_max_virtualisable_vl = SVE_VQ_MAX;
+ else if (WARN_ON(b == SVE_VQ_MAX - 1))
+ /* No virtualisable VLs? This is architecturally forbidden. */
+ sve_max_virtualisable_vl = SVE_VQ_MIN;
+ else /* b + 1 < SVE_VQ_MAX */
+ sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+ if (sve_max_virtualisable_vl > sve_max_vl)
+ sve_max_virtualisable_vl = sve_max_vl;
+
pr_info("SVE: maximum available vector length %u bytes per vector\n",
sve_max_vl);
pr_info("SVE: default vector length %u bytes per vector\n",
sve_default_vl);
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+ pr_warn("SVE: unvirtualisable vector lengths present\n");
+
sve_efi_setup();
}
@@ -813,19 +930,18 @@
sve_alloc(current);
- local_bh_disable();
+ get_cpu_fpsimd_context();
fpsimd_save();
- fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
fpsimd_flush_task_state(current);
- set_thread_flag(TIF_FOREIGN_FPSTATE);
+ fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
- local_bh_enable();
+ put_cpu_fpsimd_context();
}
/*
@@ -842,7 +958,6 @@
*/
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
- siginfo_t info;
unsigned int si_code = FPE_FLTUNK;
if (esr & ESR_ELx_FP_EXC_TFV) {
@@ -858,12 +973,9 @@
si_code = FPE_FLTRES;
}
- clear_siginfo(&info);
- info.si_signo = SIGFPE;
- info.si_code = si_code;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- send_sig_info(SIGFPE, &info, current);
+ send_sig_fault(SIGFPE, si_code,
+ (void __user *)instruction_pointer(regs),
+ current);
}
void fpsimd_thread_switch(struct task_struct *next)
@@ -873,6 +985,8 @@
if (!system_supports_fpsimd())
return;
+ __get_cpu_fpsimd_context();
+
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
@@ -887,6 +1001,8 @@
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+
+ __put_cpu_fpsimd_context();
}
void fpsimd_flush_thread(void)
@@ -896,11 +1012,11 @@
if (!system_supports_fpsimd())
return;
- local_bh_disable();
+ get_cpu_fpsimd_context();
+ fpsimd_flush_task_state(current);
memset(¤t->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
- fpsimd_flush_task_state(current);
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@@ -937,9 +1053,7 @@
current->thread.sve_vl_onexec = 0;
}
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-
- local_bh_enable();
+ put_cpu_fpsimd_context();
}
/*
@@ -951,9 +1065,9 @@
if (!system_supports_fpsimd())
return;
- local_bh_disable();
+ get_cpu_fpsimd_context();
fpsimd_save();
- local_bh_enable();
+ put_cpu_fpsimd_context();
}
/*
@@ -970,7 +1084,8 @@
/*
* Associate current's FPSIMD context with this cpu
- * Preemption must be disabled when calling this function.
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
*/
void fpsimd_bind_task_to_cpu(void)
{
@@ -978,6 +1093,8 @@
this_cpu_ptr(&fpsimd_last_state);
last->st = ¤t->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
@@ -991,7 +1108,8 @@
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -999,6 +1117,8 @@
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->sve_state = sve_state;
+ last->sve_vl = sve_vl;
}
/*
@@ -1011,14 +1131,14 @@
if (!system_supports_fpsimd())
return;
- local_bh_disable();
+ get_cpu_fpsimd_context();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
task_fpsimd_load();
fpsimd_bind_task_to_cpu();
}
- local_bh_enable();
+ put_cpu_fpsimd_context();
}
/*
@@ -1031,7 +1151,7 @@
if (!system_supports_fpsimd())
return;
- local_bh_disable();
+ get_cpu_fpsimd_context();
current->thread.uw.fpsimd_state = *state;
if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -1042,27 +1162,55 @@
clear_thread_flag(TIF_FOREIGN_FPSTATE);
- local_bh_enable();
+ put_cpu_fpsimd_context();
}
/*
* Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled. The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
*/
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
+
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+ barrier();
}
-void fpsimd_flush_cpu_state(void)
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
{
__this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
-#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * Save the FPSIMD state to memory and invalidate cpu view.
+ * This function must be called with preemption disabled.
+ */
+void fpsimd_save_and_flush_cpu_state(void)
+{
+ WARN_ON(preemptible());
+ __get_cpu_fpsimd_context();
+ fpsimd_save();
+ fpsimd_flush_cpu_state();
+ __put_cpu_fpsimd_context();
+}
-DEFINE_PER_CPU(bool, kernel_neon_busy);
-EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
+#ifdef CONFIG_KERNEL_MODE_NEON
/*
* Kernel-side NEON support functions
@@ -1088,19 +1236,13 @@
BUG_ON(!may_use_simd());
- local_bh_disable();
-
- __this_cpu_write(kernel_neon_busy, true);
+ get_cpu_fpsimd_context();
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
-
- preempt_disable();
-
- local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);
@@ -1115,15 +1257,10 @@
*/
void kernel_neon_end(void)
{
- bool busy;
-
if (!system_supports_fpsimd())
return;
- busy = __this_cpu_xchg(kernel_neon_busy, false);
- WARN_ON(!busy); /* No matching kernel_neon_begin()? */
-
- preempt_enable();
+ put_cpu_fpsimd_context();
}
EXPORT_SYMBOL(kernel_neon_end);
@@ -1215,8 +1352,7 @@
{
switch (cmd) {
case CPU_PM_ENTER:
- fpsimd_save();
- fpsimd_flush_cpu_state();
+ fpsimd_save_and_flush_cpu_state();
break;
case CPU_PM_EXIT:
break;
@@ -1262,14 +1398,14 @@
*/
static int __init fpsimd_init(void)
{
- if (elf_hwcap & HWCAP_FP) {
+ if (cpu_have_named_feature(FP)) {
fpsimd_pm_init();
fpsimd_hotplug_init();
} else {
pr_notice("Floating-point is not implemented\n");
}
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
return sve_sysctl_init();
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 57e9622..06e56b4 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/ftrace.c
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ftrace.h>
@@ -76,7 +73,7 @@
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
+ struct plt_entry trampoline, *dst;
struct module *mod;
/*
@@ -103,25 +100,39 @@
* to be revisited if support for multiple ftrace entry points
* is added in the future, but for now, the pr_err() below
* deals with a theoretical issue only.
+ *
+	 * Note that PLTs are place-relative, and plt_entries_equal()
+ * checks whether they point to the same target. Here, we need
+ * to check if the actual opcodes are in fact identical,
+	 * regardless of their offset in memory, so use memcmp() instead.
*/
- trampoline = get_plt_entry(addr);
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &trampoline)) {
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &(struct plt_entry){})) {
+ dst = mod->arch.ftrace_trampoline;
+ trampoline = get_plt_entry(addr, dst);
+ if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+ if (plt_entry_is_initialized(dst)) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
/* point the trampoline to our ftrace entry point */
module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
+ *dst = trampoline;
module_enable_ro(mod, true);
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /*
+ * Ensure updated trampoline is visible to instruction
+ * fetch before we patch in the branch. Although the
+ * architecture doesn't require an IPI in this case,
+ * Neoverse-N1 erratum #1542419 does require one
+ * if the TLB maintenance in module_enable_ro() is
+ * skipped due to rodata_enabled. It doesn't seem worth
+ * it to make it conditional given that this is
+ * certainly not a fast-path.
+ */
+ flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
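The ±128 MiB window comes from the 26-bit signed immediate of the AArch64 BL instruction; the check at the top of this hunk is equivalent to (sketch):

    #include <linux/sizes.h>

    /* Can addr be reached from pc with a direct BL? */
    static bool in_bl_range(unsigned long pc, unsigned long addr)
    {
            long offset = (long)addr - (long)pc;

            return offset >= -SZ_128M && offset < SZ_128M;
    }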
@@ -193,6 +204,7 @@
void arch_ftrace_update_code(int command)
{
+ command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
@@ -211,7 +223,7 @@
*
* Note that @frame_pointer is used only for sanity check later.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b085306..989b194 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Low-level CPU initialisation
* Based on arch/arm/kernel/head.S
@@ -6,18 +7,6 @@
* Copyright (C) 2003-2012 ARM Ltd.
* Authors: Catalin Marinas <catalin.marinas@arm.com>
* Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
@@ -31,6 +20,7 @@
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
+#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
@@ -91,7 +81,7 @@
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
- .ascii "ARM\x64" // Magic number
+ .ascii ARM64_IMAGE_MAGIC // Magic number
#ifdef CONFIG_EFI
.long pe_header - _head // Offset to the PE header.
@@ -112,6 +102,8 @@
* x23 stext() .. start_kernel() physical misalignment/KASLR offset
* x28 __create_page_tables() callee preserved temp register
* x19/x20 __primary_switch() callee preserved temp registers
+ * x24 __primary_switch() .. relocate_kernel()
+ * current RELR displacement
*/
ENTRY(stext)
bl preserve_boot_args
@@ -287,19 +279,21 @@
mov x28, lr
/*
- * Invalidate the idmap and swapper page tables to avoid potential
- * dirty cache lines being evicted.
+ * Invalidate the init page tables to avoid potential dirty cache lines
+ * being evicted. Other page tables are allocated in rodata as part of
+ * the kernel image, and thus are clean to the PoC per the boot
+ * protocol.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
bl __inval_dcache_area
/*
- * Clear the idmap and swapper page tables.
+ * Clear the init page tables.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -316,6 +310,19 @@
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+#ifdef CONFIG_ARM64_VA_BITS_52
+ mrs_s x6, SYS_ID_AA64MMFR2_EL1
+ and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ mov x5, #52
+ cbnz x6, 1f
+#endif
+ mov x5, #VA_BITS_MIN
+1:
+ adr_l x6, vabits_actual
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
/*
* VA_BITS may be too small to allow for an ID mapping to be created
* that covers system RAM if that is located sufficiently high in the
@@ -373,7 +380,7 @@
/*
* Map the kernel image (starting with PHYS_OFFSET).
*/
- adrp x0, swapper_pg_dir
+ adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
mov x4, PTRS_PER_PGD
@@ -390,7 +397,7 @@
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x1, init_pg_end
sub x1, x1, x0
dmb sy
bl __inval_dcache_area
@@ -459,6 +466,7 @@
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
+EXPORT_SYMBOL(kimage_vaddr)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
@@ -488,16 +496,15 @@
* kernel is intended to run at EL2.
*/
mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #8, #4
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
mov x2, xzr
#endif
/* Hyp configuration. */
- mov x0, #HCR_RW // 64-bit EL1
+ mov_q x0, HCR_HOST_NVHE_FLAGS
cbz x2, set_hcr
- orr x0, x0, #HCR_TGE // Enable Host Extensions
- orr x0, x0, #HCR_E2H
+ mov_q x0, HCR_HOST_VHE_FLAGS
set_hcr:
msr hcr_el2, x0
isb
@@ -522,9 +529,8 @@
#ifdef CONFIG_ARM_GIC_V3
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
- ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne 3f
+ ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+ cbz x0, 3f
mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
@@ -549,8 +555,8 @@
#endif
/* EL2 debug */
- mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx x0, x1, #8, #4
+ mrs x1, id_aa64dfr0_el1
+ sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp x0, #1
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
@@ -559,7 +565,7 @@
csel x3, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
- ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
+ ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
cbz x0, 7f // Skip if SPE not present
cbnz x2, 6f // VHE?
mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
@@ -669,7 +675,7 @@
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
- .long 0
+ .quad 0
.popsection
@@ -705,7 +711,9 @@
/*
* Common entry point for secondary CPUs.
*/
+ bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
@@ -718,14 +726,22 @@
adr_l x0, secondary_data
ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ cbz x1, __secondary_too_slow
mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
+ cbz x2, __secondary_too_slow
msr sp_el0, x2
mov x29, #0
mov x30, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
+__secondary_too_slow:
+ wfe
+ wfi
+ b __secondary_too_slow
+ENDPROC(__secondary_too_slow)
+
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
@@ -748,6 +764,7 @@
* Enable the MMU.
*
* x0 = SCTLR_EL1 value for turning on the MMU.
+ * x1 = TTBR1_EL1 value
*
* Returns to the caller via x30/lr. This requires the caller to be covered
* by the .idmap.text section.
@@ -756,17 +773,17 @@
* If it isn't, park the CPU
*/
ENTRY(__enable_mmu)
- mrs x1, ID_AA64MMFR0_EL1
- ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+ mrs x2, ID_AA64MMFR0_EL1
+ ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
- update_early_cpu_boot_status 0, x1, x2
- adrp x1, idmap_pg_dir
- adrp x2, swapper_pg_dir
- phys_to_ttbr x3, x1
- phys_to_ttbr x4, x2
- msr ttbr0_el1, x3 // load TTBR0
- msr ttbr1_el1, x4 // load TTBR1
+ update_early_cpu_boot_status 0, x2, x3
+ adrp x2, idmap_pg_dir
+ phys_to_ttbr x1, x1
+ phys_to_ttbr x2, x2
+ msr ttbr0_el1, x2 // load TTBR0
+ offset_ttbr1 x1, x3
+ msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
isb
@@ -781,9 +798,30 @@
ret
ENDPROC(__enable_mmu)
+ENTRY(__cpu_secondary_check52bitva)
+#ifdef CONFIG_ARM64_VA_BITS_52
+ ldr_l x0, vabits_actual
+ cmp x0, #52
+ b.ne 2f
+
+ mrs_s x0, SYS_ID_AA64MMFR2_EL1
+ and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz x0, 2f
+
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
+1: wfe
+ wfi
+ b 1b
+
+#endif
+2: ret
+ENDPROC(__cpu_secondary_check52bitva)
+
__no_granule_support:
/* Indicate that this CPU can't boot and is stuck in the kernel */
- update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
wfe
wfi
@@ -806,14 +844,93 @@
0: cmp x9, x10
b.hs 1f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
+ ldp x12, x13, [x9], #24
+ ldr x14, [x9, #-8]
+ cmp w13, #R_AARCH64_RELATIVE
b.ne 0b
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
+ add x14, x14, x23 // relocate
+ str x14, [x12, x23]
b 0b
-1: ret
+
+1:
+#ifdef CONFIG_RELR
+ /*
+ * Apply RELR relocations.
+ *
+ * RELR is a compressed format for storing relative relocations. The
+ * encoded sequence of entries looks like:
+	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
+ *
+ * i.e. start with an address, followed by any number of bitmaps. The
+ * address entry encodes 1 relocation. The subsequent bitmap entries
+ * encode up to 63 relocations each, at subsequent offsets following
+ * the last address entry.
+ *
+ * The bitmap entries must have 1 in the least significant bit. The
+	 * assumption here is that an address cannot have 1 in its lsb: odd
+	 * addresses are not supported, and any that do occur are stored in the
+	 * RELA section, which is handled above.
+ *
+ * Excluding the least significant bit in the bitmap, each non-zero
+ * bit in the bitmap represents a relocation to be applied to
+ * a corresponding machine word that follows the base address
+ * word. The second least significant bit represents the machine
+ * word immediately following the initial address, and each bit
+ * that follows represents the next word, in linear order. As such,
+ * a single bitmap can encode up to 63 relocations in a 64-bit object.
+ *
+ * In this implementation we store the address of the next RELR table
+ * entry in x9, the address being relocated by the current address or
+	 * bitmap entry in x13, and the address being relocated by the current
+ * bit in x14.
+ *
+ * Because addends are stored in place in the binary, RELR relocations
+ * cannot be applied idempotently. We use x24 to keep track of the
+ * currently applied displacement so that we can correctly relocate if
+ * __relocate_kernel is called twice with non-zero displacements (i.e.
+ * if there is both a physical misalignment and a KASLR displacement).
+ */
+ ldr w9, =__relr_offset // offset to reloc table
+ ldr w10, =__relr_size // size of reloc table
+ add x9, x9, x11 // __va(.relr)
+ add x10, x9, x10 // __va(.relr) + sizeof(.relr)
+
+ sub x15, x23, x24 // delta from previous offset
+ cbz x15, 7f // nothing to do if unchanged
+ mov x24, x23 // save new offset
+
+2: cmp x9, x10
+ b.hs 7f
+ ldr x11, [x9], #8
+ tbnz x11, #0, 3f // branch to handle bitmaps
+ add x13, x11, x23
+ ldr x12, [x13] // relocate address entry
+ add x12, x12, x15
+ str x12, [x13], #8 // adjust to start of bitmap
+ b 2b
+
+3: mov x14, x13
+4: lsr x11, x11, #1
+ cbz x11, 6f
+ tbz x11, #0, 5f // skip bit if not set
+ ldr x12, [x14] // relocate bit
+ add x12, x12, x15
+ str x12, [x14]
+
+5: add x14, x14, #8 // move to next bit's address
+ b 4b
+
+6: /*
+ * Move to the next bitmap's address. 8 is the word size, and 63 is the
+ * number of significant bits in a bitmap entry.
+ */
+ add x13, x13, #(8 * 63)
+ b 2b
+
+7:
+#endif
+ ret
+
ENDPROC(__relocate_kernel)
#endif
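A hypothetical user-space decoder for the format described in the comment may make the walk easier to follow (names and calling convention invented for illustration; address entries are taken as link-time addresses rebased by `base`, with `delta` the displacement to add, and the first entry is always an address entry):

    #include <stddef.h>
    #include <stdint.h>

    static void apply_relr(const uint64_t *relr, size_t nents,
                           uintptr_t base, uint64_t delta)
    {
            uint64_t *where = NULL;  /* next word a bitmap bit would cover */

            for (size_t i = 0; i < nents; i++) {
                    uint64_t entry = relr[i];

                    if (!(entry & 1)) {
                            /* Address entry: one relocation, then point
                             * at the word that follows it. */
                            where = (uint64_t *)(base + entry);
                            *where++ += delta;
                    } else {
                            /* Bitmap entry: bit n covers the n-th word
                             * after the current position (bit 0 is the
                             * bitmap marker). */
                            for (int bit = 1; bit < 64; bit++)
                                    if (entry & (1ULL << bit))
                                            where[bit - 1] += delta;
                            where += 63;  /* next bitmap continues here */
                    }
            }
    }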
@@ -823,8 +940,12 @@
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
#endif
+ adrp x1, init_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_RELR
+ mov x24, #0 // no RELR displacement yet
+#endif
bl __relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
ldr x8, =__primary_switched
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index dd14ab8..38bcd4d 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hibernate low-level support
*
* Copyright (C) 2016 ARM Ltd.
* Author: James Morse <james.morse@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <linux/errno.h>
@@ -33,13 +22,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
phys_to_ttbr \tmp, \page_table
+ offset_ttbr1 \tmp, \tmp2
msr ttbr1_el1, \tmp
isb
.endm
@@ -80,7 +70,7 @@
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0, x6
+ break_before_make_ttbr_switch x5, x0, x6, x8
mov x21, x1
mov x30, x2
@@ -111,7 +101,7 @@
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21, x6
+ break_before_make_ttbr_switch x25, x21, x6, x8
ic ialluis
dsb ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99..a96b292 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
* Hibernate support specific for ARM64
*
@@ -11,8 +12,6 @@
* https://patchwork.kernel.org/patch/96442/
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
@@ -202,6 +201,7 @@
gfp_t mask)
{
int rc = 0;
+ pgd_t *trans_pgd;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
@@ -216,7 +216,13 @@
memcpy((void *)dst, src_start, length);
__flush_icache_range(dst, dst + length);
- pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+ trans_pgd = allocator(mask);
+ if (!trans_pgd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pgdp = pgd_offset_raw(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = allocator(mask);
if (!pudp) {
@@ -299,8 +305,10 @@
dcache_clean_range(__idmap_text_start, __idmap_text_end);
/* Clean kvm setup code to PoC? */
- if (el2_reset_needed())
+ if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ }
/* make the crash dump kernel image protected again */
crash_post_resume();
@@ -495,7 +503,7 @@
rc = -ENOMEM;
goto out;
}
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
if (rc)
goto out;
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 8c96443..38ee151 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers.
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "hw-breakpoint: " fmt
@@ -547,13 +536,18 @@
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+
+ /* Fallthrough */
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
+
+ /* Fallthrough */
default:
return -EINVAL;
}
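Read as a truth table, the reordered switch implements the following rule (sketch, assuming offset is the watchpoint address modulo 4):

    /* Which (offset, length) pairs the switch above accepts. */
    static bool compat_wp_ok(unsigned int offset, unsigned int len)
    {
            switch (offset) {
            case 0:
                    return true;     /* aligned: any supported length */
            case 1:
            case 2:
                    if (len == 2)    /* halfword */
                            return true;
                    /* fallthrough: a single byte is also fine */
            case 3:
                    return len == 1; /* single byte */
            default:
                    return false;
            }
    }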
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fb..73d4607 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hypervisor stub
*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
@@ -28,6 +17,8 @@
#include <asm/virt.h>
.text
+ .pushsection .hyp.text, "ax"
+
.align 11
ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
new file mode 100644
index 0000000..25a2a9b
--- /dev/null
+++ b/arch/arm64/kernel/image-vars.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linker script variables to be set after section resolution, as
+ * ld.lld does not like variables assigned before SECTIONS is processed.
+ */
+#ifndef __ARM64_KERNEL_IMAGE_VARS_H
+#define __ARM64_KERNEL_IMAGE_VARS_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#ifdef CONFIG_EFI
+
+__efistub_stext_offset = stext - _text;
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at another offset than they were
+ * linked at. The routines below are all implemented in assembler in a
+ * position-independent manner.
+ */
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strnlen = __pi_strnlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub_strrchr = __pi_strrchr;
+__efistub___flush_dcache_area = __pi___flush_dcache_area;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
+#endif
+
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
+__efistub_screen_info = screen_info;
+
+#endif
+
+#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed0..c7d38c6 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -1,27 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Linker script macros to generate Image header fields.
*
* Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_IMAGE_H
-#define __ASM_IMAGE_H
+#ifndef __ARM64_KERNEL_IMAGE_H
+#define __ARM64_KERNEL_IMAGE_H
#ifndef LINKER_SCRIPT
#error This file should only be included in vmlinux.lds.S
#endif
+#include <asm/image.h>
+
/*
* There aren't any ELF relocations we can use to endian-swap values known only
* at link time (e.g. the subtraction of two symbol addresses), so we must get
@@ -47,19 +38,22 @@
sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
sym##_hi32 = DATA_LE32((data) >> 32)
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ ARM64_IMAGE_FLAG_##field##_SHIFT)
+
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE
#else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE
#endif
#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
#define __HEAD_FLAG_PHYS_BASE 1
-#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
- (__HEAD_FLAG_PAGE_SIZE << 1) | \
- (__HEAD_FLAG_PHYS_BASE << 3))
+#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \
+ __HEAD_FLAG(PAGE_SIZE) | \
+ __HEAD_FLAG(PHYS_BASE))
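A hedged expansion example, using the shift names now shared with asm/image.h:

    /*
     * For instance, on a big-endian build:
     *
     *   __HEAD_FLAG(BE)
     *     -> (__HEAD_FLAG_BE << ARM64_IMAGE_FLAG_BE_SHIFT)
     *     -> (ARM64_IMAGE_FLAG_BE << ARM64_IMAGE_FLAG_BE_SHIFT)
     *
     * so the bit positions in the Image flags field are defined once,
     * in asm/image.h, instead of being open-coded here.
     */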
/*
* These will output as part of the Image header, which should be little-endian
@@ -71,52 +65,4 @@
DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
-#ifdef CONFIG_EFI
-
-__efistub_stext_offset = stext - _text;
-
-/*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
-
-/*
- * The EFI stub has its own symbol namespace prefixed by __efistub_, to
- * isolate it from the kernel proper. The following symbols are legally
- * accessed by the stub, so provide some aliases to make them accessible.
- * Only include data symbols here, or text symbols of functions that are
- * guaranteed to be safe when executed at another offset than they were
- * linked at. The routines below are all implemented in assembler in a
- * position independent manner
- */
-__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset = KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
-__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
-__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
-__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr);
-__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
-
-#ifdef CONFIG_KASAN
-__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset = KALLSYMS_HIDE(__pi_memset);
-#endif
-
-__efistub__text = KALLSYMS_HIDE(_text);
-__efistub__end = KALLSYMS_HIDE(_end);
-__efistub__edata = KALLSYMS_HIDE(_edata);
-__efistub_screen_info = KALLSYMS_HIDE(screen_info);
-
-#endif
-
-#endif /* __ASM_IMAGE_H */
+#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 2b34135..d801a70 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -37,7 +26,7 @@
#define AARCH64_INSN_N_BIT BIT(22)
#define AARCH64_INSN_LSL_12 BIT(22)
-static int aarch64_insn_encoding_class[] = {
+static const int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
@@ -734,6 +723,46 @@
state);
}
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ u32 insn = aarch64_insn_get_ldadd_value();
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ /*
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
+ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+ value, size);
+}
+
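These encoders are consumed by in-kernel instruction emitters; the eBPF JIT uses the STADD alias for atomic adds when LSE atomics are available. A minimal sketch of a caller, assuming a hypothetical emit buffer (not part of this patch):

static void emit_stadd64(u32 *ip)
{
	u32 insn;

	/* "stadd x2, [x1]": atomically add x2 to the doubleword at [x1] */
	insn = aarch64_insn_gen_stadd(AARCH64_INSN_REG_1,	/* address */
				      AARCH64_INSN_REG_2,	/* value */
				      AARCH64_INSN_SIZE_64);
	if (insn != AARCH64_BREAK_FAULT)
		*ip = insn;	/* append the 32-bit opcode */
}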
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy,
@@ -1239,6 +1268,35 @@
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_adr_type type)
+{
+ u32 insn;
+ s32 offset;
+
+ switch (type) {
+ case AARCH64_INSN_ADR_TYPE_ADR:
+ insn = aarch64_insn_get_adr_value();
+ offset = addr - pc;
+ break;
+ case AARCH64_INSN_ADR_TYPE_ADRP:
+ insn = aarch64_insn_get_adrp_value();
+ offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
+ break;
+ default:
+ pr_err("%s: unknown adr encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ if (offset < -SZ_1M || offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
+}
+
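To make the offset arithmetic above concrete, a worked example with invented addresses:

/*
 * Worked example (invented addresses):
 *   pc   = 0xffff000010081234 -> ALIGN_DOWN(pc, SZ_4K) = 0xffff000010081000
 *   addr = 0xffff000010123456
 *   ADRP offset = (addr - 0xffff000010081000) >> 12 = 0xa2 pages
 * ADRP lands the register on the target 4 KiB page; a following
 * ADD (or load/store immediate) supplies addr % SZ_4K = 0x456.
 * The [-SZ_1M, SZ_1M) check therefore means +/-1 MiB for ADR but
 * +/-1M pages (+/-4 GiB) for ADRP.
 */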
/*
* Decode the imm field of a branch, and return the byte offset as a
* signed value (so it can be used when computing a new branch
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 79b1738..aa7a4ec 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/io.c
*
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/export.h>
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 780a12f..04a327c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/irq.c
*
@@ -7,18 +8,6 @@
* Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
* Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel_stat.h>
@@ -27,12 +16,17 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
+#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <asm/daifflags.h>
#include <asm/vmap_stack.h>
unsigned long irq_err_count;
+/* Only access this in an NMI enter/exit */
+DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
int arch_show_interrupts(struct seq_file *p, int prec)
@@ -72,4 +66,28 @@
irqchip_init();
if (!handle_arch_irq)
panic("No interrupt controller found.");
+
+ if (system_uses_irq_prio_masking()) {
+ /*
+ * Now that we have a stack for our IRQ handler, set
+ * the PMR/PSR pair to a consistent state.
+ */
+ WARN_ON(read_sysreg(daif) & PSR_A_BIT);
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ }
}
+
+/*
+ * Stubs to make nmi_enter/exit() code callable from ASM
+ */
+asmlinkage void notrace asm_nmi_enter(void)
+{
+ nmi_enter();
+}
+NOKPROBE_SYMBOL(asm_nmi_enter);
+
+asmlinkage void notrace asm_nmi_exit(void)
+{
+ nmi_exit();
+}
+NOKPROBE_SYMBOL(asm_nmi_exit);
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index e075641..9a8a0ae 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -1,36 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Based on arch/arm/kernel/jump_label.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/insn.h>
-#ifdef HAVE_JUMP_LABEL
-
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- void *addr = (void *)entry->code;
+ void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
- insn = aarch64_insn_gen_branch_imm(entry->code,
- entry->target,
+ insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+ jump_entry_target(entry),
AARCH64_INSN_BRANCH_NOLINK);
} else {
insn = aarch64_insn_gen_nop();
@@ -49,5 +36,3 @@
* NOP needs to be replaced by a branch.
*/
}
-
-#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8..416f537 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/cache.h>
@@ -14,6 +11,7 @@
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
@@ -43,7 +41,7 @@
return ret;
}
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
{
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
@@ -64,9 +62,6 @@
return default_cmdline;
}
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
- pgprot_t prot);
-
/*
* This routine will be executed with the kernel mapped at its default virtual
* address, and if it returns successfully, the kernel will be remapped, and
@@ -87,6 +82,7 @@
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
@@ -94,7 +90,7 @@
* attempt at mapping the FDT in setup_machine()
*/
early_fixmap_init();
- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
if (!fdt)
return 0;
@@ -109,7 +105,7 @@
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- cmdline = get_cmdline(fdt);
+ cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0;
@@ -117,15 +113,15 @@
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+ * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
* the lower and upper quarters to avoid colliding with other
* allocations.
* Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs
*/
- mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = BIT(VA_BITS - 3) + (seed & mask);
+ mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+ offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
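For reference, with a 48-bit VA configuration the arithmetic above works out as follows (a sketch, not part of the patch):

/*
 * Worked example, VA_BITS_MIN == 48:
 *   mask   = ((1UL << 46) - 1) & ~(SZ_2M - 1) == 0x3fffffe00000
 *   offset = (1UL << 45) + (seed & mask)
 * i.e. a 2 MiB-aligned offset in [2^45, 2^45 + 2^46): the middle
 * half of a 2^47-byte window, leaving the lower and upper quarters
 * clear of the kernel image.
 */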
@@ -143,15 +139,15 @@
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
- * Randomize the module region over a 4 GB window covering the
+ * Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
- module_range = SZ_4G - (u64)(_end - _stext);
- module_alloc_base = max((u64)_end + offset - SZ_4G,
+ module_range = SZ_2G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_2G,
(u64)MODULES_VADDR);
} else {
/*
@@ -169,5 +165,8 @@
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
return offset;
}
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
new file mode 100644
index 0000000..29a9428
--- /dev/null
+++ b/arch/arm64/kernel/kexec_image.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kexec image loader
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#define pr_fmt(fmt) "kexec_file(Image): " fmt
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/pe.h>
+#include <linux/string.h>
+#include <linux/verification.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/image.h>
+#include <asm/memory.h>
+
+static int image_probe(const char *kernel_buf, unsigned long kernel_len)
+{
+ const struct arm64_image_header *h =
+ (const struct arm64_image_header *)(kernel_buf);
+
+ if (!h || (kernel_len < sizeof(*h)))
+ return -EINVAL;
+
+ if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
+ return -EINVAL;
+
+ return 0;
+}
+
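image_probe() and image_load() rely on the fixed Image header layout from arch/arm64/include/asm/image.h; for reference, the fields consulted here are laid out as:

struct arm64_image_header {
	__le32 code0;		/* executable code */
	__le32 code1;
	__le64 text_offset;	/* image load offset, little endian */
	__le64 image_size;	/* effective image size, little endian */
	__le64 flags;		/* kernel flags, little endian */
	__le64 res2;		/* reserved */
	__le64 res3;
	__le64 res4;
	__le32 magic;		/* magic number, "ARM\x64" */
	__le32 res5;		/* reserved (PE/COFF offset) */
};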
+static void *image_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ struct arm64_image_header *h;
+ u64 flags, value;
+ bool be_image, be_kernel;
+ struct kexec_buf kbuf;
+ unsigned long text_offset;
+ struct kexec_segment *kernel_segment;
+ int ret;
+
+ /* We don't support crash kernels yet. */
+ if (image->type == KEXEC_TYPE_CRASH)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /*
+ * We require a kernel with an unambiguous Image header. Per
+ * Documentation/arm64/booting.rst, this is the case when image_size
+ * is non-zero (practically speaking, since v3.17).
+ */
+ h = (struct arm64_image_header *)kernel;
+ if (!h->image_size)
+ return ERR_PTR(-EINVAL);
+
+ /* Check cpu features */
+ flags = le64_to_cpu(h->flags);
+ be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
+ be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ if ((be_image != be_kernel) && !system_supports_mixed_endian())
+ return ERR_PTR(-EINVAL);
+
+ value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
+ if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
+ !system_supports_4kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
+ !system_supports_64kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
+ !system_supports_16kb_granule()))
+ return ERR_PTR(-EINVAL);
+
+ /* Load the kernel */
+ kbuf.image = image;
+ kbuf.buf_min = 0;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kbuf.buffer = kernel;
+ kbuf.bufsz = kernel_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = le64_to_cpu(h->image_size);
+ text_offset = le64_to_cpu(h->text_offset);
+ kbuf.buf_align = MIN_KIMG_ALIGN;
+
+ /* Adjust kernel segment with TEXT_OFFSET */
+ kbuf.memsz += text_offset;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ kernel_segment = &image->segment[image->nr_segments - 1];
+ kernel_segment->mem += text_offset;
+ kernel_segment->memsz -= text_offset;
+ image->start = kernel_segment->mem;
+
+ pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz,
+ kernel_segment->memsz);
+
+ /* Load additional data */
+ ret = load_other_segments(image,
+ kernel_segment->mem, kernel_segment->memsz,
+ initrd, initrd_len, cmdline);
+
+ return ERR_PTR(ret);
+}
+
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+static int image_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+ return verify_pefile_signature(kernel, kernel_len, NULL,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+}
+#endif
+
+const struct kexec_file_ops kexec_image_ops = {
+ .probe = image_probe,
+ .load = image_load,
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+ .verify_sig = image_verify_sig,
+#endif
+};
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index a20de58..4311992 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* AArch64 KGDB support
*
@@ -5,18 +6,6 @@
*
* Copyright (C) 2013 Cavium Inc.
* Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bug.h>
@@ -245,7 +234,7 @@
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_brk_fn)
@@ -254,7 +243,7 @@
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
@@ -264,38 +253,24 @@
return DBG_HOOK_ERROR;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
- .fn = kgdb_brk_fn
+ .fn = kgdb_brk_fn,
+ .imm = KGDB_DYN_DBG_BRK_IMM,
};
static struct break_hook kgdb_compiled_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
- .fn = kgdb_compiled_brk_fn
+ .fn = kgdb_compiled_brk_fn,
+ .imm = KGDB_COMPILED_DBG_BRK_IMM,
};
static struct step_hook kgdb_step_hook = {
.fn = kgdb_step_brk_fn
};
-static void kgdb_call_nmi_hook(void *ignored)
-{
- kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
- local_irq_enable();
- smp_call_function(kgdb_call_nmi_hook, NULL, 0);
- local_irq_disable();
-}
-
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
@@ -338,9 +313,9 @@
if (ret != 0)
return ret;
- register_break_hook(&kgdb_brkpt_hook);
- register_break_hook(&kgdb_compiled_brkpt_hook);
- register_step_hook(&kgdb_step_hook);
+ register_kernel_break_hook(&kgdb_brkpt_hook);
+ register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ register_kernel_step_hook(&kgdb_step_hook);
return 0;
}
@@ -351,13 +326,13 @@
*/
void kgdb_arch_exit(void)
{
- unregister_break_hook(&kgdb_brkpt_hook);
- unregister_break_hook(&kgdb_compiled_brkpt_hook);
- unregister_step_hook(&kgdb_step_hook);
+ unregister_kernel_break_hook(&kgdb_brkpt_hook);
+ unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_kernel_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops;
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 997e6b2..42bd8c0 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -1,31 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Low-level user helpers placed in the vectors page for AArch32.
+ * AArch32 user helpers.
* Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
*
* Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2012-2018 ARM Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * The kuser helpers below are mapped at a fixed address by
+ * aarch32_setup_additional_pages() and are provided for compatibility
+ * with 32-bit (AArch32) applications that need them.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * AArch32 user helpers.
- *
- * Each segment is 32-byte aligned and will be moved to the top of the high
- * vector page. New segments (if ever needed) must be added in front of
- * existing ones. This mechanism should be used only for things that are
- * really small and justified, and not be abused freely.
- *
- * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
+ * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
*/
#include <asm/unistd.h>
@@ -77,42 +62,3 @@
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
-
-/*
- * AArch32 sigreturn code
- *
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- *
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
- .globl __aarch32_sigret_code_start
-__aarch32_sigret_code_start:
-
- /*
- * ARM Code
- */
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
-
- /*
- * ARM code
- */
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
-
- .globl __aarch32_sigret_code_end
-__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 922add8..0df8493 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* kexec for arm64
*
* Copyright (C) Linaro.
* Copyright (C) Huawei Futurewei Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
@@ -212,9 +209,17 @@
* uses physical addressing to relocate the new image to its final
* position and transfers control to the image entry point when the
* relocation is complete.
+ * In the kexec case, kimage->start points to purgatory, assuming
+ * that the kernel entry point and dtb address are embedded in
+ * purgatory by userspace (kexec-tools).
+ * In the kexec_file case, the kernel starts directly without purgatory.
*/
-
- cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
+#ifdef CONFIG_KEXEC_FILE
+ kimage->arch.dtb_mem);
+#else
+ 0);
+#endif
BUG(); /* Should never get here. */
}
@@ -313,7 +318,7 @@
* but does not hold any data of loaded kernel image.
*
* Note that all the pages in crash dump kernel memory have been initially
- * marked as Reserved in kexec_reserve_crashkres_pages().
+ * marked as Reserved as memory was allocated via memblock_reserve().
*
* In hibernation, the pages which are Reserved and yet "nosave" are excluded
* from the hibernation image. crash_is_nosave() does this check for crash
@@ -353,7 +358,6 @@
for (addr = begin; addr < end; addr += PAGE_SIZE) {
page = phys_to_page(addr);
- ClearPageReserved(page);
free_reserved_page(page);
}
}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
new file mode 100644
index 0000000..7b08bf9
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kexec_file for arm64
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * Most code is derived from arm64 port of kexec-tools
+ */
+
+#define pr_fmt(fmt) "kexec_file: " fmt
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+/* relevant device tree properties */
+#define FDT_PROP_INITRD_START "linux,initrd-start"
+#define FDT_PROP_INITRD_END "linux,initrd-end"
+#define FDT_PROP_BOOTARGS "bootargs"
+#define FDT_PROP_KASLR_SEED "kaslr-seed"
+#define FDT_PROP_RNG_SEED "rng-seed"
+#define RNG_SEED_SIZE 128
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_image_ops,
+ NULL
+};
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->arch.dtb);
+ image->arch.dtb = NULL;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
+static int setup_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void *dtb)
+{
+ int off, ret;
+
+ ret = fdt_path_offset(dtb, "/chosen");
+ if (ret < 0)
+ goto out;
+
+ off = ret;
+
+ /* add bootargs */
+ if (cmdline) {
+ ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add initrd-* */
+ if (initrd_load_addr) {
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START,
+ initrd_load_addr);
+ if (ret)
+ goto out;
+
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END,
+ initrd_load_addr + initrd_len);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add kaslr-seed */
+ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
+ if (ret == -FDT_ERR_NOTFOUND)
+ ret = 0;
+ else if (ret)
+ goto out;
+
+ if (rng_is_initialized()) {
+ u64 seed = get_random_u64();
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_KASLR_SEED);
+ }
+
+ /* add rng-seed */
+ if (rng_is_initialized()) {
+ u8 rng_seed[RNG_SEED_SIZE];
+ get_random_bytes(rng_seed, RNG_SEED_SIZE);
+ ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed,
+ RNG_SEED_SIZE);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_RNG_SEED);
+ }
+
+out:
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
+
+ return 0;
+}
+
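The net effect of setup_dtb() on the duplicated blob's /chosen node looks roughly like this (all values invented):

/*
 * Illustrative /chosen node after setup_dtb():
 *	chosen {
 *		bootargs = "console=ttyAMA0 root=/dev/vda";
 *		linux,initrd-start = <0x0 0x84000000>;
 *		linux,initrd-end = <0x0 0x85000000>;
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *		rng-seed = [ ...128 random bytes... ];
 *	};
 */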
+/*
+ * More space needed so that we can add initrd, bootargs, kaslr-seed, and
+ * rng-seed.
+ */
+#define DTB_EXTRA_SPACE 0x1000
+
+static int create_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void **dtb)
+{
+ void *buf;
+ size_t buf_size;
+ size_t cmdline_len;
+ int ret;
+
+ cmdline_len = cmdline ? strlen(cmdline) : 0;
+ buf_size = fdt_totalsize(initial_boot_params)
+ + cmdline_len + DTB_EXTRA_SPACE;
+
+ for (;;) {
+ buf = vmalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ /* duplicate a device tree blob */
+ ret = fdt_open_into(initial_boot_params, buf, buf_size);
+ if (ret) {
+ /* free the duplicated blob before bailing out */
+ vfree(buf);
+ return -EINVAL;
+ }
+
+ ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ cmdline, buf);
+ if (ret) {
+ vfree(buf);
+ if (ret == -ENOMEM) {
+ /* unlikely, but just in case */
+ buf_size += DTB_EXTRA_SPACE;
+ continue;
+ } else {
+ return ret;
+ }
+ }
+
+ /* trim it */
+ fdt_pack(buf);
+ *dtb = buf;
+
+ return 0;
+ }
+}
+
+int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr,
+ unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline)
+{
+ struct kexec_buf kbuf;
+ void *dtb = NULL;
+ unsigned long initrd_load_addr = 0, dtb_len;
+ int ret = 0;
+
+ kbuf.image = image;
+ /* do not allocate anything below the kernel */
+ kbuf.buf_min = kernel_load_addr + kernel_size;
+
+ /* load initrd */
+ if (initrd) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = initrd_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = initrd_len;
+ kbuf.buf_align = 0;
+ /* within 1GB-aligned window of up to 32GB in size */
+ kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
+ + (unsigned long)SZ_1G * 32;
+ kbuf.top_down = false;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ initrd_load_addr = kbuf.mem;
+
+ pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, initrd_len, initrd_len);
+ }
+
+ /* load dtb */
+ ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb);
+ if (ret) {
+ pr_err("Preparing for new dtb failed\n");
+ goto out_err;
+ }
+
+ dtb_len = fdt_totalsize(dtb);
+ kbuf.buffer = dtb;
+ kbuf.bufsz = dtb_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = dtb_len;
+ /* must not cross a 2MB boundary */
+ kbuf.buf_align = SZ_2M;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ image->arch.dtb = dtb;
+ image->arch.dtb_mem = kbuf.mem;
+
+ pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kbuf.mem, dtb_len, dtb_len);
+
+ return 0;
+
+out_err:
+ vfree(dtb);
+ return ret;
+}
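The placement constraints encoded above follow the arm64 boot protocol; summarized (a reading of this code, not new requirements):

/*
 * Placement summary (cf. Documentation/arm64/booting.rst):
 *  - initrd: above the kernel, inside the 1 GiB-aligned window of
 *    up to 32 GiB covering the kernel image, i.e.
 *    buf_max = round_down(kernel_load_addr, SZ_1G) + 32 * SZ_1G;
 *  - dtb: 2 MiB aligned and placed top-down, so a typical blob
 *    (far smaller than 2 MiB) cannot straddle a 2 MiB boundary.
 */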
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index f0690c2..b182442 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/elf.h>
@@ -11,31 +8,91 @@
#include <linux/module.h>
#include <linux/sort.h>
+static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+ enum aarch64_insn_register reg)
+{
+ u32 adrp, add;
+
+ adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
+ add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_ADSB_ADD);
+
+ return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
+}
+
+struct plt_entry get_plt_entry(u64 dst, void *pc)
+{
+ struct plt_entry plt;
+ static u32 br;
+
+ if (!br)
+ br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
+ plt.br = cpu_to_le32(br);
+
+ return plt;
+}
+
+bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
+{
+ u64 p, q;
+
+ /*
+ * Check whether both entries refer to the same target:
+ * do the cheapest checks first.
+ * If the 'add' or 'br' opcodes are different, then the target
+ * cannot be the same.
+ */
+ if (a->add != b->add || a->br != b->br)
+ return false;
+
+ p = ALIGN_DOWN((u64)a, SZ_4K);
+ q = ALIGN_DOWN((u64)b, SZ_4K);
+
+ /*
+ * If the 'adrp' opcodes are the same then we just need to check
+ * that they refer to the same 4k region.
+ */
+ if (a->adrp == b->adrp && p == q)
+ return true;
+
+ return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
+ (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
+}
+
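Two ways the comparison above can succeed, with invented addresses:

/*
 * Example (invented addresses), both slots targeting one symbol:
 *  - same page: a = ...c9000100, b = ...c9000200. The ADRP
 *    immediates match and p == q, so the fast path returns true.
 *  - different pages: a = ...c9000ff0, b = ...c9001010. The ADRP
 *    opcodes differ, but p + adrp_offset(a) == q + adrp_offset(b),
 *    so both resolve to the same target page and the slow path
 *    returns true.
 */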
static bool in_init(const struct module *mod, void *loc)
{
return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}
-u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
- struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries;
+ int j = i - 1;
u64 val = sym->st_value + rela->r_addend;
- plt[i] = get_plt_entry(val);
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i++;
+
+ plt[i] = get_plt_entry(val, &plt[i]);
/*
* Check if the entry we just created is a duplicate. Given that the
* relocations are sorted, this will be the last entry we allocated,
* if one exists.
*/
- if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
- return (u64)&plt[i - 1];
+ if (j >= 0 && plt_entries_equal(plt + i, plt + j))
+ return (u64)&plt[j];
- pltsec->plt_num_entries++;
+ pltsec->plt_num_entries += i - j;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
@@ -43,41 +100,31 @@
}
#ifdef CONFIG_ARM64_ERRATUM_843419
-u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
+u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, u64 val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
- struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries++;
- u32 mov0, mov1, mov2, br;
+ u32 br;
int rd;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i = pltsec->plt_num_entries++;
+
/* get the destination register of the ADRP instruction */
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
le32_to_cpup((__le32 *)loc));
- /* generate the veneer instructions */
- mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_INVERSE);
- mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_KEEP);
- mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_KEEP);
br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
AARCH64_INSN_BRANCH_NOLINK);
- plt[i] = (struct plt_entry){
- cpu_to_le32(mov0),
- cpu_to_le32(mov1),
- cpu_to_le32(mov2),
- cpu_to_le32(br)
- };
+ plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
+ plt[i].br = cpu_to_le32(br);
return (u64)&plt[i];
}
@@ -193,6 +240,15 @@
break;
}
}
+
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ /*
+ * Add some slack so we can skip PLT slots that may trigger
+ * the erratum due to the placement of the ADRP instruction.
+ */
+ ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
+
return ret;
}
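The slack added here is small; a back-of-the-envelope check, assuming the three-instruction plt_entry this series uses:

/*
 * Sizing check (sizeof(struct plt_entry) == 3 * 4 == 12):
 *   SZ_4K / sizeof(struct plt_entry) == 341
 * so one spare slot is reserved per 341 requested entries, about
 * 0.3% overhead -- enough to step over any slot whose ADRP word
 * would land in the last 8 bytes of a 4 KiB page, the placement
 * affected by erratum 843419.
 */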
@@ -202,7 +258,7 @@
unsigned long core_plts = 0;
unsigned long init_plts = 0;
Elf64_Sym *syms = NULL;
- Elf_Shdr *tramp = NULL;
+ Elf_Shdr *pltsec, *tramp = NULL;
int i;
/*
@@ -211,9 +267,9 @@
*/
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
- mod->arch.core.plt = sechdrs + i;
+ mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
- mod->arch.init.plt = sechdrs + i;
+ mod->arch.init.plt_shndx = i;
else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
@@ -222,7 +278,7 @@
syms = (Elf64_Sym *)sechdrs[i].sh_addr;
}
- if (!mod->arch.core.plt || !mod->arch.init.plt) {
+ if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
@@ -246,7 +302,7 @@
/* sort by type, symbol index and addend */
sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
- if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+ if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
core_plts += count_plts(syms, rels, numrels,
sechdrs[i].sh_info, dstsec);
else
@@ -254,17 +310,19 @@
sechdrs[i].sh_info, dstsec);
}
- mod->arch.core.plt->sh_type = SHT_NOBITS;
- mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
+ pltsec = sechdrs + mod->arch.core.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
mod->arch.core.plt_num_entries = 0;
mod->arch.core.plt_max_entries = core_plts;
- mod->arch.init.plt->sh_type = SHT_NOBITS;
- mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
+ pltsec = sechdrs + mod->arch.init.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
mod->arch.init.plt_num_entries = 0;
mod->arch.init.plt_max_entries = init_plts;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f0f27ae..03ff15b 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* AArch64 loadable module support.
*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*/
@@ -32,6 +21,7 @@
void *module_alloc(unsigned long size)
{
+ u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
gfp_t gfp_mask = GFP_KERNEL;
void *p;
@@ -39,9 +29,12 @@
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;
+ if (IS_ENABLED(CONFIG_KASAN))
+ /* don't exceed the static module region - see below */
+ module_alloc_end = MODULES_END;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + MODULES_VSIZE,
- gfp_mask, PAGE_KERNEL_EXEC, 0,
+ module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -56,8 +49,8 @@
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + SZ_4G, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
@@ -96,16 +89,50 @@
{
s64 sval = do_reloc(op, place, val);
+ /*
+ * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+ * relative and absolute relocations as having a range of [-2^15, 2^16)
+ * or [-2^31, 2^32), respectively. However, in order to be able to
+ * detect overflows reliably, we have to choose whether we interpret
+ * such quantities as signed or as unsigned, and stick with it.
+ * The way we organize our address space requires a signed
+ * interpretation of 32-bit relative references, so let's use that
+ * for all R_AARCH64_PRELxx relocations. This means our upper
+ * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+ */
+
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U16_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S16_MIN || sval > S16_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 16-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U32_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 32-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 64:
*(s64 *)place = sval;
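Boundary cases for the signed/unsigned choice explained above:

/*
 * Boundary cases for len == 32:
 *   RELOC_OP_ABS : sval = 0xffffffff (U32_MAX) -> accepted
 *                  sval = -1                   -> -ERANGE
 *   RELOC_OP_PREL: sval = 0x7fffffff (S32_MAX) -> accepted
 *                  sval = 0x80000000           -> -ERANGE, even
 *                  though it would fit an unsigned 32-bit slot.
 */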
@@ -198,13 +225,12 @@
return 0;
}
-static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
+static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ __le32 *place, u64 val)
{
u32 insn;
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
- !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
- ((u64)place & 0xfff) < 0xff8)
+ if (!is_forbidden_offset_for_adrp(place))
return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
AARCH64_INSN_IMM_ADR);
@@ -215,7 +241,7 @@
insn &= ~BIT(31);
} else {
/* out of range for ADR -> emit a veneer */
- val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
+ val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
if (!val)
return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val,
@@ -288,18 +314,21 @@
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -367,8 +396,9 @@
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_ADR_PREL_PG_HI21:
- ovf = reloc_insn_adrp(me, loc, val);
+ ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
return ovf;
break;
@@ -413,7 +443,7 @@
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) {
- val = module_emit_plt_entry(me, loc, &rel[i], sym);
+ val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 53f371e..4cfed91 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 Citrix Systems
*
@@ -21,5 +14,5 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops pv_time_ops;
-EXPORT_SYMBOL_GPL(pv_time_ops);
+struct paravirt_patch_template pv_ops;
+EXPORT_SYMBOL_GPL(pv_ops);
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 0e2ea1c..570988c 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Code borrowed from powerpc/kernel/pci-common.c
*
* Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
* Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
*/
#include <linux/acpi.h>
@@ -165,16 +161,16 @@
/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
- int node = acpi_get_node(root->device->handle);
struct acpi_pci_generic_root_info *ri;
struct pci_bus *bus, *child;
struct acpi_pci_root_ops *root_ops;
+ struct pci_host_bridge *host;
- ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return NULL;
- root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
if (!root_ops) {
kfree(ri);
return NULL;
@@ -194,8 +190,16 @@
if (!bus)
return NULL;
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index bcafd7d..b0e03e0 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arm64 callchain support
*
* Copyright (C) 2015 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
struct frame_tail {
@@ -35,9 +25,10 @@
{
struct frame_tail buftail;
unsigned long err;
+ unsigned long lr;
/* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ if (!access_ok(tail, sizeof(buftail)))
return NULL;
pagefault_disable();
@@ -47,7 +38,9 @@
if (err)
return NULL;
- perf_callchain_store(entry, buftail.lr);
+ lr = ptrauth_strip_insn_pac(buftail.lr);
+
+ perf_callchain_store(entry, lr);
/*
* Frame pointers should strictly progress back up the stack
@@ -82,7 +75,7 @@
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ if (!access_ok(tail, sizeof(buftail)))
return NULL;
pagefault_disable();
@@ -161,12 +154,7 @@
return;
}
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, callchain_trace, entry);
}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e213f8e..a0b4f1b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1,22 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * PMU support
+ * ARMv8 PMUv3 Performance Events handling code.
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
*
* This code is based heavily on the ARMv7 perf event code.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <asm/irq_regs.h>
@@ -26,152 +15,11 @@
#include <linux/acpi.h>
#include <linux/clocksource.h>
+#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
-
-/*
- * ARMv8 PMUv3 Performance Events handling code.
- * Common event types (some are defined in asm/perf_event.h).
- */
-
-/* At least one of the following is required. */
-#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
-#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
-
-/* Common architectural events. */
-#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
-#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
-#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
-#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
-#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
-#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
-#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
-#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
-#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
-
-/* Common microarchitectural events. */
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
-#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
-#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
-#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
-#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
-#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
-#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
-
-/* ARMv8 recommended implementation defined event types */
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
-
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
-
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
-
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
-
-#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
-#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
-#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
-#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
-#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
-#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
-#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
-#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
-#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
-#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
-#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
-#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
-#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
-#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
-#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
-
-#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
-#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
-#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
-#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
-#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
-#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
-#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
-#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
-#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
-#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8
+#include <linux/smp.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -183,12 +31,10 @@
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
-/* PMUv3 HW events mapping. */
-
/*
* ARMv8 Architectural defined events, not all of these may
- * be supported on any given implementation. Undefined events will
- * be disabled at run-time.
+ * be supported on any given implementation. Unsupported events will
+ * be disabled at run-time based on the PMCEID registers.
*/
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
@@ -210,8 +56,6 @@
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
@@ -224,8 +68,6 @@
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -316,7 +158,6 @@
return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
-#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
config, armv8pmu_events_sysfs_show)
@@ -370,6 +211,18 @@
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
+ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
+ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
+ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
+ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
+ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
+ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
+ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
+ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
+ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
+ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
+ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
+ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
static struct attribute *armv8_pmuv3_event_attrs[] = {
&armv8_event_attr_sw_incr.attr.attr,
@@ -420,6 +273,18 @@
&armv8_event_attr_l2i_tlb_refill.attr.attr,
&armv8_event_attr_l2d_tlb.attr.attr,
&armv8_event_attr_l2i_tlb.attr.attr,
+ &armv8_event_attr_remote_access.attr.attr,
+ &armv8_event_attr_ll_cache.attr.attr,
+ &armv8_event_attr_ll_cache_miss.attr.attr,
+ &armv8_event_attr_dtlb_walk.attr.attr,
+ &armv8_event_attr_itlb_walk.attr.attr,
+ &armv8_event_attr_ll_cache_rd.attr.attr,
+ &armv8_event_attr_ll_cache_miss_rd.attr.attr,
+ &armv8_event_attr_remote_access_rd.attr.attr,
+ &armv8_event_attr_sample_pop.attr.attr,
+ &armv8_event_attr_sample_feed.attr.attr,
+ &armv8_event_attr_sample_filtrate.attr.attr,
+ &armv8_event_attr_sample_collision.attr.attr,
NULL,
};
@@ -434,7 +299,13 @@
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
- if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ return attr->mode;
+
+ pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
+ if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
return attr->mode;
return 0;
@@ -550,7 +421,7 @@
return val;
}
-static inline u64 armv8pmu_read_counter(struct perf_event *event)
+static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -587,7 +458,7 @@
}
}
-static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
+static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -647,12 +518,21 @@
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
+ struct perf_event_attr *attr = &event->attr;
int idx = event->hw.idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
- armv8pmu_enable_counter(idx);
if (armv8pmu_event_is_chained(event))
- armv8pmu_enable_counter(idx - 1);
- isb();
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_set_pmu_events(counter_bits, attr);
+
+ /* We rely on the hypervisor switch code to enable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ }
}
static inline int armv8pmu_disable_counter(int idx)
@@ -665,11 +545,21 @@
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
int idx = hwc->idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
if (armv8pmu_event_is_chained(event))
- armv8pmu_disable_counter(idx - 1);
- armv8pmu_disable_counter(idx);
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_clr_pmu_events(counter_bits);
+
+ /* We rely on the hypervisor switch code to disable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+ }
}
static inline int armv8pmu_enable_intens(int idx)
@@ -929,7 +819,7 @@
}
/*
- * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ * Add an event filter to a given event.
*/
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
@@ -946,14 +836,23 @@
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
- if (!attr->exclude_kernel)
+ if (!attr->exclude_kernel && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
- } else {
- if (attr->exclude_kernel)
+ if (attr->exclude_guest)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
+ if (attr->exclude_host)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+ } else {
+ if (!attr->exclude_hv && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
+
+ /*
+ * Filter out !VHE kernels and guest kernels
+ */
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
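
Editor's note: with the filter rework above, exclude_host/exclude_guest gain real EL semantics on arm64, and counters marked exclude_host are handed to the KVM world-switch code via kvm_set_pmu_events() rather than enabled directly. A hedged userspace sketch of the resulting ABI, counting CPU cycles only while a guest vCPU runs (assumes an arm64 host with PMUv3 and sufficient perf_event_paranoid privileges):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Open a per-CPU cycle counter that only ticks in guest context: the
     * host ELs are filtered out by the config_base masks built above, and
     * the counter itself is flipped at guest entry/exit by the hyp code. */
    static int open_guest_cycles(int cpu)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_host = 1;

        return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
    }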
@@ -983,6 +882,9 @@
armv8pmu_disable_intens(idx);
}
+ /* Clear the counters we flip at guest entry/exit */
+ kvm_clr_pmu_events(U32_MAX);
+
/*
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
@@ -1009,7 +911,7 @@
if (armv8pmu_event_is_64bit(event))
event->hw.flags |= ARMPMU_EVT_64BIT;
- /* Onl expose micro/arch events supported by this PMU */
+ /* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
return hw_event_id;
@@ -1061,6 +963,7 @@
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
u64 dfr0;
+ u64 pmceid_raw[2];
u32 pmceid[2];
int pmuver;
@@ -1079,11 +982,17 @@
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
- pmceid[0] = read_sysreg(pmceid0_el0);
- pmceid[1] = read_sysreg(pmceid1_el0);
+ pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
+ pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ pmceid[0] = pmceid_raw[0] >> 32;
+ pmceid[1] = pmceid_raw[1] >> 32;
+
+ bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
+ pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
@@ -1109,16 +1018,16 @@
if (ret)
return ret;
- cpu_pmu->handle_irq = armv8pmu_handle_irq,
- cpu_pmu->enable = armv8pmu_enable_event,
- cpu_pmu->disable = armv8pmu_disable_event,
- cpu_pmu->read_counter = armv8pmu_read_counter,
- cpu_pmu->write_counter = armv8pmu_write_counter,
- cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
- cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
- cpu_pmu->start = armv8pmu_start,
- cpu_pmu->stop = armv8pmu_stop,
- cpu_pmu->reset = armv8pmu_reset,
+ cpu_pmu->handle_irq = armv8pmu_handle_irq;
+ cpu_pmu->enable = armv8pmu_enable_event;
+ cpu_pmu->disable = armv8pmu_disable_event;
+ cpu_pmu->read_counter = armv8pmu_read_counter;
+ cpu_pmu->write_counter = armv8pmu_write_counter;
+ cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
+ cpu_pmu->start = armv8pmu_start;
+ cpu_pmu->stop = armv8pmu_stop;
+ cpu_pmu->reset = armv8pmu_reset;
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match;
@@ -1274,6 +1183,7 @@
.driver = {
.name = ARMV8_PMU_PDEV_NAME,
.of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
new file mode 100644
index 0000000..c507b58
--- /dev/null
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errno.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <asm/cpufeature.h>
+#include <asm/pointer_auth.h>
+
+int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
+{
+ struct ptrauth_keys *keys = &tsk->thread.keys_user;
+ unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY;
+ unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
+
+ if (!system_supports_address_auth() && !system_supports_generic_auth())
+ return -EINVAL;
+
+ if (!arg) {
+ ptrauth_keys_init(keys);
+ ptrauth_keys_switch(keys);
+ return 0;
+ }
+
+ if (arg & ~key_mask)
+ return -EINVAL;
+
+ if (((arg & addr_key_mask) && !system_supports_address_auth()) ||
+ ((arg & PR_PAC_APGAKEY) && !system_supports_generic_auth()))
+ return -EINVAL;
+
+ if (arg & PR_PAC_APIAKEY)
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+ if (arg & PR_PAC_APIBKEY)
+ get_random_bytes(&keys->apib, sizeof(keys->apib));
+ if (arg & PR_PAC_APDAKEY)
+ get_random_bytes(&keys->apda, sizeof(keys->apda));
+ if (arg & PR_PAC_APDBKEY)
+ get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+ if (arg & PR_PAC_APGAKEY)
+ get_random_bytes(&keys->apga, sizeof(keys->apga));
+
+ ptrauth_keys_switch(keys);
+
+ return 0;
+}
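
Editor's note: the new file wires PR_PAC_RESET_KEYS (prctl 54) to per-task key regeneration: arg == 0 re-randomises every key, while a mask regenerates a subset. A minimal userspace sketch; the fallback constants match the v5.4 uapi values but are defined locally in case <sys/prctl.h> predates them:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_PAC_RESET_KEYS
    #define PR_PAC_RESET_KEYS 54
    #define PR_PAC_APIAKEY    (1UL << 0)
    #endif

    int main(void)
    {
        /* arg == 0: regenerate all five keys (address + generic). */
        if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0))
            perror("PR_PAC_RESET_KEYS");

        /* Or reset only the IA key used for return-address signing. */
        if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
            perror("PR_PAC_RESET_KEYS (APIA)");

        return 0;
    }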
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 6bf6657..b78fac9 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/decode-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index 192ab00..8b758c5 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/decode-insn.h
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_ARM64_H
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index b5a367d..c445282 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/kprobes.c
*
@@ -5,16 +6,6 @@
*
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#include <linux/kasan.h>
#include <linux/kernel.h>
@@ -30,6 +21,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -91,8 +83,6 @@
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
- extern char __start_rodata[];
- extern char __end_rodata[];
if (probe_addr & 0x3)
return -EINVAL;
@@ -100,10 +90,7 @@
/* copy instruction */
p->opcode = le32_to_cpu(*p->addr);
- if (in_exception_text(probe_addr))
- return -EINVAL;
- if (probe_addr >= (unsigned long) __start_rodata &&
- probe_addr <= (unsigned long) __end_rodata)
+ if (search_exception_tables(probe_addr))
return -EINVAL;
/* decode instruction */
@@ -120,7 +107,7 @@
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
- };
+ }
/* prepare the instruction */
if (p->ainsn.api.insn)
@@ -136,8 +123,10 @@
void *page;
page = vmalloc_exec(PAGE_SIZE);
- if (page)
+ if (page) {
set_memory_ro((unsigned long)page, 1);
+ set_vm_flush_reset_perms(page);
+ }
return page;
}
@@ -180,33 +169,6 @@
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -217,17 +179,17 @@
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -264,8 +226,6 @@
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
@@ -444,7 +404,7 @@
return DBG_HOOK_ERROR;
}
-int __kprobes
+static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -463,33 +423,53 @@
return retval;
}
-int __kprobes
+static struct step_hook kprobes_step_hook = {
+ .fn = kprobe_single_step_handler,
+};
+
+static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
kprobe_handler(regs);
return DBG_HOOK_HANDLED;
}
-bool arch_within_kprobe_blacklist(unsigned long addr)
+static struct break_hook kprobes_break_hook = {
+ .imm = KPROBES_BRK_IMM,
+ .fn = kprobe_breakpoint_handler,
+};
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
{
- if ((addr >= (unsigned long)__kprobes_text_start &&
- addr < (unsigned long)__kprobes_text_end) ||
- (addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end) ||
- (addr >= (unsigned long)__idmap_text_start &&
- addr < (unsigned long)__idmap_text_end) ||
- !!search_exception_tables(addr))
- return true;
+ int ret;
- if (!is_kernel_in_hyp_mode()) {
- if ((addr >= (unsigned long)__hyp_text_start &&
- addr < (unsigned long)__hyp_text_end) ||
- (addr >= (unsigned long)__hyp_idmap_text_start &&
- addr < (unsigned long)__hyp_idmap_text_end))
- return true;
- }
-
- return false;
+ ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+ (unsigned long)__entry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
+ (unsigned long)__exception_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
+ if (ret || is_kernel_in_hyp_mode())
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
@@ -587,5 +567,8 @@
int __init arch_init_kprobes(void)
{
+ register_kernel_break_hook(&kprobes_break_hook);
+ register_kernel_step_hook(&kprobes_step_hook);
+
return 0;
}
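
Editor's note: for users of the kprobes API the above is behaviourally equivalent: register_kprobe() still refuses blacklisted addresses, but the ranges now come from arch_populate_kprobe_blacklist() and are visible via debugfs. A minimal module sketch; the probe target do_sys_open is an illustrative assumption, any non-blacklisted kernel symbol works:

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
        return 0;
    }

    static struct kprobe kp = {
        .symbol_name = "do_sys_open",   /* assumed target */
        .pre_handler = handler_pre,
    };

    static int __init kp_demo_init(void)
    {
        /* Returns -EINVAL for addresses in the blacklist built above. */
        return register_kprobe(&kp);
    }

    static void __exit kp_demo_exit(void)
    {
        unregister_kprobe(&kp);
    }

    module_init(kp_demo_init);
    module_exit(kp_demo_exit);
    MODULE_LICENSE("GPL");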
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index be05868..25f67ec 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/simulate-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/bitops.h>
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
index 050bde6..e065dc9 100644
--- a/arch/arm64/kernel/probes/simulate-insn.h
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/simulate-insn.h
*
* Copyright (C) 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 636ca01..a412d8e 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/highmem.h>
#include <linux/ptrace.h>
@@ -171,7 +168,7 @@
static int uprobe_breakpoint_handler(struct pt_regs *regs,
unsigned int esr)
{
- if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+ if (uprobe_pre_sstep_notifier(regs))
return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
@@ -182,21 +179,16 @@
{
struct uprobe_task *utask = current->utask;
- if (user_mode(regs)) {
- WARN_ON(utask &&
- (instruction_pointer(regs) != utask->xol_vaddr + 4));
-
- if (uprobe_post_sstep_notifier(regs))
- return DBG_HOOK_HANDLED;
- }
+ WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
}
/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
- .esr_mask = BRK64_ESR_MASK,
- .esr_val = BRK64_ESR_UPROBES,
+ .imm = UPROBES_BRK_IMM,
.fn = uprobe_breakpoint_handler,
};
@@ -207,8 +199,8 @@
static int __init arch_init_uprobes(void)
{
- register_break_hook(&uprobes_break_hook);
- register_step_hook(&uprobes_step_hook);
+ register_user_break_hook(&uprobes_break_hook);
+ register_user_step_hook(&uprobes_step_hook);
return 0;
}
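
Editor's note: the break/step hooks above are internal plumbing; userspace creates uprobes through tracefs (or perf). A sketch of the tracefs path, where the /bin/bash offset is purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        FILE *f;

        /* Create a probe at a hypothetical offset inside /bin/bash. */
        f = fopen("/sys/kernel/debug/tracing/uprobe_events", "w");
        if (!f)
            return 1;
        fprintf(f, "p:myprobe /bin/bash:0x670\n");
        fclose(f);

        /* Enable it; hits land in the ftrace ring buffer. */
        f = fopen("/sys/kernel/debug/tracing/events/uprobes/myprobe/enable", "w");
        if (!f)
            return 1;
        fprintf(f, "1\n");
        fclose(f);
        return 0;
    }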
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7f1628e..71f788c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/process.c
*
* Original Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdarg.h>
@@ -28,8 +17,10 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
+#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/stddef.h>
+#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
@@ -49,17 +40,21 @@
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
+#include <linux/prctl.h>
#include <asm/alternative.h>
+#include <asm/arch_gicv3.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -73,6 +68,50 @@
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+static void __cpu_do_idle(void)
+{
+ dsb(sy);
+ wfi();
+}
+
+static void __cpu_do_idle_irqprio(void)
+{
+ unsigned long pmr;
+ unsigned long daif_bits;
+
+ daif_bits = read_sysreg(daif);
+ write_sysreg(daif_bits | PSR_I_BIT, daif);
+
+ /*
+ * Unmask PMR before going idle to make sure interrupts can
+ * be raised.
+ */
+ pmr = gic_read_pmr();
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ __cpu_do_idle();
+
+ gic_write_pmr(pmr);
+ write_sysreg(daif_bits, daif);
+}
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake up signal in the interrupt controller).
+ */
+void cpu_do_idle(void)
+{
+ if (system_uses_irq_prio_masking())
+ __cpu_do_idle_irqprio();
+ else
+ __cpu_do_idle();
+}
+
/*
* This is our default idle handler.
*/
@@ -231,6 +270,9 @@
printk("sp : %016llx\n", sp);
+ if (system_uses_irq_prio_masking())
+ printk("pmr_save: %08llx\n", regs->pmr_save);
+
i = top_reg;
while (i >= 0) {
@@ -269,11 +311,18 @@
}
}
+static void flush_tagged_addr_state(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+ clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
+ flush_tagged_addr_state();
}
void release_thread(struct task_struct *dead_task)
@@ -285,22 +334,27 @@
fpsimd_release_task(tsk);
}
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied. We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead. This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
if (current->mm)
fpsimd_preserve_current_state();
*dst = *src;
+ /* We rely on the above assignment to initialize dst's thread_flags: */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+ /*
+ * Detach src's sve_state (if any) from dst so that it does not
+ * get erroneously used or freed prematurely. dst's sve_state
+ * will be allocated on demand later on if dst uses SVE.
+ * For consistency, also clear TIF_SVE here: this could be done
+ * later in copy_process(), but to avoid tripping up future
+ * maintainers it is best not to leave TIF_SVE and sve_state in
+ * an inconsistent state, even temporarily.
+ */
+ dst->thread.sve_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SVE);
+
return 0;
}
@@ -314,13 +368,6 @@
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
/*
- * Unalias p->thread.sve_state (if any) from the parent task
- * and disable discard SVE state for p:
- */
- clear_tsk_thread_flag(p, TIF_SVE);
- p->thread.sve_state = NULL;
-
- /*
* In case p was allocated the same task_struct pointer as some
* other recently-exited task, make sure p is disassociated from
* any cpu that may have run that now-exited task recently.
@@ -358,6 +405,13 @@
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+ set_ssbs_bit(childregs);
+
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
@@ -398,6 +452,32 @@
}
/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ struct pt_regs *regs = task_pt_regs(next);
+
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /* If the mitigation is enabled, then we leave SSBS clear. */
+ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+ test_tsk_thread_flag(next, TIF_SSBD))
+ return;
+
+ if (compat_user_mode(regs))
+ set_compat_ssbs_bit(regs);
+ else if (user_mode(regs))
+ set_ssbs_bit(regs);
+}
+
+/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
*
@@ -425,6 +505,8 @@
contextidr_thread_switch(next);
entry_task_switch(next);
uao_thread_switch(next);
+ ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -452,11 +534,8 @@
if (!stack_page)
return 0;
- frame.fp = thread_saved_fp(p);
- frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = p->curr_ret_stack;
-#endif
+ start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
do {
if (unwind_frame(p, &frame))
goto out;
@@ -478,40 +557,95 @@
return sp & ~0xf;
}
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- if (is_compat_task())
- return randomize_page(mm->brk, SZ_32M);
- else
- return randomize_page(mm->brk, SZ_1G);
-}
-
/*
* Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
*/
void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
+ ptrauth_thread_init_user(current);
}
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-void __used stackleak_check_alloca(unsigned long size)
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(unsigned long arg)
{
- unsigned long stack_left;
- unsigned long current_sp = current_stack_pointer;
- struct stack_info info;
-
- BUG_ON(!on_accessible_stack(current, current_sp, &info));
-
- stack_left = current_sp - info.low;
+ if (is_compat_task())
+ return -EINVAL;
+ if (arg & ~PR_TAGGED_ADDR_ENABLE)
+ return -EINVAL;
/*
- * There's a good chance we're almost out of stack space if this
- * is true. Using panic() over BUG() is more likely to give
- * reliable debugging output.
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled.
*/
- if (size >= stack_left)
- panic("alloca() over the kernel stack boundary\n");
+ if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+ return -EINVAL;
+
+ update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+ return 0;
}
-EXPORT_SYMBOL(stackleak_check_alloca);
-#endif
+
+long get_tagged_addr_ctrl(void)
+{
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TAGGED_ADDR))
+ return PR_TAGGED_ADDR_ENABLE;
+
+ return 0;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+static int zero;
+static int one = 1;
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+
+static int __init tagged_addr_init(void)
+{
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+ return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (static_branch_likely(&arm64_const_caps_ready))
+ preempt_schedule_irq();
+}
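
Editor's note: the tagged-address ABI added above is opt-in per task: PR_SET_TAGGED_ADDR_CTRL (prctl 55) sets TIF_TAGGED_ADDR, after which the kernel untags user pointers crossing the syscall boundary. Hardware TBI already made tagged loads and stores work; the prctl only governs syscall arguments. A hedged sketch, with the constants defined locally in case the libc headers are older:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #endif

    int main(void)
    {
        char *buf, *tagged;

        /* Fails with EINVAL if abi.tagged_addr_disabled is set. */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
            return 1;

        buf = malloc(16);
        strcpy(buf, "hello\n");

        /* Plant an arbitrary tag in bits 63:56; the kernel now accepts
         * this pointer as a syscall argument. */
        tagged = (char *)((uintptr_t)buf | (0x2aUL << 56));
        write(STDOUT_FILENO, tagged, 6);

        free(buf);
        return 0;
    }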
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index e8edbf1..c9f72b2 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 ARM Limited
*
@@ -24,7 +17,6 @@
#include <uapi/linux/psci.h>
-#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/smp_plat.h>
@@ -54,6 +46,11 @@
}
#ifdef CONFIG_HOTPLUG_CPU
+static bool cpu_psci_cpu_can_disable(unsigned int cpu)
+{
+ return !psci_tos_resident_on(cpu);
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
@@ -113,14 +110,11 @@
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
-#ifdef CONFIG_CPU_IDLE
- .cpu_init_idle = psci_cpu_init_idle,
- .cpu_suspend = psci_cpu_suspend_enter,
-#endif
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
.cpu_boot = cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_can_disable = cpu_psci_cpu_can_disable,
.cpu_disable = cpu_psci_cpu_disable,
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6219486..21176d0 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/ptrace.c
*
@@ -5,18 +6,6 @@
* edited by Linus Torvalds
* ARM modifications Copyright (C) 2000 Russell King
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/audit.h>
@@ -46,6 +35,7 @@
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
@@ -182,13 +172,7 @@
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)(bkpt->trigger);
+ const char *desc = "Hardware breakpoint trap (ptrace)";
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
@@ -208,10 +192,14 @@
break;
}
}
- force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
+ arm64_force_sig_ptrace_errno_trap(si_errno,
+ (void __user *)bkpt->trigger,
+ desc);
}
#endif
- arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
+ arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
+ (void __user *)(bkpt->trigger),
+ desc);
}
/*
@@ -882,7 +870,7 @@
goto out;
/*
- * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by
+ * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
* sve_set_vector_length(), which will also validate them for us:
*/
ret = sve_set_vector_length(target, header.vl,
@@ -958,6 +946,155 @@
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_PTR_AUTH
+static int pac_mask_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ /*
+ * The PAC bits can differ across data and instruction pointers
+ * depending on TCR_EL1.TBID*, which we may make use of in future, so
+ * we expose separate masks.
+ */
+ unsigned long mask = ptrauth_user_pac_mask();
+ struct user_pac_mask uregs = {
+ .data_mask = mask,
+ .insn_mask = mask,
+ };
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
+}
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
+{
+ return (__uint128_t)key->hi << 64 | key->lo;
+}
+
+static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
+{
+ struct ptrauth_key key = {
+ .lo = (unsigned long)ukey,
+ .hi = (unsigned long)(ukey >> 64),
+ };
+
+ return key;
+}
+
+static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
+ const struct ptrauth_keys *keys)
+{
+ ukeys->apiakey = pac_key_to_user(&keys->apia);
+ ukeys->apibkey = pac_key_to_user(&keys->apib);
+ ukeys->apdakey = pac_key_to_user(&keys->apda);
+ ukeys->apdbkey = pac_key_to_user(&keys->apdb);
+}
+
+static void pac_address_keys_from_user(struct ptrauth_keys *keys,
+ const struct user_pac_address_keys *ukeys)
+{
+ keys->apia = pac_key_from_user(ukeys->apiakey);
+ keys->apib = pac_key_from_user(ukeys->apibkey);
+ keys->apda = pac_key_from_user(ukeys->apdakey);
+ keys->apdb = pac_key_from_user(ukeys->apdbkey);
+}
+
+static int pac_address_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+}
+
+static int pac_address_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+ int ret;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_address_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+
+static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
+ const struct ptrauth_keys *keys)
+{
+ ukeys->apgakey = pac_key_to_user(&keys->apga);
+}
+
+static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
+ const struct user_pac_generic_keys *ukeys)
+{
+ keys->apga = pac_key_from_user(ukeys->apgakey);
+}
+
+static int pac_generic_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+}
+
+static int pac_generic_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+ int ret;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_generic_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -970,6 +1107,13 @@
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ REGSET_PAC_MASK,
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ REGSET_PACA_KEYS,
+ REGSET_PACG_KEYS,
+#endif
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -1039,6 +1183,34 @@
.get_size = sve_get_size,
},
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ [REGSET_PAC_MASK] = {
+ .core_note_type = NT_ARM_PAC_MASK,
+ .n = sizeof(struct user_pac_mask) / sizeof(u64),
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .get = pac_mask_get,
+ /* this cannot be set dynamically */
+ },
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ [REGSET_PACA_KEYS] = {
+ .core_note_type = NT_ARM_PACA_KEYS,
+ .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .get = pac_address_keys_get,
+ .set = pac_address_keys_set,
+ },
+ [REGSET_PACG_KEYS] = {
+ .core_note_type = NT_ARM_PACG_KEYS,
+ .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .get = pac_generic_keys_get,
+ .set = pac_generic_keys_set,
+ },
+#endif
+#endif
};
static const struct user_regset_view user_aarch64_view = {
@@ -1636,8 +1808,12 @@
int syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (test_thread_flag(TIF_SYSCALL_TRACE) ||
+ test_thread_flag(TIF_SYSCALL_EMU)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU))
+ return -1;
+ }
/* Do the secure computing after ptrace; failures should be fast. */
if (secure_computing(NULL) == -1)
@@ -1666,19 +1842,20 @@
}
/*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
* Userspace cannot use these until they have an architectural meaning.
* Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
- GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
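
Editor's note: the NT_ARM_PAC_MASK regset added above lets a debugger discover which pointer bits carry the authentication code. A sketch of a tracer stripping the PAC from a sampled user return address; NT_ARM_PAC_MASK (0x406 in the matching uapi elf.h) and the regset layout are redeclared locally for self-containment:

    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #ifndef NT_ARM_PAC_MASK
    #define NT_ARM_PAC_MASK 0x406
    #endif

    struct user_pac_mask {
        uint64_t data_mask;
        uint64_t insn_mask;
    };

    /* Clear the PAC bits of a user-space code pointer read from the
     * tracee, e.g. while reconstructing a backtrace. */
    static uint64_t strip_pac(pid_t pid, uint64_t ptr)
    {
        struct user_pac_mask masks;
        struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_PAC_MASK, &iov))
            return ptr;
        return ptr & ~masks.insn_mask;
    }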
diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c
index 5915ce5..e87a2b7 100644
--- a/arch/arm64/kernel/reloc_test_core.c
+++ b/arch/arm64/kernel/reloc_test_core.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S
index 2b8d9cb..16a34f1 100644
--- a/arch/arm64/kernel/reloc_test_syms.S
+++ b/arch/arm64/kernel/reloc_test_syms.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/linkage.h>
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index f407e42..c1d7db7 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kexec for arm64
*
* Copyright (C) Linaro.
* Copyright (C) Huawei Futurewei Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kexec.h>
@@ -32,6 +29,7 @@
ENTRY(arm64_relocate_new_kernel)
/* Setup the list loop variables. */
+ mov x18, x2 /* x18 = dtb address */
mov x17, x1 /* x17 = kimage_start */
mov x16, x0 /* x16 = kimage_head */
raw_dcache_line_size x15, x0 /* x15 = dcache line size */
@@ -107,7 +105,7 @@
isb
/* Start new image. */
- mov x0, xzr
+ mov x0, x18
mov x1, xzr
mov x2, xzr
mov x3, xzr
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc..a5e8b3b 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/return_address.c
*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -32,6 +30,7 @@
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -41,12 +40,9 @@
data.level = level + 2;
data.addr = NULL;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
-
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)return_address);
walk_stackframe(current, &frame, save_return_addr, &data);
if (!data.level)
@@ -55,3 +51,4 @@
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 5ba4465..ea94cf8 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -94,6 +94,9 @@
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low)
+ return false;
+
if (sp < low || sp >= high)
return false;
@@ -111,6 +114,9 @@
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low)
+ return false;
+
if (sp < low || sp >= high)
return false;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b3354ff..56f6645 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/setup.c
*
* Copyright (C) 1995-2001 Russell King
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
@@ -26,7 +15,6 @@
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
-#include <linux/bootmem.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -59,7 +47,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
-#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
@@ -183,9 +170,13 @@
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- void *dt_virt = fixmap_remap_fdt(dt_phys);
+ int size;
+ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
const char *name;
+ if (dt_virt)
+ memblock_reserve(dt_phys, size);
+
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("\n"
"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -197,6 +188,9 @@
cpu_relax();
}
+ /* Early fixups are done, map the FDT as read-only now */
+ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
name = of_flat_dt_get_machine_name();
if (!name)
return;
@@ -210,6 +204,7 @@
struct memblock_region *region;
struct resource *res;
unsigned long i = 0;
+ size_t res_size;
kernel_code.start = __pa_symbol(_text);
kernel_code.end = __pa_symbol(__init_begin - 1);
@@ -217,8 +212,10 @@
kernel_data.end = __pa_symbol(_end - 1);
num_standard_resources = memblock.memory.cnt;
- standard_resources = alloc_bootmem_low(num_standard_resources *
- sizeof(*standard_resources));
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ if (!standard_resources)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
for_each_memblock(memory, region) {
res = &standard_resources[i++];
@@ -293,6 +290,11 @@
setup_machine_fdt(__fdt_pointer);
+ /*
+ * Initialise the static keys early as they may be enabled by the
+ * cpufeature code and early parameters.
+ */
+ jump_label_init();
parse_early_param();
/*
@@ -339,6 +341,9 @@
smp_init_cpus();
smp_build_mpidr_hash();
+ /* Init percpu seeds for random tags after cpus are set up. */
+ kasan_init_tags();
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation
@@ -349,12 +354,8 @@
#endif
#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
-#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
@@ -363,6 +364,15 @@
}
}
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
+ return cpu_ops[cpu]->cpu_can_disable(cpu);
+#endif
+ return false;
+}
+
static int __init topology_init(void)
{
int i;
@@ -372,7 +382,7 @@
for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
- cpu->hotpluggable = 1;
+ cpu->hotpluggable = cpu_can_disable(i);
register_cpu(cpu, i);
}
@@ -391,6 +401,7 @@
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
offset, KIMAGE_VADDR);
+ pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
} else {
pr_emerg("Kernel Offset: disabled\n");
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 5dcc942..dd2cdc0 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/signal.c
*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/cache.h>
@@ -296,11 +285,6 @@
*/
fpsimd_flush_task_state(current);
- barrier();
- /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
- barrier();
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sve_alloc(current);
@@ -470,7 +454,7 @@
offset = 0;
limit = extra_size;
- if (!access_ok(VERIFY_READ, base, limit))
+ if (!access_ok(base, limit))
goto invalid;
continue;
@@ -556,7 +540,7 @@
frame = (struct rt_sigframe __user *)regs->sp;
- if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (restore_sigframe(regs, frame))
@@ -730,7 +714,7 @@
/*
* Check that we can actually write to the signal frame.
*/
- if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
+ if (!access_ok(user->sigframe, sp_top - sp))
return -EFAULT;
return 0;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 24b0900..12a5853 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/signal.c
*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
* Modified by Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compat.h>
@@ -29,42 +18,7 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
-
-struct compat_sigcontext {
- /* We always set these two fields to 0 */
- compat_ulong_t trap_no;
- compat_ulong_t error_code;
-
- compat_ulong_t oldmask;
- compat_ulong_t arm_r0;
- compat_ulong_t arm_r1;
- compat_ulong_t arm_r2;
- compat_ulong_t arm_r3;
- compat_ulong_t arm_r4;
- compat_ulong_t arm_r5;
- compat_ulong_t arm_r6;
- compat_ulong_t arm_r7;
- compat_ulong_t arm_r8;
- compat_ulong_t arm_r9;
- compat_ulong_t arm_r10;
- compat_ulong_t arm_fp;
- compat_ulong_t arm_ip;
- compat_ulong_t arm_sp;
- compat_ulong_t arm_lr;
- compat_ulong_t arm_pc;
- compat_ulong_t arm_cpsr;
- compat_ulong_t fault_address;
-};
-
-struct compat_ucontext {
- compat_ulong_t uc_flags;
- compat_uptr_t uc_link;
- compat_stack_t uc_stack;
- struct compat_sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask;
- int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
- compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8)));
-};
+#include <asm/vdso.h>
struct compat_vfp_sigframe {
compat_ulong_t magic;
@@ -92,16 +46,6 @@
unsigned long end_magic;
} __attribute__((__aligned__(8)));
-struct compat_sigframe {
- struct compat_ucontext uc;
- compat_ulong_t retcode[2];
-};
-
-struct compat_rt_sigframe {
- struct compat_siginfo info;
- struct compat_sigframe sig;
-};
-
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
@@ -303,7 +247,7 @@
frame = (struct compat_sigframe __user *)regs->compat_sp;
- if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (compat_restore_sigframe(regs, frame))
@@ -334,7 +278,7 @@
frame = (struct compat_rt_sigframe __user *)regs->compat_sp;
- if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (compat_restore_sigframe(regs, &frame->sig))
@@ -365,7 +309,7 @@
/*
* Check that we can actually write to the signal frame.
*/
- if (!access_ok(VERIFY_WRITE, frame, framesize))
+ if (!access_ok(frame, framesize))
frame = NULL;
return frame;
@@ -398,14 +342,38 @@
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
/* Set up sigreturn pointer */
+#ifdef CONFIG_COMPAT_VDSO
+ void *vdso_base = current->mm->context.vdso;
+ void *vdso_trampoline;
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ if (thumb) {
+ vdso_trampoline = VDSO_SYMBOL(vdso_base,
+ compat_rt_sigreturn_thumb);
+ } else {
+ vdso_trampoline = VDSO_SYMBOL(vdso_base,
+ compat_rt_sigreturn_arm);
+ }
+ } else {
+ if (thumb) {
+ vdso_trampoline = VDSO_SYMBOL(vdso_base,
+ compat_sigreturn_thumb);
+ } else {
+ vdso_trampoline = VDSO_SYMBOL(vdso_base,
+ compat_sigreturn_arm);
+ }
+ }
+
+ retcode = ptr_to_compat(vdso_trampoline) + thumb;
+#else
unsigned int idx = thumb << 1;
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = AARCH32_VECTORS_BASE +
- AARCH32_KERN_SIGRET_CODE_OFFSET +
+ retcode = (unsigned long)current->mm->context.vdso +
(idx << 2) + thumb;
+#endif
}
regs->regs[0] = usig;
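
Editor's note: in the legacy (non-CONFIG_COMPAT_VDSO) branch the return address is computed purely from the layout of sigreturn32.S (added below): idx = thumb << 1 accounts for the 8-byte ARM pairs versus 4-byte Thumb pairs, += 3 skips ahead to the rt_sigreturn entries, and the trailing + thumb sets bit 0 so the exception return resumes in Thumb state. Worked through in a hedged sketch:

    /* Byte offsets into __aarch32_sigret_code_start:
     *   thumb=0, !SA_SIGINFO: idx=0 -> offset 0  (ARM sigreturn)
     *   thumb=1, !SA_SIGINFO: idx=2 -> offset 8  (Thumb sigreturn)
     *   thumb=0,  SA_SIGINFO: idx=3 -> offset 12 (ARM rt_sigreturn)
     *   thumb=1,  SA_SIGINFO: idx=5 -> offset 20 (Thumb rt_sigreturn)
     */
    static unsigned long sigret_offset(int thumb, int siginfo)
    {
        unsigned int idx = thumb << 1;

        if (siginfo)
            idx += 3;
        return (idx << 2) + thumb;   /* bit 0 selects Thumb state */
    }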
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
new file mode 100644
index 0000000..475d30d
--- /dev/null
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch32 sigreturn code.
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012-2018 ARM Ltd.
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+
+#include <asm/unistd.h>
+
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
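
Editor's note: each .byte line above encodes one little-endian instruction with the syscall number folded into the immediate field (bytes {imm, 0x27} are Thumb "movs r7, #imm"; {imm, 0xdf} are "svc #imm"). A hedged decode, assuming the usual AArch32 numbers __NR_compat_sigreturn == 119 and __NR_compat_rt_sigreturn == 173:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* ARM "mov r7, #119": bytes {119, 0x70, 0xa0, 0xe3}. */
        uint8_t arm_mov[4] = { 119, 0x70, 0xa0, 0xe3 };
        uint32_t insn = arm_mov[0] | arm_mov[1] << 8 | arm_mov[2] << 16 |
                        (uint32_t)arm_mov[3] << 24;
        printf("%08x\n", insn);   /* 0xe3a07077 */

        /* Thumb "movs r7, #119": bytes {119, 0x27} -> 0x2777; the svc
         * pair {119, 0xdf} decodes to 0xdf77 the same way. */
        uint8_t thumb_mov[2] = { 119, 0x27 };
        printf("%04x\n", thumb_mov[0] | thumb_mov[1] << 8);
        return 0;
    }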
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8e..f5b04dd 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -27,7 +27,7 @@
* aff0 = mpidr_masked & 0xff;
* aff1 = mpidr_masked & 0xff00;
* aff2 = mpidr_masked & 0xff0000;
- * aff2 = mpidr_masked & 0xff00000000;
+ * aff3 = mpidr_masked & 0xff00000000;
* dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
*}
* Input registers: rs0, rs1, rs2, rs3, mpidr, mask
@@ -101,6 +101,7 @@
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =_cpu_resume
br x8
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 6252234..5465527 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -1,19 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License Version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
+
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
.macro SMCCC instr
.cfi_startproc
@@ -40,6 +33,7 @@
ENTRY(__arm_smccc_smc)
SMCCC smc
ENDPROC(__arm_smccc_smc)
+EXPORT_SYMBOL(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -50,3 +44,4 @@
ENTRY(__arm_smccc_hvc)
SMCCC hvc
ENDPROC(__arm_smccc_hvc)
+EXPORT_SYMBOL(__arm_smccc_hvc)
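
Editor's note: exporting the SMCCC entry points means modules can now issue SMC/HVC calls directly. A minimal sketch querying the SMCCC version from a module (assumes firmware implementing SMCCC v1.1, and that ARM_SMCCC_VERSION_FUNC_ID from <linux/arm-smccc.h> is appropriate for the conduit in use):

    #include <linux/arm-smccc.h>
    #include <linux/module.h>

    static int __init smccc_demo_init(void)
    {
        struct arm_smccc_res res;

        arm_smccc_smc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
        pr_info("SMCCC version: 0x%lx\n", res.a0);
        return 0;
    }

    static void __exit smccc_demo_exit(void)
    {
    }

    module_init(smccc_demo_init);
    module_exit(smccc_demo_exit);
    MODULE_LICENSE("GPL");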
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 25fcd22..dc9fe87 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SMP initialisation and IPI support
* Based on arch/arm/kernel/smp.c
*
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
@@ -35,6 +24,7 @@
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
@@ -133,7 +123,7 @@
* time out.
*/
wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(5000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
@@ -141,30 +131,37 @@
}
} else {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ return ret;
}
secondary_data.task = NULL;
secondary_data.stack = NULL;
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (ret && status) {
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
- switch (status) {
+ switch (status & CPU_BOOT_STATUS_MASK) {
default:
pr_err("CPU%u: failed in unknown state : 0x%lx\n",
cpu, status);
+ cpus_stuck_in_kernel++;
break;
case CPU_KILL_ME:
if (!op_cpu_kill(cpu)) {
pr_crit("CPU%u: died during early boot\n", cpu);
break;
}
- /* Fall through */
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN)
+ pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
cpus_stuck_in_kernel++;
break;
case CPU_PANIC_KERNEL:
@@ -175,6 +172,20 @@
return ret;
}
+static void init_gic_priority_masking(void)
+{
+ u32 cpuflags;
+
+ if (WARN_ON(!gic_enable_sre()))
+ return;
+
+ cpuflags = read_sysreg(daif);
+
+ WARN_ON(!(cpuflags & PSR_I_BIT));
+
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -201,6 +212,9 @@
*/
cpu_uninstall_idmap();
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
+
preempt_disable();
trace_hardirqs_off();
@@ -408,12 +422,18 @@
void __init smp_prepare_boot_cpu(void)
{
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
- /*
- * Initialise the static keys early as they may be enabled by the
- * cpufeature code.
- */
- jump_label_init();
cpuinfo_store_boot_cpu();
+
+ /*
+ * We now know enough about the boot CPU to apply the
+ * alternatives that cannot wait until interrupt handling
+ * and/or scheduling is enabled.
+ */
+ apply_boot_alternatives();
+
+ /* Conditionally switch to GIC PMR for interrupt masking */
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -548,7 +568,7 @@
}
static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
@@ -557,7 +577,7 @@
if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
acpi_map_gic_cpu_interface(processor);
@@ -602,7 +622,7 @@
{
struct device_node *dn;
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
u64 hwid = of_get_cpu_mpidr(dn);
if (hwid == INVALID_HWID)
@@ -807,18 +827,23 @@
}
#endif
-/*
- * ipi_cpu_stop - handle IPI from smp_send_stop()
- */
-static void ipi_cpu_stop(unsigned int cpu)
+static void local_cpu_stop(void)
{
- set_cpu_online(cpu, false);
+ set_cpu_online(smp_processor_id(), false);
local_daif_mask();
sdei_mask_local_cpu();
+ cpu_park_loop();
+}
- while (1)
- cpu_relax();
+/*
+ * We need to implement panic_smp_self_stop() for parallel panic() calls, so
+ * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
+ * CPUs that have already stopped themselves.
+ */
+void panic_smp_self_stop(void)
+{
+ local_cpu_stop();
}
#ifdef CONFIG_KEXEC_CORE
@@ -871,7 +896,7 @@
case IPI_CPU_STOP:
irq_enter();
- ipi_cpu_stop(cpu);
+ local_cpu_stop();
irq_exit();
break;
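
The switch above masks the status word first because the low bits carry a small state value while the bits above them carry "stuck" reason flags. An illustrative decoder; the constant values here are made up, the real ones live in arch/arm64/include/asm/smp.h:

    #include <stdio.h>

    #define BOOT_STATUS_MASK        0xff            /* illustrative */
    #define STUCK_IN_KERNEL         0x03            /* illustrative */
    #define STUCK_REASON_52_BIT_VA  (1UL << 8)      /* illustrative */
    #define STUCK_REASON_NO_GRAN    (1UL << 9)      /* illustrative */

    static void decode_boot_status(unsigned long status)
    {
            if ((status & BOOT_STATUS_MASK) != STUCK_IN_KERNEL)
                    return;
            printf("stuck in kernel%s%s\n",
                   (status & STUCK_REASON_52_BIT_VA) ? " [no 52-bit VA]" : "",
                   (status & STUCK_REASON_NO_GRAN) ? " [no granule]" : "");
    }

    int main(void)
    {
            decode_boot_status(STUCK_IN_KERNEL | STUCK_REASON_NO_GRAN);
            return 0;
    }
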
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 9303465..c8a3fee 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Spin Table SMP initialisation
*
* Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/delay.h>
@@ -30,7 +19,7 @@
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
-volatile unsigned long __section(".mmuoff.data.read")
+volatile unsigned long __section(.mmuoff.data.read)
secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
index 3432e5e..52cfc61 100644
--- a/arch/arm64/kernel/ssbd.c
+++ b/arch/arm64/kernel/ssbd.c
@@ -3,17 +3,34 @@
* Copyright (C) 2018 ARM Ltd, All Rights Reserved.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
+#include <linux/prctl.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
+static void ssbd_ssbs_enable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate |= val;
+}
+
+static void ssbd_ssbs_disable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate &= ~val;
+}
+
/*
* prctl interface for SSBD
- * FIXME: Drop the below ifdefery once merged in 4.18.
*/
-#ifdef PR_SPEC_STORE_BYPASS
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
int state = arm64_get_ssbd_state();
@@ -46,12 +63,14 @@
return -EPERM;
task_clear_spec_ssb_disable(task);
clear_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_enable(task);
break;
case PR_SPEC_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
return -EPERM;
task_set_spec_ssb_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
case PR_SPEC_FORCE_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
@@ -59,6 +78,7 @@
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
default:
return -ERANGE;
@@ -107,4 +127,3 @@
return -ENODEV;
}
}
-#endif /* PR_SPEC_STORE_BYPASS */
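
These hooks back the generic speculation-control prctl; from userspace, per-task SSB mitigation is driven like this (a sketch using the uapi constants from <linux/prctl.h>):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Opt this task out of speculative store bypass... */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            /* ...and read back the resulting state flags. */
            printf("ssb state: 0x%x\n",
                   (unsigned)prctl(PR_GET_SPECULATION_CTRL,
                                   PR_SPEC_STORE_BYPASS, 0, 0, 0));
            return 0;
    }
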
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7e..a336cb1 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Stack tracing support
*
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -40,9 +30,18 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long fp = frame->fp;
+ struct stack_info info;
if (fp & 0xf)
return -EINVAL;
@@ -50,27 +49,55 @@
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, NULL))
+ if (!on_accessible_stack(tsk, fp, &info))
return -EINVAL;
+ if (test_bit(info.type, frame->stacks_done))
+ return -EINVAL;
+
+ /*
+ * As stacks grow downward, any valid record on the same stack must be
+ * at a strictly higher address than the prior record.
+ *
+ * Stacks can nest in several valid orders, e.g.
+ *
+ * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+ * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+ *
+ * ... but the nesting itself is strict. Once we transition from one
+ * stack to another, it's never valid to unwind back to that first
+ * stack.
+ */
+ if (info.type == frame->prev_type) {
+ if (fp <= frame->prev_fp)
+ return -EINVAL;
+ } else {
+ set_bit(frame->prev_type, frame->stacks_done);
+ }
+
+ /*
+ * Record this frame record's values and location. The prev_fp and
+ * prev_type are only meaningful to the next unwind_frame() invocation.
+ */
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ frame->prev_fp = fp;
+ frame->prev_type = info.type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
- if (WARN_ON_ONCE(frame->graph == -1))
- return -EINVAL;
- if (frame->graph < -1)
- frame->graph += FTRACE_NOTRACE_DEPTH;
-
+ struct ftrace_ret_stack *ret_stack;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
- frame->pc = tsk->ret_stack[frame->graph--].ret;
+ ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+ if (WARN_ON_ONCE(!ret_stack))
+ return -EINVAL;
+ frame->pc = ret_stack->ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -85,6 +112,7 @@
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +127,7 @@
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -134,16 +163,10 @@
data.skip = trace->skip;
data.no_sched_functions = 0;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
static noinline void __save_stack_trace(struct task_struct *tsk,
struct stack_trace *trace, unsigned int nosched)
@@ -159,21 +182,17 @@
data.no_sched_functions = nosched;
if (tsk != current) {
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame, thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
} else {
/* We don't want this function or its caller */
data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)__save_stack_trace);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
walk_stackframe(tsk, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
put_task_stack(tsk);
}
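
The prev_fp/prev_type/stacks_done bookkeeping consumed by unwind_frame() above is primed by the new start_backtrace() helper this update adds to asm/stacktrace.h; roughly:

    static inline void start_backtrace(struct stackframe *frame,
                                       unsigned long fp, unsigned long pc)
    {
            frame->fp = fp;
            frame->pc = pc;
    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
            frame->graph = 0;
    #endif

            /*
             * Prime the first unwind: fp can never point at the (unreal)
             * UNKNOWN stack, so the first unwind_frame() call is treated
             * as a transition onto whichever stack fp actually lives on,
             * and prev_fp is never compared against a real record.
             */
            bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
            frame->prev_fp = 0;
            frame->prev_type = STACK_TYPE_UNKNOWN;
    }
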
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 70c2833..9405d1b 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -48,6 +48,10 @@
*/
cpu_uninstall_idmap();
+ /* Restore CnP bit in TTBR1_EL1 */
+ if (system_supports_cnp())
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+
/*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b44065f..d5ffaaa 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* AArch64-specific system calls implementation
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
@@ -31,7 +20,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, off)
+ unsigned long, fd, unsigned long, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
@@ -47,22 +36,26 @@
return ksys_personality(personality);
}
+asmlinkage long sys_ni_syscall(void);
+
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return sys_ni_syscall();
+}
+
/*
* Wrappers to pass the pt_regs argument.
*/
-#define sys_personality sys_arm64_personality
-
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
+#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd.h>
};
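
Including <asm/unistd.h> twice under two different __SYSCALL definitions is the usual table-building idiom: the first pass emits prototypes, the second emits designated initializers. In miniature, with hypothetical entries:

    struct pt_regs;
    typedef long (*syscall_fn_t)(const struct pt_regs *);

    /* Pass 1: each entry becomes a prototype. */
    #define __SYSCALL(nr, sym) long __arm64_##sym(const struct pt_regs *);
    __SYSCALL(0, sys_demo_a)        /* hypothetical entries */
    __SYSCALL(1, sys_demo_b)
    #undef __SYSCALL

    long __arm64_sys_demo_a(const struct pt_regs *regs) { (void)regs; return 0; }
    long __arm64_sys_demo_b(const struct pt_regs *regs) { (void)regs; return 1; }

    /* Pass 2: the same list becomes designated initializers. */
    #define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
    static const syscall_fn_t demo_call_table[] = {
            __SYSCALL(0, sys_demo_a)
            __SYSCALL(1, sys_demo_b)
    };
    #undef __SYSCALL
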
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 0f8bcb7..fc40386 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/sys32.c
*
* Copyright (C) 2015 ARM Ltd.
- *
- * This program is free software(void); you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http(void);//www.gnu.org/licenses/>.
*/
/*
@@ -133,17 +122,14 @@
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
- [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index a610982..f1cb649 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/sys_arm.c
*
* Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
* Copyright (C) 1995, 1996 Russell King.
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compat.h>
@@ -58,7 +47,7 @@
if (end < start || flags)
return -EINVAL;
- if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
+ if (!access_ok((const void __user *)start, end - start))
return -EFAULT;
return __do_compat_cache_op(start, end);
@@ -66,12 +55,11 @@
/*
* Handle all unrecognised system calls.
*/
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
{
- siginfo_t info;
- unsigned int no = regs->regs[7];
+ void __user *addr;
- switch (no) {
+ switch (scno) {
/*
* Flush a region from virtual address 'r0' to virtual address 'r1'
* _exclusive_. There is no alignment requirement on either address;
@@ -102,23 +90,20 @@
default:
/*
- * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+ * Calls 0xf0000..0xf07ff are defined to return -ENOSYS
* if not implemented, rather than raising SIGILL. This
* way the calling program can gracefully determine whether
* a feature is supported.
*/
- if ((no & 0xffff) <= 0x7ff)
+ if (scno < __ARM_NR_COMPAT_END)
return -ENOSYS;
break;
}
- clear_siginfo(&info);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (compat_thumb_mode(regs) ? 2 : 4);
+ addr = (void __user *)instruction_pointer(regs) -
+ (compat_thumb_mode(regs) ? 2 : 4);
- arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
+ arm64_notify_die("Oops - bad compat syscall(2)", regs,
+ SIGILL, ILL_ILLTRP, addr, scno);
return 0;
}
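
__ARM_NR_COMPAT_END bounds the ARM-private range starting at 0xf0000; a 32-bit process reaches the recognised calls, such as cacheflush, like any other syscall. A sketch (must run as an AArch32 binary; the number follows the compat unistd headers):

    #include <unistd.h>
    #include <sys/syscall.h>

    /* ARM-private cacheflush: 0x0f0000 (__ARM_NR_COMPAT_BASE) + 2. */
    #define DEMO_ARM_NR_cacheflush  0x0f0002

    static int sync_icache(void *start, void *end)
    {
            /* flags must be zero or the kernel returns -EINVAL. */
            return syscall(DEMO_ARM_NR_cacheflush, start, end, 0);
    }
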
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d223..871c739 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,21 +8,21 @@
#include <linux/syscalls.h>
#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
long ret;
if (is_compat_task()) {
- ret = compat_arm_syscall(regs);
+ ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS)
return ret;
}
@@ -47,7 +47,7 @@
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
ret = __invoke_syscall(regs, syscall_fn);
} else {
- ret = do_ni_syscall(regs);
+ ret = do_ni_syscall(regs, scno);
}
regs->regs[0] = ret;
@@ -61,6 +61,35 @@
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -69,6 +98,7 @@
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
+ cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index f258636..0b29464 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994-2001 Russell King
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clockchips.h>
@@ -49,11 +38,8 @@
if (!in_lock_functions(regs->pc))
return regs->pc;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
-#endif
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+
do {
int ret = unwind_frame(NULL, &frame);
if (ret < 0)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0825c4a..fa9528d 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -14,250 +14,13 @@
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
-#include <linux/node.h>
-#include <linux/nodemask.h>
-#include <linux/of.h>
-#include <linux/sched.h>
-#include <linux/sched/topology.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/string.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
-static int __init get_cpu_for_node(struct device_node *node)
-{
- struct device_node *cpu_node;
- int cpu;
-
- cpu_node = of_parse_phandle(node, "cpu", 0);
- if (!cpu_node)
- return -1;
-
- cpu = of_cpu_node_to_id(cpu_node);
- if (cpu >= 0)
- topology_parse_cpu_capacity(cpu_node, cpu);
- else
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
-
- of_node_put(cpu_node);
- return cpu;
-}
-
-static int __init parse_core(struct device_node *core, int package_id,
- int core_id)
-{
- char name[10];
- bool leaf = true;
- int i = 0;
- int cpu;
- struct device_node *t;
-
- do {
- snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else {
- pr_err("%pOF: Can't get CPU for thread\n",
- t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
- }
- i++;
- } while (t);
-
- cpu = get_cpu_for_node(core);
- if (cpu >= 0) {
- if (!leaf) {
- pr_err("%pOF: Core has both threads and CPU\n",
- core);
- return -EINVAL;
- }
-
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- } else if (leaf) {
- pr_err("%pOF: Can't get CPU for leaf core\n", core);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init parse_cluster(struct device_node *cluster, int depth)
-{
- char name[10];
- bool leaf = true;
- bool has_cores = false;
- struct device_node *c;
- static int package_id __initdata;
- int core_id = 0;
- int i, ret;
-
- /*
- * First check for child clusters; we currently ignore any
- * information about the nesting of clusters and present the
- * scheduler with a flat list of them.
- */
- i = 0;
- do {
- snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, depth + 1);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- /* Now check for cores */
- i = 0;
- do {
- snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
-
- if (leaf) {
- ret = parse_core(c, package_id, core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
-
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- if (leaf && !has_cores)
- pr_warn("%pOF: empty cluster\n", cluster);
-
- if (leaf)
- package_id++;
-
- return 0;
-}
-
-static int __init parse_dt_topology(void)
-{
- struct device_node *cn, *map;
- int ret = 0;
- int cpu;
-
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return 0;
- }
-
- /*
- * When topology is provided cpu-map is essentially a root
- * cluster with restricted subnodes.
- */
- map = of_get_child_by_name(cn, "cpu-map");
- if (!map)
- goto out;
-
- ret = parse_cluster(map, 0);
- if (ret != 0)
- goto out_map;
-
- topology_normalize_cpu_scale();
-
- /*
- * Check that all cores are in the topology; the SMP code will
- * only mark cores described in the DT as possible.
- */
- for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].package_id == -1)
- ret = -EINVAL;
-
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
- return ret;
-}
-
-/*
- * cpu topology table
- */
-struct cpu_topology cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
- const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
-
- /* Find the smaller of NUMA, core or LLC siblings */
- if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
- /* not numa in package, lets use the package siblings */
- core_mask = &cpu_topology[cpu].core_sibling;
- }
- if (cpu_topology[cpu].llc_id != -1) {
- if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
- core_mask = &cpu_topology[cpu].llc_sibling;
- }
-
- return core_mask;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
- struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
-
- /* update core and thread sibling masks */
- for_each_online_cpu(cpu) {
- cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
- cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
- cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
- }
-
- if (cpuid_topo->package_id != cpu_topo->package_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id != cpu_topo->core_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
- }
-}
-
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
@@ -296,60 +59,31 @@
update_siblings_masks(cpuid);
}
-static void clear_cpu_topology(int cpu)
-{
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpumask_clear(&cpu_topo->llc_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
-
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
-}
-
-static void __init reset_cpu_topology(void)
-{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpu_topo->thread_id = -1;
- cpu_topo->core_id = 0;
- cpu_topo->package_id = -1;
- cpu_topo->llc_id = -1;
-
- clear_cpu_topology(cpu);
- }
-}
-
-void remove_cpu_topology(unsigned int cpu)
-{
- int sibling;
-
- for_each_cpu(sibling, topology_core_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
- for_each_cpu(sibling, topology_sibling_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
- for_each_cpu(sibling, topology_llc_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
-
- clear_cpu_topology(cpu);
-}
-
#ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * If the PPTT doesn't have thread information, assume a homogeneous
+ * machine and return the current CPU's thread state.
+ */
+ if (is_threaded < 0)
+ is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+ return !!is_threaded;
+}
+
/*
* Propagate the topology information of the processor_topology_node tree to the
* cpu_topology array.
*/
-static int __init parse_acpi_topology(void)
+int __init parse_acpi_topology(void)
{
- bool is_threaded;
int cpu, topology_id;
- is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+ if (acpi_disabled)
+ return 0;
for_each_possible_cpu(cpu) {
int i, cache_id;
@@ -358,7 +92,7 @@
if (topology_id < 0)
return topology_id;
- if (is_threaded) {
+ if (acpi_cpu_is_threaded(cpu)) {
cpu_topology[cpu].thread_id = topology_id;
topology_id = find_acpi_cpu_topology(cpu, 1);
cpu_topology[cpu].core_id = topology_id;
@@ -384,24 +118,6 @@
return 0;
}
-
-#else
-static inline int __init parse_acpi_topology(void)
-{
- return -EINVAL;
-}
#endif
-void __init init_cpu_topology(void)
-{
- reset_cpu_topology();
- /*
- * Discard anything that was parsed if we hit an error so we
- * don't use partial information.
- */
- if (!acpi_disabled && parse_acpi_topology())
- reset_cpu_topology();
- else if (of_have_populated_dt() && parse_dt_topology())
- reset_cpu_topology();
-}
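
acpi_cpu_is_threaded() above prefers the PPTT's answer and only falls back to MPIDR_EL1.MT when the table is silent. The predicate in isolation (a sketch):

    #include <stdbool.h>
    #include <stdint.h>

    #define MPIDR_MT_BITMASK        (1ULL << 24)    /* MPIDR_EL1.MT */

    /* PPTT answer wins when present (>= 0); otherwise trust MPIDR.MT. */
    static bool cpu_is_threaded(int pptt_is_thread, uint64_t mpidr)
    {
            if (pptt_is_thread >= 0)
                    return pptt_is_thread;
            return !!(mpidr & MPIDR_MT_BITMASK);
    }
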
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b9da093..34739e8 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -1,26 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/kernel/traps.c
*
* Copyright (C) 1995-2009 Russell King
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bug.h>
+#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -35,6 +26,7 @@
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
+#include <linux/kasan.h>
#include <asm/atomic.h>
#include <asm/bug.h>
@@ -65,16 +57,19 @@
printk(" %pS\n", (void *)where);
}
-static void __dump_instr(const char *lvl, struct pt_regs *regs)
+static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
+ if (user_mode(regs))
+ return;
+
for (i = -4; i < 1; i++) {
unsigned int val, bad;
- bad = get_user(val, &((u32 *)addr)[i]);
+ bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
@@ -83,28 +78,23 @@
break;
}
}
- printk("%sCode: %s\n", lvl, str);
-}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
-{
- if (!user_mode(regs)) {
- mm_segment_t fs = get_fs();
- set_fs(KERNEL_DS);
- __dump_instr(lvl, regs);
- set_fs(fs);
- } else {
- __dump_instr(lvl, regs);
- }
+ printk("%sCode: %s\n", lvl, str);
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- int skip;
+ int skip = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ if (regs) {
+ if (user_mode(regs))
+ return;
+ skip = 1;
+ }
+
if (!tsk)
tsk = current;
@@ -112,20 +102,18 @@
return;
if (tsk == current) {
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)dump_backtrace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)dump_backtrace);
} else {
/*
* task blocked in __switch_to
*/
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
-#endif
- skip = !!regs;
printk("Call trace:\n");
do {
/* skip until specified stack frame */
@@ -162,7 +150,6 @@
static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -175,15 +162,9 @@
return ret;
print_modules();
- __show_regs(regs);
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
- end_of_stack(tsk));
+ show_regs(regs);
- if (!user_mode(regs)) {
- dump_backtrace(regs, tsk);
- dump_instr(KERN_EMERG, regs);
- }
+ dump_kernel_instr(KERN_EMERG, regs);
return ret;
}
@@ -224,24 +205,19 @@
do_exit(SIGSEGV);
}
-static bool show_unhandled_signals_ratelimited(void)
+static void arm64_show_signal(int signo, const char *str)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- return show_unhandled_signals && __ratelimit(&rs);
-}
-
-void arm64_force_sig_info(struct siginfo *info, const char *str,
- struct task_struct *tsk)
-{
+ struct task_struct *tsk = current;
unsigned int esr = tsk->thread.fault_code;
struct pt_regs *regs = task_pt_regs(tsk);
- if (!unhandled_signal(tsk, info->si_signo))
- goto send_sig;
-
- if (!show_unhandled_signals_ratelimited())
- goto send_sig;
+ /* Leave if the signal won't be shown */
+ if (!show_unhandled_signals ||
+ !unhandled_signal(tsk, signo) ||
+ !__ratelimit(&rs))
+ return;
pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
if (esr)
@@ -251,19 +227,42 @@
print_vma_addr(KERN_CONT " in ", regs->pc);
pr_cont("\n");
__show_regs(regs);
+}
-send_sig:
- force_sig_info(info->si_signo, info, tsk);
+void arm64_force_sig_fault(int signo, int code, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(signo, str);
+ if (signo == SIGKILL)
+ force_sig(SIGKILL);
+ else
+ force_sig_fault(signo, code, addr);
+}
+
+void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
+ const char *str)
+{
+ arm64_show_signal(SIGBUS, str);
+ force_sig_mceerr(code, addr, lsb);
+}
+
+void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(SIGTRAP, str);
+ force_sig_ptrace_errno_trap(errno, addr);
}
void arm64_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, int err)
+ int signo, int sicode, void __user *addr,
+ int err)
{
if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
current->thread.fault_address = 0;
current->thread.fault_code = err;
- arm64_force_sig_info(info, str, current);
+
+ arm64_force_sig_fault(signo, sicode, addr, str);
} else {
die(str, regs, err);
}
@@ -350,11 +349,11 @@
void force_signal_inject(int signal, int code, unsigned long address)
{
- siginfo_t info;
const char *desc;
struct pt_regs *regs = current_pt_regs();
- clear_siginfo(&info);
+ if (WARN_ON(!user_mode(regs)))
+ return;
switch (signal) {
case SIGILL:
@@ -374,12 +373,7 @@
signal = SIGKILL;
}
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = (void __user *)address;
-
- arm64_notify_die(desc, regs, &info, 0);
+ arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}
/*
@@ -408,13 +402,8 @@
if (call_undef_hook(regs) == 0)
return;
- force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
BUG_ON(!user_mode(regs));
-}
-
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
-{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
#define __user_cache_maint(insn, address, res) \
@@ -440,7 +429,7 @@
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
unsigned long address;
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
@@ -453,6 +442,9 @@
case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
__user_cache_maint("dc civac", address, ret);
break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
+ __user_cache_maint("sys 3, c7, c13, 1", address, ret);
+ break;
case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
__user_cache_maint("sys 3, c7, c12, 1", address, ret);
break;
@@ -475,7 +467,7 @@
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
pt_regs_write_reg(regs, rt, val);
@@ -485,27 +477,43 @@
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
- pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+ pt_regs_write_reg(regs, rt, arch_timer_read_counter());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
+static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+{
+ u32 sysreg, rt;
+
+ rt = ESR_ELx_SYS64_ISS_RT(esr);
+ sysreg = esr_sys64_to_sysreg(esr);
+
+ if (do_emulate_mrs(regs, sysreg, rt) != 0)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+}
+
+static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+{
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
struct sys64_hook {
unsigned int esr_mask;
unsigned int esr_val;
void (*handler)(unsigned int esr, struct pt_regs *regs);
};
-static struct sys64_hook sys64_hooks[] = {
+static const struct sys64_hook sys64_hooks[] = {
{
.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
@@ -529,12 +537,179 @@
.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
.handler = cntfrq_read_handler,
},
+ {
+ /* Trap read access to CPUID registers */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
+ .handler = mrs_handler,
+ },
+ {
+ /* Trap WFI instructions executed in userspace */
+ .esr_mask = ESR_ELx_WFx_MASK,
+ .esr_val = ESR_ELx_WFx_WFI_VAL,
+ .handler = wfi_handler,
+ },
{},
};
+
+#ifdef CONFIG_COMPAT
+#define PSTATE_IT_1_0_SHIFT 25
+#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
+#define PSTATE_IT_7_2_SHIFT 10
+#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
+
+static u32 compat_get_it_state(struct pt_regs *regs)
+{
+ u32 it, pstate = regs->pstate;
+
+ it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
+ it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
+
+ return it;
+}
+
+static void compat_set_it_state(struct pt_regs *regs, u32 it)
+{
+ u32 pstate_it;
+
+ pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
+ pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
+
+ regs->pstate &= ~PSR_AA32_IT_MASK;
+ regs->pstate |= pstate_it;
+}
+
+static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
+{
+ int cond;
+
+ /* Only a T32 instruction can trap without CV being set */
+ if (!(esr & ESR_ELx_CV)) {
+ u32 it;
+
+ it = compat_get_it_state(regs);
+ if (!it)
+ return true;
+
+ cond = it >> 4;
+ } else {
+ cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+ }
+
+ return aarch32_opcode_cond_checks[cond](regs->pstate);
+}
+
+static void advance_itstate(struct pt_regs *regs)
+{
+ u32 it;
+
+ /* ARM mode */
+ if (!(regs->pstate & PSR_AA32_T_BIT) ||
+ !(regs->pstate & PSR_AA32_IT_MASK))
+ return;
+
+ it = compat_get_it_state(regs);
+
+ /*
+ * If this is the last instruction of the block, wipe the IT
+ * state. Otherwise advance it.
+ */
+ if (!(it & 7))
+ it = 0;
+ else
+ it = (it & 0xe0) | ((it << 1) & 0x1f);
+
+ compat_set_it_state(regs, it);
+}
+
+static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
+ unsigned int sz)
+{
+ advance_itstate(regs);
+ arm64_skip_faulting_instruction(regs, sz);
+}
+
+static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
+
+ pt_regs_write_reg(regs, reg, arch_timer_get_rate());
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_32_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
+ .handler = compat_cntfrq_read_handler,
+ },
+ {},
+};
+
+static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
+ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
+ u64 val = arch_timer_read_counter();
+
+ pt_regs_write_reg(regs, rt, lower_32_bits(val));
+ pt_regs_write_reg(regs, rt2, upper_32_bits(val));
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static const struct sys64_hook cp15_64_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
+ .handler = compat_cntvct_read_handler,
+ },
+ {},
+};
+
+asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+{
+ const struct sys64_hook *hook, *hook_base;
+
+ if (!cp15_cond_valid(esr, regs)) {
+ /*
+ * There is no T16 variant of a CP access, so we
+ * always advance PC by 4 bytes.
+ */
+ arm64_compat_skip_faulting_instruction(regs, 4);
+ return;
+ }
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_CP15_32:
+ hook_base = cp15_32_hooks;
+ break;
+ case ESR_ELx_EC_CP15_64:
+ hook_base = cp15_64_hooks;
+ break;
+ default:
+ do_undefinstr(regs);
+ return;
+ }
+
+ for (hook = hook_base; hook->handler; hook++)
+ if ((hook->esr_mask & esr) == hook->esr_val) {
+ hook->handler(esr, regs);
+ return;
+ }
+
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+ * so that we handle these consistently.
+ */
+ do_undefinstr(regs);
+}
+#endif
+
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
- struct sys64_hook *hook;
+ const struct sys64_hook *hook;
for (hook = sys64_hooks; hook->handler; hook++)
if ((hook->esr_mask & esr) == hook->esr_val) {
@@ -560,6 +735,7 @@
[ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
[ESR_ELx_EC_FP_ASIMD] = "ASIMD",
[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+ [ESR_ELx_EC_PAC] = "PAC",
[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
[ESR_ELx_EC_ILL] = "PSTATE.IL",
[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
@@ -570,6 +746,7 @@
[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
[ESR_ELx_EC_SVE] = "SVE",
+ [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -608,7 +785,6 @@
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
- die("Oops - bad mode", regs, 0);
local_daif_mask();
panic("bad mode");
}
@@ -619,19 +795,13 @@
*/
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
- clear_siginfo(&info);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
-
current->thread.fault_address = 0;
current->thread.fault_code = esr;
- arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
+ arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
+ "Bad EL0 synchronous exception");
}
#ifdef CONFIG_VMAP_STACK
@@ -704,6 +874,10 @@
/*
* The CPU can't make progress. The exception may have
* been imprecise.
+ *
+ * Neoverse-N1 #1349291 means a non-KVM SError reported as
+ * Unrecoverable should be treated as Uncontainable. We
+ * call arm64_serror_panic() in both cases.
*/
return true;
@@ -716,15 +890,26 @@
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
- nmi_enter();
+ const bool was_in_nmi = in_nmi();
+
+ if (!was_in_nmi)
+ nmi_enter();
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
- nmi_exit();
+ if (!was_in_nmi)
+ nmi_exit();
}
+asmlinkage void enter_from_user_mode(void)
+{
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+}
+NOKPROBE_SYMBOL(enter_from_user_mode);
+
void __pte_error(const char *file, int line, unsigned long val)
{
pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
@@ -761,9 +946,6 @@
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
die("Oops - BUG", regs, 0);
@@ -783,11 +965,56 @@
}
static struct break_hook bug_break_hook = {
- .esr_val = 0xf2000000 | BUG_BRK_IMM,
- .esr_mask = 0xffffffff,
.fn = bug_handler,
+ .imm = BUG_BRK_IMM,
};
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_ESR_RECOVER 0x20
+#define KASAN_ESR_WRITE 0x10
+#define KASAN_ESR_SIZE_MASK 0x0f
+#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
+
+static int kasan_handler(struct pt_regs *regs, unsigned int esr)
+{
+ bool recover = esr & KASAN_ESR_RECOVER;
+ bool write = esr & KASAN_ESR_WRITE;
+ size_t size = KASAN_ESR_SIZE(esr);
+ u64 addr = regs->regs[0];
+ u64 pc = regs->pc;
+
+ kasan_report(addr, size, write, pc);
+
+ /*
+ * The instrumentation allows us to control whether we can proceed
+ * after a crash is detected. This is done by passing the -recover
+ * flag to the compiler. Disabling recovery allows the compiler to
+ * generate more compact code.
+ *
+ * Unfortunately disabling recovery doesn't work for the kernel right
+ * now. KASAN reporting is disabled in some contexts (for example when
+ * the allocator accesses slab object metadata; this is controlled by
+ * current->kasan_depth). All these accesses are detected by the tool,
+ * even though the reports for them are not printed.
+ *
+ * This is something that might be fixed at some point in the future.
+ */
+ if (!recover)
+ die("Oops - KASAN", regs, 0);
+
+ /* If thread survives, skip over the brk instruction and continue: */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook kasan_break_hook = {
+ .fn = kasan_handler,
+ .imm = KASAN_BRK_IMM,
+ .mask = KASAN_BRK_MASK,
+};
+#endif
+
/*
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
@@ -795,11 +1022,20 @@
int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+#ifdef CONFIG_KASAN_SW_TAGS
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
+ return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
- register_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&bug_break_hook);
+#ifdef CONFIG_KASAN_SW_TAGS
+ register_kernel_break_hook(&kasan_break_hook);
+#endif
}
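
advance_itstate() above follows the ITSTATE advance rule from the Arm ARM: the low five bits shift left one place per instruction, and once bits [2:0] are clear the block is finished. A worked, standalone version:

    #include <stdint.h>
    #include <stdio.h>

    /* Same update as advance_itstate() above, on a raw 8-bit ITSTATE. */
    static uint8_t it_advance(uint8_t it)
    {
            if (!(it & 7))                  /* last instruction */
                    return 0;               /* wipe the state   */
            return (it & 0xe0) | ((it << 1) & 0x1f);
    }

    int main(void)
    {
            uint8_t it = 0x04;      /* "ITT EQ": cond EQ (0), mask 0100 */

            while (it) {
                    printf("itstate %02x, cond %x\n", it, it >> 4);
                    it = it_advance(it);
            }
            return 0;
    }
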
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d41900..354b11e 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * VDSO implementations.
*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*/
@@ -31,90 +20,74 @@
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
-#include <asm/vdso_datapage.h>
extern char vdso_start[], vdso_end[];
-static unsigned long vdso_pages __ro_after_init;
+#ifdef CONFIG_COMPAT_VDSO
+extern char vdso32_start[], vdso32_end[];
+#endif /* CONFIG_COMPAT_VDSO */
+
+/* vdso_lookup arch_index */
+enum arch_vdso_type {
+ ARM64_VDSO = 0,
+#ifdef CONFIG_COMPAT_VDSO
+ ARM64_VDSO32 = 1,
+#endif /* CONFIG_COMPAT_VDSO */
+};
+#ifdef CONFIG_COMPAT_VDSO
+#define VDSO_TYPES (ARM64_VDSO32 + 1)
+#else
+#define VDSO_TYPES (ARM64_VDSO + 1)
+#endif /* CONFIG_COMPAT_VDSO */
+
+struct __vdso_abi {
+ const char *name;
+ const char *vdso_code_start;
+ const char *vdso_code_end;
+ unsigned long vdso_pages;
+ /* Data Mapping */
+ struct vm_special_mapping *dm;
+ /* Code Mapping */
+ struct vm_special_mapping *cm;
+};
+
+static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
+ {
+ .name = "vdso",
+ .vdso_code_start = vdso_start,
+ .vdso_code_end = vdso_end,
+ },
+#ifdef CONFIG_COMPAT_VDSO
+ {
+ .name = "vdso32",
+ .vdso_code_start = vdso32_start,
+ .vdso_code_end = vdso32_end,
+ },
+#endif /* CONFIG_COMPAT_VDSO */
+};
/*
* The vDSO data page.
*/
static union {
- struct vdso_data data;
+ struct vdso_data data[CS_BASES];
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = vdso_data_store.data;
-#ifdef CONFIG_COMPAT
-/*
- * Create and map the vectors page for AArch32 tasks.
- */
-static struct page *vectors_page[1] __ro_after_init;
-
-static int __init alloc_vectors_page(void)
-{
- extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long vpage;
-
- vpage = get_zeroed_page(GFP_ATOMIC);
-
- if (!vpage)
- return -ENOMEM;
-
- /* kuser helpers */
- memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
- kuser_sz);
-
- /* sigreturn code */
- memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
-
- flush_icache_range(vpage, vpage + PAGE_SIZE);
- vectors_page[0] = virt_to_page(vpage);
-
- return 0;
-}
-arch_initcall(alloc_vectors_page);
-
-int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr = AARCH32_VECTORS_BASE;
- static const struct vm_special_mapping spec = {
- .name = "[vectors]",
- .pages = vectors_page,
-
- };
- void *ret;
-
- if (down_write_killable(&mm->mmap_sem))
- return -EINTR;
- current->mm->context.vdso = (void *)addr;
-
- /* Map vectors page at the high address. */
- ret = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
- &spec);
-
- up_write(&mm->mmap_sem);
-
- return PTR_ERR_OR_ZERO(ret);
-}
-#endif /* CONFIG_COMPAT */
-
-static int vdso_mremap(const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
+static int __vdso_remap(enum arch_vdso_type arch_index,
+ const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- unsigned long vdso_size = vdso_end - vdso_start;
+ unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
+ vdso_lookup[arch_index].vdso_code_start;
if (vdso_size != new_size)
return -EINVAL;
@@ -124,7 +97,303 @@
return 0;
}
-static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
+static int __vdso_init(enum arch_vdso_type arch_index)
+{
+ int i;
+ struct page **vdso_pagelist;
+ unsigned long pfn;
+
+ if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
+
+ vdso_lookup[arch_index].vdso_pages = (
+ vdso_lookup[arch_index].vdso_code_end -
+ vdso_lookup[arch_index].vdso_code_start) >>
+ PAGE_SHIFT;
+
+ /* Allocate the vDSO pagelist, plus a page for the data. */
+ vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (vdso_pagelist == NULL)
+ return -ENOMEM;
+
+ /* Grab the vDSO data page. */
+ vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
+ /* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
+
+ for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
+ vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+
+ vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
+ vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+
+ return 0;
+}
+
+static int __setup_additional_pages(enum arch_vdso_type arch_index,
+ struct mm_struct *mm,
+ struct linux_binprm *bprm,
+ int uses_interp)
+{
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ void *ret;
+
+ vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
+ /* Be sure to map the data page */
+ vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+ vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = ERR_PTR(vdso_base);
+ goto up_fail;
+ }
+
+ ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ VM_READ|VM_MAYREAD,
+ vdso_lookup[arch_index].dm);
+ if (IS_ERR(ret))
+ goto up_fail;
+
+ vdso_base += PAGE_SIZE;
+ mm->context.vdso = (void *)vdso_base;
+ ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ vdso_lookup[arch_index].cm);
+ if (IS_ERR(ret))
+ goto up_fail;
+
+ return 0;
+
+up_fail:
+ mm->context.vdso = NULL;
+ return PTR_ERR(ret);
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Create and map the vectors page for AArch32 tasks.
+ */
+#ifdef CONFIG_COMPAT_VDSO
+static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return __vdso_remap(ARM64_VDSO32, sm, new_vma);
+}
+#endif /* CONFIG_COMPAT_VDSO */
+
+/*
+ * aarch32_vdso_pages:
+ * 0 - kuser helpers
+ * 1 - sigreturn code
+ * or (CONFIG_COMPAT_VDSO):
+ * 0 - kuser helpers
+ * 1 - vdso data
+ * 2 - vdso code
+ */
+#define C_VECTORS 0
+#ifdef CONFIG_COMPAT_VDSO
+#define C_VVAR 1
+#define C_VDSO 2
+#define C_PAGES (C_VDSO + 1)
+#else
+#define C_SIGPAGE 1
+#define C_PAGES (C_SIGPAGE + 1)
+#endif /* CONFIG_COMPAT_VDSO */
+static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
+static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
+ {
+ .name = "[vectors]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_VECTORS],
+ },
+#ifdef CONFIG_COMPAT_VDSO
+ {
+ .name = "[vvar]",
+ },
+ {
+ .name = "[vdso]",
+ .mremap = aarch32_vdso_mremap,
+ },
+#else
+ {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_SIGPAGE],
+ },
+#endif /* CONFIG_COMPAT_VDSO */
+};
+
+static int aarch32_alloc_kuser_vdso_page(void)
+{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned long vdso_page;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ vdso_page = get_zeroed_page(GFP_ATOMIC);
+ if (!vdso_page)
+ return -ENOMEM;
+
+ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
+ kuser_sz);
+ aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+ flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+ return 0;
+}
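
The helpers copied into the top of this page implement the long-standing arm kuser ABI, which fixes each entry point at a known offset below the end of the "[vectors]" page at 0xffff0000. A hedged sketch of how an AArch32 task would call __kuser_get_tls at its documented address 0xffff0fe0 (valid only for 32-bit tasks, and only when CONFIG_KUSER_HELPERS is enabled):

    /* AArch32-only sketch; the address follows the documented kuser ABI. */
    typedef void *(*kuser_get_tls_t)(void);
    #define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

    void *read_tls_pointer(void)
    {
        /* Returns the TLS value the kernel maintains for this thread. */
        return __kuser_get_tls();
    }
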
+
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+ int ret;
+
+ vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
+ vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
+
+ ret = __vdso_init(ARM64_VDSO32);
+ if (ret)
+ return ret;
+
+ ret = aarch32_alloc_kuser_vdso_page();
+ if (ret) {
+ unsigned long c_vvar =
+ (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
+ unsigned long c_vdso =
+ (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
+
+ free_page(c_vvar);
+ free_page(c_vdso);
+ }
+
+ return ret;
+}
+#else
+static int __aarch32_alloc_vdso_pages(void)
+{
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigpage;
+ int ret;
+
+ sigpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigpage)
+ return -ENOMEM;
+
+ memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
+ flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
+
+ ret = aarch32_alloc_kuser_vdso_page();
+ if (ret)
+ free_page(sigpage);
+
+ return ret;
+}
+#endif /* CONFIG_COMPAT_VDSO */
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+ return __aarch32_alloc_vdso_pages();
+}
+arch_initcall(aarch32_alloc_vdso_pages);
+
+static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
+{
+ void *ret;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ /*
+ * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
+ * not safe to CoW the page containing the CPU exception vectors.
+ */
+ ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_VECTORS]);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+#ifndef CONFIG_COMPAT_VDSO
+static int aarch32_sigreturn_setup(struct mm_struct *mm)
+{
+ unsigned long addr;
+ void *ret;
+
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
+
+ /*
+ * VM_MAYWRITE is required so that gdb can copy-on-write the page
+ * and set breakpoints.
+ */
+ ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_SIGPAGE]);
+ if (IS_ERR(ret))
+ goto out;
+
+ mm->context.vdso = (void *)addr;
+
+out:
+ return PTR_ERR_OR_ZERO(ret);
+}
+#endif /* !CONFIG_COMPAT_VDSO */
+
+int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ ret = aarch32_kuser_helpers_setup(mm);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_COMPAT_VDSO
+ ret = __setup_additional_pages(ARM64_VDSO32,
+ mm,
+ bprm,
+ uses_interp);
+#else
+ ret = aarch32_sigreturn_setup(mm);
+#endif /* CONFIG_COMPAT_VDSO */
+
+out:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ return __vdso_remap(ARM64_VDSO, sm, new_vma);
+}
+
+/*
+ * aarch64_vdso_pages:
+ * 0 - vvar
+ * 1 - vdso
+ */
+#define A_VVAR 0
+#define A_VDSO 1
+#define A_PAGES (A_VDSO + 1)
+static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
{
.name = "[vvar]",
},
@@ -136,39 +405,10 @@
static int __init vdso_init(void)
{
- int i;
- struct page **vdso_pagelist;
- unsigned long pfn;
+ vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
+ vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];
- if (memcmp(vdso_start, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
- return -EINVAL;
- }
-
- vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
-
- /* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
- GFP_KERNEL);
- if (vdso_pagelist == NULL)
- return -ENOMEM;
-
- /* Grab the vDSO data page. */
- vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
-
- /* Grab the vDSO code pages. */
- pfn = sym_to_pfn(vdso_start);
-
- for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
-
- vdso_spec[0].pages = &vdso_pagelist[0];
- vdso_spec[1].pages = &vdso_pagelist[1];
-
- return 0;
+ return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);
@@ -176,81 +416,17 @@
int uses_interp)
{
struct mm_struct *mm = current->mm;
- unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
- void *ret;
-
- vdso_text_len = vdso_pages << PAGE_SHIFT;
- /* Be sure to map the data page */
- vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+ int ret;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
- vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- ret = ERR_PTR(vdso_base);
- goto up_fail;
- }
- ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
- VM_READ|VM_MAYREAD,
- &vdso_spec[0]);
- if (IS_ERR(ret))
- goto up_fail;
- vdso_base += PAGE_SIZE;
- mm->context.vdso = (void *)vdso_base;
- ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_spec[1]);
- if (IS_ERR(ret))
- goto up_fail;
-
+ ret = __setup_additional_pages(ARM64_VDSO,
+ mm,
+ bprm,
+ uses_interp);
up_write(&mm->mmap_sem);
- return 0;
-up_fail:
- mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
- return PTR_ERR(ret);
-}
-
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-void update_vsyscall(struct timekeeper *tk)
-{
- u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;
-
- ++vdso_data->tb_seq_count;
- smp_wmb();
-
- vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = tk->xtime_sec;
- vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
-
- if (!use_syscall) {
- /* tkr_mono.cycle_last == tkr_raw.cycle_last */
- vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->raw_time_sec = tk->raw_sec;
- vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
- vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- vdso_data->cs_mono_mult = tk->tkr_mono.mult;
- vdso_data->cs_raw_mult = tk->tkr_raw.mult;
- /* tkr_mono.shift == tkr_raw.shift */
- vdso_data->cs_shift = tk->tkr_mono.shift;
- }
-
- smp_wmb();
- ++vdso_data->tb_seq_count;
-}
-
-void update_vsyscall_tz(void)
-{
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+ return ret;
}
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b215c71..dd2514b 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -6,23 +6,48 @@
# Heavily based on the vDSO Makefiles for other archs.
#
-obj-vdso := gettimeofday.o note.o sigreturn.o
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_AARCH64_JUMP_SLOT|R_AARCH64_GLOB_DAT|R_AARCH64_ABS64
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso := vgettimeofday.o note.o sigreturn.o
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ --build-id -n -T
+
+ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+
+VDSO_LDFLAGS := -Bsymbolic
+
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+KBUILD_CFLAGS += $(DISABLE_LTO)
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+KCOV_INSTRUMENT := n
+
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+# Clang versions less than 8 do not support -mcmodel=tiny
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+ ifeq ($(shell test $(CONFIG_CLANG_VERSION) -lt 80000; echo $$?),0)
+ CFLAGS_REMOVE_vgettimeofday.o += -mcmodel=tiny
+ endif
+endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
-# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
-# down to collect2, resulting in silent corruption of the vDSO image.
-ccflags-y += -Wl,-shared
-
obj-y += vdso.o
extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
@@ -31,8 +56,8 @@
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
- $(call if_changed,vdsold)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -42,22 +67,14 @@
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
-# Assembly rules for the .S files
-$(obj-vdso): %.o: %.S FORCE
- $(call if_changed_dep,vdsoas)
-
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
- cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
deleted file mode 100644
index c39872a..0000000
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Userspace implementations of gettimeofday() and friends.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
-#define NSEC_PER_SEC_LO16 0xca00
-#define NSEC_PER_SEC_HI16 0x3b9a
-
-vdso_data .req x6
-seqcnt .req w7
-w_tmp .req w8
-x_tmp .req x8
-
-/*
- * Conventions for macro arguments:
- * - An argument is write-only if its name starts with "res".
- * - All other arguments are read-only, unless otherwise specified.
- */
-
- .macro seqcnt_acquire
-9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
- tbnz seqcnt, #0, 9999b
- dmb ishld
- .endm
-
- .macro seqcnt_check fail
- dmb ishld
- ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
- cmp w_tmp, seqcnt
- b.ne \fail
- .endm
-
- .macro syscall_check fail
- ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
- cbnz w_tmp, \fail
- .endm
-
- .macro get_nsec_per_sec res
- mov \res, #NSEC_PER_SEC_LO16
- movk \res, #NSEC_PER_SEC_HI16, lsl #16
- .endm
-
- /*
- * Returns the clock delta, in nanoseconds left-shifted by the clock
- * shift.
- */
- .macro get_clock_shifted_nsec res, cycle_last, mult
- /* Read the virtual counter. */
- isb
- mrs x_tmp, cntvct_el0
- /* Calculate cycle delta and convert to ns. */
- sub \res, x_tmp, \cycle_last
- /* We can only guarantee 56 bits of precision. */
- movn x_tmp, #0xff00, lsl #48
- and \res, x_tmp, \res
- mul \res, \res, \mult
- .endm
-
- /*
- * Returns in res_{sec,nsec} the REALTIME timespec, based on the
- * "wall time" (xtime) and the clock_mono delta.
- */
- .macro get_ts_realtime res_sec, res_nsec, \
- clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
- add \res_nsec, \clock_nsec, \xtime_nsec
- udiv x_tmp, \res_nsec, \nsec_to_sec
- add \res_sec, \xtime_sec, x_tmp
- msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
- .endm
-
- /*
- * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
- * used for CLOCK_MONOTONIC_RAW.
- */
- .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
- udiv \res_sec, \clock_nsec, \nsec_to_sec
- msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
- .endm
-
- /* sec and nsec are modified in place. */
- .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
- /* Add timespec. */
- add \sec, \sec, \ts_sec
- add \nsec, \nsec, \ts_nsec
-
- /* Normalise the new timespec. */
- cmp \nsec, \nsec_to_sec
- b.lt 9999f
- sub \nsec, \nsec, \nsec_to_sec
- add \sec, \sec, #1
-9999:
- cmp \nsec, #0
- b.ge 9998f
- add \nsec, \nsec, \nsec_to_sec
- sub \sec, \sec, #1
-9998:
- .endm
-
- .macro clock_gettime_return, shift=0
- .if \shift == 1
- lsr x11, x11, x12
- .endif
- stp x10, x11, [x1, #TSPEC_TV_SEC]
- mov x0, xzr
- ret
- .endm
-
- .macro jump_slot jumptable, index, label
- .if (. - \jumptable) != 4 * (\index)
- .error "Jump slot index mismatch"
- .endif
- b \label
- .endm
-
- .text
-
-/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__kernel_gettimeofday)
- .cfi_startproc
- adr vdso_data, _vdso_data
- /* If tv is NULL, skip to the timezone code. */
- cbz x0, 2f
-
- /* Compute the time of day. */
-1: seqcnt_acquire
- syscall_check fail=4f
- ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
- /* w11 = cs_mono_mult, w12 = cs_shift */
- ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
- ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=1b
-
- get_nsec_per_sec res=x9
- lsl x9, x9, x12
-
- get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
- get_ts_realtime res_sec=x10, res_nsec=x11, \
- clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
-
- /* Convert ns to us. */
- mov x13, #1000
- lsl x13, x13, x12
- udiv x11, x11, x13
- stp x10, x11, [x0, #TVAL_TV_SEC]
-2:
- /* If tz is NULL, return 0. */
- cbz x1, 3f
- ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
- stp w4, w5, [x1, #TZ_MINWEST]
-3:
- mov x0, xzr
- ret
-4:
- /* Syscall fallback. */
- mov x8, #__NR_gettimeofday
- svc #0
- ret
- .cfi_endproc
-ENDPROC(__kernel_gettimeofday)
-
-#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
-
-/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__kernel_clock_gettime)
- .cfi_startproc
- cmp w0, #JUMPSLOT_MAX
- b.hi syscall
- adr vdso_data, _vdso_data
- adr x_tmp, jumptable
- add x_tmp, x_tmp, w0, uxtw #2
- br x_tmp
-
- ALIGN
-jumptable:
- jump_slot jumptable, CLOCK_REALTIME, realtime
- jump_slot jumptable, CLOCK_MONOTONIC, monotonic
- b syscall
- b syscall
- jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
- jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
- jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
-
- .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
- .error "Wrong jumptable size"
- .endif
-
- ALIGN
-realtime:
- seqcnt_acquire
- syscall_check fail=syscall
- ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
- /* w11 = cs_mono_mult, w12 = cs_shift */
- ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
- ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=realtime
-
- /* All computations are done with left-shifted nsecs. */
- get_nsec_per_sec res=x9
- lsl x9, x9, x12
-
- get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
- get_ts_realtime res_sec=x10, res_nsec=x11, \
- clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
- clock_gettime_return, shift=1
-
- ALIGN
-monotonic:
- seqcnt_acquire
- syscall_check fail=syscall
- ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
- /* w11 = cs_mono_mult, w12 = cs_shift */
- ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
- ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
- seqcnt_check fail=monotonic
-
- /* All computations are done with left-shifted nsecs. */
- lsl x4, x4, x12
- get_nsec_per_sec res=x9
- lsl x9, x9, x12
-
- get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
- get_ts_realtime res_sec=x10, res_nsec=x11, \
- clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
-
- add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
- clock_gettime_return, shift=1
-
- ALIGN
-monotonic_raw:
- seqcnt_acquire
- syscall_check fail=syscall
- ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
- /* w11 = cs_raw_mult, w12 = cs_shift */
- ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
- ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
- seqcnt_check fail=monotonic_raw
-
- /* All computations are done with left-shifted nsecs. */
- get_nsec_per_sec res=x9
- lsl x9, x9, x12
-
- get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
- get_ts_clock_raw res_sec=x10, res_nsec=x11, \
- clock_nsec=x15, nsec_to_sec=x9
-
- add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
- clock_gettime_return, shift=1
-
- ALIGN
-realtime_coarse:
- seqcnt_acquire
- ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
- seqcnt_check fail=realtime_coarse
- clock_gettime_return
-
- ALIGN
-monotonic_coarse:
- seqcnt_acquire
- ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
- ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
- seqcnt_check fail=monotonic_coarse
-
- /* Computations are done in (non-shifted) nsecs. */
- get_nsec_per_sec res=x9
- add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
- clock_gettime_return
-
- ALIGN
-syscall: /* Syscall fallback. */
- mov x8, #__NR_clock_gettime
- svc #0
- ret
- .cfi_endproc
-ENDPROC(__kernel_clock_gettime)
-
-/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__kernel_clock_getres)
- .cfi_startproc
- cmp w0, #CLOCK_REALTIME
- ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
- ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
- b.ne 1f
-
- ldr x2, 5f
- b 2f
-1:
- cmp w0, #CLOCK_REALTIME_COARSE
- ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
- b.ne 4f
- ldr x2, 6f
-2:
- cbz x1, 3f
- stp xzr, x2, [x1]
-
-3: /* res == NULL. */
- mov w0, wzr
- ret
-
-4: /* Syscall fallback. */
- mov x8, #__NR_clock_getres
- svc #0
- ret
-5:
- .quad CLOCK_REALTIME_RES
-6:
- .quad CLOCK_COARSE_RES
- .cfi_endproc
-ENDPROC(__kernel_clock_getres)
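
The seqcnt_acquire/seqcnt_check pairs in the deleted assembly implemented a lockless seqcount read: the writer makes tb_seq_count odd while updating, and readers retry whenever the count changed across the snapshot. The generic C vDSO that replaces this file relies on the same protocol. A self-contained sketch of the idea, with illustrative types rather than the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>

    struct timedata {
        atomic_uint seq;          /* odd while the writer is mid-update */
        uint64_t sec;
        uint64_t nsec;
    };

    static void read_snapshot(struct timedata *td, uint64_t *sec, uint64_t *nsec)
    {
        unsigned int start;

        do {
            /* Wait for an even (stable) sequence count. */
            do {
                start = atomic_load_explicit(&td->seq, memory_order_acquire);
            } while (start & 1);

            *sec = td->sec;
            *nsec = td->nsec;

            /* Order the data reads before re-checking the count. */
            atomic_thread_fence(memory_order_acquire);
        } while (atomic_load_explicit(&td->seq, memory_order_relaxed) != start);
    }
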
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
index e20483b..0ce6ec7 100644
--- a/arch/arm64/kernel/vdso/note.S
+++ b/arch/arm64/kernel/vdso/note.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
index 20d98ef..0723aa3 100644
--- a/arch/arm64/kernel/vdso/sigreturn.S
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -1,21 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Sigreturn trampoline for returning from a signal when the SA_RESTORER
* flag is not set.
*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*/
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
index 82379a7..d1414fe 100644
--- a/arch/arm64/kernel/vdso/vdso.S
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
*/
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index beca249..7ad2d3a 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* GNU linker script for the VDSO library.
*
* Copyright (C) 2012 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author: Will Deacon <will.deacon@arm.com>
* Heavily based on the vDSO linker scripts for other archs.
*/
diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
new file mode 100644
index 0000000..7476355
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2018 ARM Limited
+ *
+ */
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __kernel_clock_gettime(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __kernel_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
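
These __kernel_* entry points are not called by name from application code; the kernel advertises the vDSO's load address through the auxiliary vector and the C library resolves the symbols from that in-memory ELF object. A minimal, known-good way to locate the mapping from userspace:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        /* AT_SYSINFO_EHDR points at the ELF header of the mapped vDSO. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (!vdso)
            printf("no vDSO advertised\n");
        else
            printf("vDSO ELF header at %#lx\n", vdso);
        return 0;
    }
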
diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore
new file mode 100644
index 0000000..4fea950
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+vdso.so.raw
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
new file mode 100644
index 0000000..76b327f
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for vdso32
+#
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
+include $(srctree)/lib/vdso/Makefile
+
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
+
+cc32-option = $(call try-run,\
+ $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-disable-warning = $(call try-run,\
+ $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+cc32-ldoption = $(call try-run,\
+ $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+ printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+
+# We cannot use the global flags to compile the vDSO files, the main reason
+# being that the 32-bit compiler may be older than the main (64-bit) compiler
+# and therefore may not understand flags set using $(cc-option ...). Besides,
+# arch-specific options should be taken from the arm Makefile instead of the
+# arm64 one.
+# As a result we set our own flags here.
+
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
+VDSO_CPPFLAGS += $(LINUXINCLUDE)
+
+# Common C and assembly flags
+# From top-level Makefile
+VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
+VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+endif
+
+VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
+ifdef CONFIG_DEBUG_INFO
+VDSO_CAFLAGS += -g
+endif
+
+# From arm Makefile
+VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
+VDSO_CAFLAGS += -mabi=aapcs-linux -mfloat-abi=soft
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+VDSO_CAFLAGS += -mbig-endian
+else
+VDSO_CAFLAGS += -mlittle-endian
+endif
+
+# From arm vDSO Makefile
+VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
+VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
+
+# Try to compile for ARMv8. If the compiler is too old and doesn't support it,
+# fall back to v7. There is no easy way to check which architecture the code
+# is being compiled for, so define a macro specifying that (see arch/arm/Makefile).
+VDSO_CAFLAGS += $(call cc32-option,-march=armv8-a -D__LINUX_ARM_ARCH__=8,\
+ -march=armv7-a -D__LINUX_ARM_ARCH__=7)
+
+VDSO_CFLAGS := $(VDSO_CAFLAGS)
+VDSO_CFLAGS += -DENABLE_COMPAT_VDSO=1
+# KBUILD_CFLAGS from top-level Makefile
+VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+ -std=gnu89
+VDSO_CFLAGS += -O2
+# Some useful compiler-dependent flags from top-level Makefile
+VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
+VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+VDSO_CFLAGS += $(call cc32-option,-fno-strict-overflow)
+VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
+VDSO_CFLAGS += $(call cc32-option,-Werror=date-time)
+VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types)
+
+# The 32-bit compiler does not provide 128-bit integers, which are used in
+# some headers that are indirectly included from the vDSO code.
+# This hack makes the compiler happy and should trigger a warning/error if
+# variables of such type are referenced.
+VDSO_CFLAGS += -D__uint128_t='void*'
+# Silence some warnings coming from headers that operate on longs
+# (on GCC 4.8 or older, there is unfortunately no way to silence this warning)
+VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
+VDSO_CFLAGS += -Wno-int-to-pointer-cast
+
+VDSO_AFLAGS := $(VDSO_CAFLAGS)
+VDSO_AFLAGS += -D__ASSEMBLY__
+
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
+VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
+# From arm vDSO Makefile
+VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
+VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft
+VDSO_LDFLAGS += -Wl,--hash-style=sysv
+VDSO_LDFLAGS += -Wl,--build-id
+VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
+
+# Borrow vdsomunge.c from the arm vDSO
+# We have to use a relative path because scripts/Makefile.host prefixes
+# $(hostprogs-y) with $(obj)
+munge := ../../../arm/vdso/vdsomunge
+hostprogs-y := $(munge)
+
+c-obj-vdso := note.o
+c-obj-vdso-gettimeofday := vgettimeofday.o
+asm-obj-vdso := sigreturn.o
+
+ifneq ($(c-gettimeofday-y),)
+VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
+endif
+
+VDSO_CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
+# Build rules
+targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw
+c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso))
+c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday))
+asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso))
+obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
+
+obj-y += vdso.o
+extra-y += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (vdso.s includes vdso.so through incbin)
+$(obj)/vdso.o: $(obj)/vdso.so
+
+include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+# Strip rule for vdso.so
+$(obj)/vdso.so: OBJCOPYFLAGS := -S
+$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
+ $(call if_changed,vdsomunge)
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+# Compilation rules for the vDSO sources
+$(c-obj-vdso): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc)
+$(c-obj-vdso-gettimeofday): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc_gettimeofday)
+$(asm-obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+# Actual build commands
+quiet_cmd_vdsold_and_vdso_check = LD32 $@
+ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
+quiet_cmd_vdsold = LD32 $@
+ cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+quiet_cmd_vdsocc = CC32 $@
+ cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+quiet_cmd_vdsocc_gettimeofday = CC32 $@
+ cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+quiet_cmd_vdsoas = AS32 $@
+ cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+
+quiet_cmd_vdsomunge = MUNGE $@
+ cmd_vdsomunge = $(obj)/$(munge) $< $@
+
+# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
+gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+# The AArch64 nm should be able to read an AArch32 binary
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso32/note.c b/arch/arm64/kernel/vdso32/note.c
new file mode 100644
index 0000000..eff5bf9
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/note.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2018 ARM Limited
+ *
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+#include <linux/build-salt.h>
+
+ELFNOTE32("Linux", 0, LINUX_VERSION_CODE);
+BUILD_SALT;
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
new file mode 100644
index 0000000..1a81277
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/sigreturn.S
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file provides both A32 and T32 versions, in accordance with the
+ * arm sigreturn code.
+ *
+ * Copyright (C) 2018 ARM Limited
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+#define ARM_ENTRY(name) \
+ ENTRY(name)
+
+#define ARM_ENDPROC(name) \
+ .type name, %function; \
+ END(name)
+
+ .text
+
+ .arm
+ .fnstart
+ .save {r0-r15}
+ .pad #COMPAT_SIGFRAME_REGS_OFFSET
+ nop
+ARM_ENTRY(__kernel_sigreturn_arm)
+ mov r7, #__NR_compat_sigreturn
+ svc #0
+ .fnend
+ARM_ENDPROC(__kernel_sigreturn_arm)
+
+ .fnstart
+ .save {r0-r15}
+ .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
+ nop
+ARM_ENTRY(__kernel_rt_sigreturn_arm)
+ mov r7, #__NR_compat_rt_sigreturn
+ svc #0
+ .fnend
+ARM_ENDPROC(__kernel_rt_sigreturn_arm)
+
+ .thumb
+ .fnstart
+ .save {r0-r15}
+ .pad #COMPAT_SIGFRAME_REGS_OFFSET
+ nop
+ARM_ENTRY(__kernel_sigreturn_thumb)
+ mov r7, #__NR_compat_sigreturn
+ svc #0
+ .fnend
+ARM_ENDPROC(__kernel_sigreturn_thumb)
+
+ .fnstart
+ .save {r0-r15}
+ .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
+ nop
+ARM_ENTRY(__kernel_rt_sigreturn_thumb)
+ mov r7, #__NR_compat_rt_sigreturn
+ svc #0
+ .fnend
+ARM_ENDPROC(__kernel_rt_sigreturn_thumb)
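
As the file comment says, these trampolines are used when a handler is registered without SA_RESTORER: the kernel then points the handler's return address at the matching __kernel_*sigreturn_* symbol. A sketch of a signal round trip that exercises this path (note that a libc may install its own restorer, in which case the trampoline is bypassed):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static void handler(int sig)
    {
        (void)sig;  /* returning from here goes through a sigreturn trampoline */
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;    /* no SA_RESTORER requested here */
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        puts("returned from handler");
        return 0;
    }
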
diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32/vdso.S
new file mode 100644
index 0000000..e72ac7b
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+ .globl vdso32_start, vdso32_end
+ .section .rodata
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/arm64/kernel/vdso32/vdso.so"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
new file mode 100644
index 0000000..a394492
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Adapted from arm64 version.
+ *
+ * GNU linker script for the VDSO library.
+ * Heavily based on the vDSO linker scripts for other archs.
+ *
+ * Copyright (C) 2012-2018 ARM Limited
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+SECTIONS
+{
+ PROVIDE_HIDDEN(_vdso_data = . - PAGE_SIZE);
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ .text : { *(.text*) } :text =0xe7f001f2
+
+ .got : { *(.got) }
+ .rel.plt : { *(.rel.plt) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+}
+
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ __kernel_sigreturn_arm;
+ __kernel_sigreturn_thumb;
+ __kernel_rt_sigreturn_arm;
+ __kernel_rt_sigreturn_thumb;
+ __vdso_clock_gettime64;
+ local: *;
+ };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm;
+VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb;
+VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm;
+VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
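
Only the gettimeofday family and the sigreturn trampolines are exported; __vdso_clock_gettime64 additionally gives 32-bit callers a 64-bit (y2038-safe) clock. Applications normally reach these through libc, so once this vDSO is mapped an ordinary clock_gettime() call is typically serviced without entering the kernel. A sketch:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;

        /* Usually answered by __vdso_clock_gettime, not a syscall. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
            return 1;
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
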
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
new file mode 100644
index 0000000..54fc1c2
--- /dev/null
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 compat userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2018 ARM Limited
+ *
+ */
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ /* The checks below are required for ABI consistency with arm */
+ if ((u32)ts >= TASK_SIZE_32)
+ return -EFAULT;
+
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ /* The checks below are required for ABI consistency with arm */
+ if ((u32)ts >= TASK_SIZE_32)
+ return -EFAULT;
+
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ /* The checks below are required for ABI consistency with arm */
+ if ((u32)res >= TASK_SIZE_32)
+ return -EFAULT;
+
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
+
+/* Avoid unresolved references emitted by GCC */
+
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
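
The pointer guards above exist for ABI parity: the arm kernel returns -EFAULT for result pointers at or above the 32-bit task's address limit, so the compat vDSO must do the same instead of faulting or silently falling through. A minimal model of that guard (the TASK_SIZE_32 value below is an assumption for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define EFAULT 14
    #define TASK_SIZE_32 0xffff0000UL   /* assumed limit, illustration only */

    static int check_res_ptr(const void *res)
    {
        /* Mirrors the vDSO check: reject pointers at or above the limit. */
        return (uintptr_t)res >= TASK_SIZE_32 ? -EFAULT : 0;
    }

    int main(void)
    {
        printf("%d\n", check_res_ptr((void *)0xffff8000UL)); /* -14 */
        printf("%d\n", check_res_ptr((void *)0x1000UL));     /* 0 */
        return 0;
    }
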
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b6..aa76f72 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -99,7 +99,8 @@
*(.discard)
*(.discard.*)
*(.interp .dynamic)
- *(.dynsym .dynstr .hash)
+ *(.dynsym .dynstr .hash .gnu.hash)
+ *(.eh_frame)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -138,6 +139,23 @@
EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ reserved_ttbr0 = .;
+ . += RESERVED_TTBR0_SIZE;
+#endif
+ swapper_pg_dir = .;
+ . += PAGE_SIZE;
+ swapper_pg_end = .;
+
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -166,7 +184,6 @@
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
*(.init.rodata.* .init.bss) /* from the EFI stub */
}
@@ -176,12 +193,21 @@
PERCPU_SECTION(L1_CACHE_BYTES)
- .rela : ALIGN(8) {
+ .rela.dyn : ALIGN(8) {
*(.rela .rela*)
}
- __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
- __rela_size = SIZEOF(.rela);
+ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+ __rela_size = SIZEOF(.rela.dyn);
+
+#ifdef CONFIG_RELR
+ .relr.dyn : ALIGN(8) {
+ *(.relr.dyn)
+ }
+
+ __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
+ __relr_size = SIZEOF(.relr.dyn);
+#endif
. = ALIGN(SEGMENT_ALIGN);
__initdata_end = .;
@@ -216,21 +242,9 @@
BSS_SECTION(0, 0, 0)
. = ALIGN(PAGE_SIZE);
- idmap_pg_dir = .;
- . += IDMAP_DIR_SIZE;
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- tramp_pg_dir = .;
- . += PAGE_SIZE;
-#endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- reserved_ttbr0 = .;
- . += RESERVED_TTBR0_SIZE;
-#endif
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
- swapper_pg_end = .;
+ init_pg_dir = .;
+ . += INIT_DIR_SIZE;
+ init_pg_end = .;
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
@@ -240,6 +254,8 @@
HEAD_SYMBOLS
}
+#include "image-vars.h"
+
/*
* The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.