Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore
index c5f676c..bbb90f9 100644
--- a/arch/arc/kernel/.gitignore
+++ b/arch/arc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index de62511..8c4fc4b 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -3,9 +3,6 @@
# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
#
-# Pass UTS_MACHINE for user_regset definition
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
obj-y := arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
@@ -20,9 +17,12 @@
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+ifdef CONFIG_ISA_ARCOMPACT
CFLAGS_fpu.o += -mdpfp
+endif
ifdef CONFIG_ARC_DW2_UNWIND
CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index 1f621e4..0e88403 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <asm/hardirq.h>
#include <asm/page.h>
+
int main(void)
{
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
@@ -66,7 +67,18 @@
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
- DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+ OFFSET(PT_r12, pt_regs, r12);
+ OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ OFFSET(PT_r58, pt_regs, r58);
+ OFFSET(PT_r59, pt_regs, r59);
+#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
+#endif
return 0;
}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index e172c33..1a76f2d 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -14,9 +14,6 @@
#include <asm/asm-offsets.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
-#ifdef CONFIG_ARC_PLAT_EZNPS
-#include <plat/ctop.h>
-#endif
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
@@ -68,16 +65,9 @@
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
-#ifdef CONFIG_ARC_PLAT_EZNPS
- "lr r24, [%4] \n\t"
-#ifndef CONFIG_EZNPS_MTM_EXT
- "lsr r24, r24, 4 \n\t"
-#endif
-#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
-#endif
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
@@ -115,9 +105,6 @@
: "=r"(tmp)
: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
-#ifdef CONFIG_ARC_PLAT_EZNPS
- , "i"(CTOP_AUX_LOGIC_GLOBAL_ID)
-#endif
: "blink"
);
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index fa86d13..721d465 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -29,8 +29,6 @@
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp") ||
of_flat_dt_is_compatible(dt_root, "snps,hsdk"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x & HSDK) */
- else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
- arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index d04837d..03f8b1b 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -339,7 +339,7 @@
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
- /* intentional fall-through */
+ fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index cef1d3f..ae656bf 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -343,11 +343,11 @@
resume_kernel_mode:
; Disable Interrupts from this point on
- ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
- ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
+ ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
+ ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
IRQ_DISABLE r9
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
index 07e22b5..ec64021 100644
--- a/arch/arc/kernel/fpu.c
+++ b/arch/arc/kernel/fpu.c
@@ -6,7 +6,9 @@
*/
#include <linux/sched.h>
-#include <asm/switch_to.h>
+#include <asm/fpu.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
/*
* To save/restore FPU regs, simplest scheme would use LR/SR insns.
@@ -50,3 +52,31 @@
: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
);
}
+
+#else
+
+void fpu_init_task(struct pt_regs *regs)
+{
+ const unsigned int fwe = 0x80000000;
+
+ /* default rounding mode */
+ write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
+
+ /* Initialize to zero: setting requires FWE be set */
+ write_aux_reg(ARC_REG_FPU_STATUS, fwe);
+}
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ struct arc_fpu *save = &prev->thread.fpu;
+ struct arc_fpu *restore = &next->thread.fpu;
+ const unsigned int fwe = 0x80000000;
+
+ save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
+ save->status = read_aux_reg(ARC_REG_FPU_STATUS);
+
+ write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
+ write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
+}
+
+#endif
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 6f41265..9152782 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -14,6 +14,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/dsp-impl.h>
#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -58,7 +59,33 @@
bclr r5, r5, STATUS_AD_BIT
#endif
kflag r5
-#endif
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+ lr r5, [ARC_REG_LPB_BUILD]
+ breq r5, 0, 1f ; LPB doesn't exist
+ mov r5, 1
+ sr r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
+
+ /* On HSDK, CCMs need to remapped super early */
+#ifdef CONFIG_ARC_SOC_HSDK
+ mov r6, 0x60000000
+ lr r5, [ARC_REG_ICCM_BUILD]
+ breq r5, 0, 1f
+ sr r6, [ARC_REG_AUX_ICCM]
+1:
+ lr r5, [ARC_REG_DCCM_BUILD]
+ breq r5, 0, 2f
+ sr r6, [ARC_REG_AUX_DCCM]
+2:
+#endif /* CONFIG_ARC_SOC_HSDK */
+
+#endif /* CONFIG_ISA_ARCV2 */
+
+ ; Config DSP_CTRL properly, so kernel may use integer multiply,
+ ; multiply-accumulate, and divide operations
+ DSP_EARLY_INIT
.endm
.section .init.text, "ax",@progbits
diff --git a/arch/arc/kernel/jump_label.c b/arch/arc/kernel/jump_label.c
new file mode 100644
index 0000000..b8600dc
--- /dev/null
+++ b/arch/arc/kernel/jump_label.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+
+#include "asm/cacheflush.h"
+
+#define JUMPLABEL_ERR "ARC: jump_label: ERROR: "
+
+/* Halt system on fatal error to make debug easier */
+#define arc_jl_fatal(format...) \
+({ \
+ pr_err(JUMPLABEL_ERR format); \
+ BUG(); \
+})
+
+static inline u32 arc_gen_nop(void)
+{
+ /* 1x 32bit NOP in middle endian */
+ return 0x7000264a;
+}
+
+/*
+ * Atomic update of patched instruction is only available if this
+ * instruction doesn't cross L1 cache line boundary. You can read about
+ * the way we achieve this in arc/include/asm/jump_label.h
+ */
+static inline void instruction_align_assert(void *addr, int len)
+{
+ unsigned long a = (unsigned long)addr;
+
+ if ((a >> L1_CACHE_SHIFT) != ((a + len - 1) >> L1_CACHE_SHIFT))
+ arc_jl_fatal("instruction (addr %px) cross L1 cache line border",
+ addr);
+}
+
+/*
+ * ARCv2 'Branch unconditionally' instruction:
+ * 00000ssssssssss1SSSSSSSSSSNRtttt
+ * s S[n:0] lower bits signed immediate (number is bitfield size)
+ * S S[m:n+1] upper bits signed immediate (number is bitfield size)
+ * t S[24:21] upper bits signed immediate (branch unconditionally far)
+ * N N <.d> delay slot mode
+ * R R Reserved
+ */
+static inline u32 arc_gen_branch(jump_label_t pc, jump_label_t target)
+{
+ u32 instruction_l, instruction_r;
+ u32 pcl = pc & GENMASK(31, 2);
+ u32 u_offset = target - pcl;
+ u32 s, S, t;
+
+ /*
+ * Offset in 32-bit branch instruction must to fit into s25.
+ * Something is terribly broken if we get such huge offset within one
+ * function.
+ */
+ if ((s32)u_offset < -16777216 || (s32)u_offset > 16777214)
+ arc_jl_fatal("gen branch with offset (%d) not fit in s25",
+ (s32)u_offset);
+
+ /*
+ * All instructions are aligned by 2 bytes so we should never get offset
+ * here which is not 2 bytes aligned.
+ */
+ if (u_offset & 0x1)
+ arc_jl_fatal("gen branch with offset (%d) unaligned to 2 bytes",
+ (s32)u_offset);
+
+ s = (u_offset >> 1) & GENMASK(9, 0);
+ S = (u_offset >> 11) & GENMASK(9, 0);
+ t = (u_offset >> 21) & GENMASK(3, 0);
+
+ /* 00000ssssssssss1 */
+ instruction_l = (s << 1) | 0x1;
+ /* SSSSSSSSSSNRtttt */
+ instruction_r = (S << 6) | t;
+
+ return (instruction_r << 16) | (instruction_l & GENMASK(15, 0));
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ jump_label_t *instr_addr = (jump_label_t *)entry->code;
+ u32 instr;
+
+ instruction_align_assert(instr_addr, JUMP_LABEL_NOP_SIZE);
+
+ if (type == JUMP_LABEL_JMP)
+ instr = arc_gen_branch(entry->code, entry->target);
+ else
+ instr = arc_gen_nop();
+
+ WRITE_ONCE(*instr_addr, instr);
+ flush_icache_range(entry->code, entry->code + JUMP_LABEL_NOP_SIZE);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use only one NOP type (1x, 4 byte) in arch_static_branch, so
+ * there's no need to patch an identical NOP over the top of it here.
+ * The generic code calls 'arch_jump_label_transform' if the NOP needs
+ * to be replaced by a branch, so 'arch_jump_label_transform_static' is
+ * never called with type other than JUMP_LABEL_NOP.
+ */
+ BUG_ON(type != JUMP_LABEL_NOP);
+}
+
+#ifdef CONFIG_ARC_DBG_JUMP_LABEL
+#define SELFTEST_MSG "ARC: instruction generation self-test: "
+
+struct arc_gen_branch_testdata {
+ jump_label_t pc;
+ jump_label_t target_address;
+ u32 expected_instr;
+};
+
+static __init int branch_gen_test(const struct arc_gen_branch_testdata *test)
+{
+ u32 instr_got;
+
+ instr_got = arc_gen_branch(test->pc, test->target_address);
+ if (instr_got == test->expected_instr)
+ return 0;
+
+ pr_err(SELFTEST_MSG "FAIL:\n arc_gen_branch(0x%08x, 0x%08x) != 0x%08x, got 0x%08x\n",
+ test->pc, test->target_address,
+ test->expected_instr, instr_got);
+
+ return -EFAULT;
+}
+
+/*
+ * Offset field in branch instruction is not continuous. Test all
+ * available offset field and sign combinations. Test data is generated
+ * from real working code.
+ */
+static const struct arc_gen_branch_testdata arcgenbr_test_data[] __initconst = {
+ {0x90007548, 0x90007514, 0xffcf07cd}, /* tiny (-52) offs */
+ {0x9000c9c0, 0x9000c782, 0xffcf05c3}, /* tiny (-574) offs */
+ {0x9000cc1c, 0x9000c782, 0xffcf0367}, /* tiny (-1178) offs */
+ {0x9009dce0, 0x9009d106, 0xff8f0427}, /* small (-3034) offs */
+ {0x9000f5de, 0x90007d30, 0xfc0f0755}, /* big (-30892) offs */
+ {0x900a2444, 0x90035f64, 0xc9cf0321}, /* huge (-443616) offs */
+ {0x90007514, 0x9000752c, 0x00000019}, /* tiny (+24) offs */
+ {0x9001a578, 0x9001a77a, 0x00000203}, /* tiny (+514) offs */
+ {0x90031ed8, 0x90032634, 0x0000075d}, /* tiny (+1884) offs */
+ {0x9008c7f2, 0x9008d3f0, 0x00400401}, /* small (+3072) offs */
+ {0x9000bb38, 0x9003b340, 0x17c00009}, /* big (+194568) offs */
+ {0x90008f44, 0x90578d80, 0xb7c2063d} /* huge (+5701180) offs */
+};
+
+static __init int instr_gen_test(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arcgenbr_test_data); i++)
+ if (branch_gen_test(&arcgenbr_test_data[i]))
+ return -EFAULT;
+
+ pr_info(SELFTEST_MSG "OK\n");
+
+ return 0;
+}
+early_initcall(instr_gen_test);
+
+#endif /* CONFIG_ARC_DBG_JUMP_LABEL */
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 7d3efe8..cabef45 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -388,6 +388,7 @@
{
ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->blink = (unsigned long)&kretprobe_trampoline;
@@ -396,58 +397,7 @@
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address) {
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- regs->ret = orig_ret_address;
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
/* By returning a non zero value, we are telling the kprobe handler
* that we don't want the post_handler to run
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index bfd4cbe..37f724a 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -20,6 +20,8 @@
#include <linux/elf.h>
#include <linux/tick.h>
+#include <asm/fpu.h>
+
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
@@ -88,10 +90,10 @@
if (unlikely(ret != -EFAULT))
goto fail;
- down_read(&current->mm->mmap_sem);
- ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+ mmap_read_lock(current->mm);
+ ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (likely(!ret))
goto again;
@@ -114,17 +116,6 @@
:"I"(arg)); /* can't be "r" has to be embedded const */
}
-#elif defined(CONFIG_EZNPS_MTM_EXT) /* ARC700 variant in NPS */
-
-void arch_cpu_idle(void)
-{
- /* only the calling HW thread needs to sleep */
- __asm__ __volatile__(
- ".word %0 \n"
- :
- :"i"(CTOP_INST_HWSCHD_WFT_IE12));
-}
-
#else /* ARC700 */
void arch_cpu_idle(void)
@@ -171,8 +162,9 @@
* | user_r25 |
* ------------------ <===== END of PAGE
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p,
+ unsigned long tls)
{
struct pt_regs *c_regs; /* child's pt_regs */
unsigned long *childksp; /* to unwind out of __switch_to() */
@@ -263,7 +255,7 @@
/*
* Do necessary setup to start up a new user task
*/
-void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
regs->sp = usp;
regs->ret = pc;
@@ -275,9 +267,7 @@
*/
regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
-#ifdef CONFIG_EZNPS_MTM_EXT
- regs->eflags = 0;
-#endif
+ fpu_init_task(regs);
/* bogus seed values for debugging */
regs->lp_start = 0x10;
@@ -291,11 +281,6 @@
{
}
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
- return 0;
-}
-
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index d5f3fcf..8833919 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -18,88 +18,61 @@
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
const struct pt_regs *ptregs = task_pt_regs(target);
const struct callee_regs *cregs = task_callee_regs(target);
- int ret = 0;
unsigned int stop_pc_val;
-#define REG_O_CHUNK(START, END, PTR) \
- if (!ret) \
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
- offsetof(struct user_regs_struct, START), \
- offsetof(struct user_regs_struct, END));
+ membuf_zero(&to, 4); // pad
+ membuf_store(&to, ptregs->bta);
+ membuf_store(&to, ptregs->lp_start);
+ membuf_store(&to, ptregs->lp_end);
+ membuf_store(&to, ptregs->lp_count);
+ membuf_store(&to, ptregs->status32);
+ membuf_store(&to, ptregs->ret);
+ membuf_store(&to, ptregs->blink);
+ membuf_store(&to, ptregs->fp);
+ membuf_store(&to, ptregs->r26); // gp
+ membuf_store(&to, ptregs->r12);
+ membuf_store(&to, ptregs->r11);
+ membuf_store(&to, ptregs->r10);
+ membuf_store(&to, ptregs->r9);
+ membuf_store(&to, ptregs->r8);
+ membuf_store(&to, ptregs->r7);
+ membuf_store(&to, ptregs->r6);
+ membuf_store(&to, ptregs->r5);
+ membuf_store(&to, ptregs->r4);
+ membuf_store(&to, ptregs->r3);
+ membuf_store(&to, ptregs->r2);
+ membuf_store(&to, ptregs->r1);
+ membuf_store(&to, ptregs->r0);
+ membuf_store(&to, ptregs->sp);
+ membuf_zero(&to, 4); // pad2
+ membuf_store(&to, cregs->r25);
+ membuf_store(&to, cregs->r24);
+ membuf_store(&to, cregs->r23);
+ membuf_store(&to, cregs->r22);
+ membuf_store(&to, cregs->r21);
+ membuf_store(&to, cregs->r20);
+ membuf_store(&to, cregs->r19);
+ membuf_store(&to, cregs->r18);
+ membuf_store(&to, cregs->r17);
+ membuf_store(&to, cregs->r16);
+ membuf_store(&to, cregs->r15);
+ membuf_store(&to, cregs->r14);
+ membuf_store(&to, cregs->r13);
+ membuf_store(&to, target->thread.fault_address); // efa
-#define REG_O_ONE(LOC, PTR) \
- if (!ret) \
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
- offsetof(struct user_regs_struct, LOC), \
- offsetof(struct user_regs_struct, LOC) + 4);
-
-#define REG_O_ZERO(LOC) \
- if (!ret) \
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \
- offsetof(struct user_regs_struct, LOC), \
- offsetof(struct user_regs_struct, LOC) + 4);
-
- REG_O_ZERO(pad);
- REG_O_ONE(scratch.bta, &ptregs->bta);
- REG_O_ONE(scratch.lp_start, &ptregs->lp_start);
- REG_O_ONE(scratch.lp_end, &ptregs->lp_end);
- REG_O_ONE(scratch.lp_count, &ptregs->lp_count);
- REG_O_ONE(scratch.status32, &ptregs->status32);
- REG_O_ONE(scratch.ret, &ptregs->ret);
- REG_O_ONE(scratch.blink, &ptregs->blink);
- REG_O_ONE(scratch.fp, &ptregs->fp);
- REG_O_ONE(scratch.gp, &ptregs->r26);
- REG_O_ONE(scratch.r12, &ptregs->r12);
- REG_O_ONE(scratch.r11, &ptregs->r11);
- REG_O_ONE(scratch.r10, &ptregs->r10);
- REG_O_ONE(scratch.r9, &ptregs->r9);
- REG_O_ONE(scratch.r8, &ptregs->r8);
- REG_O_ONE(scratch.r7, &ptregs->r7);
- REG_O_ONE(scratch.r6, &ptregs->r6);
- REG_O_ONE(scratch.r5, &ptregs->r5);
- REG_O_ONE(scratch.r4, &ptregs->r4);
- REG_O_ONE(scratch.r3, &ptregs->r3);
- REG_O_ONE(scratch.r2, &ptregs->r2);
- REG_O_ONE(scratch.r1, &ptregs->r1);
- REG_O_ONE(scratch.r0, &ptregs->r0);
- REG_O_ONE(scratch.sp, &ptregs->sp);
-
- REG_O_ZERO(pad2);
-
- REG_O_ONE(callee.r25, &cregs->r25);
- REG_O_ONE(callee.r24, &cregs->r24);
- REG_O_ONE(callee.r23, &cregs->r23);
- REG_O_ONE(callee.r22, &cregs->r22);
- REG_O_ONE(callee.r21, &cregs->r21);
- REG_O_ONE(callee.r20, &cregs->r20);
- REG_O_ONE(callee.r19, &cregs->r19);
- REG_O_ONE(callee.r18, &cregs->r18);
- REG_O_ONE(callee.r17, &cregs->r17);
- REG_O_ONE(callee.r16, &cregs->r16);
- REG_O_ONE(callee.r15, &cregs->r15);
- REG_O_ONE(callee.r14, &cregs->r14);
- REG_O_ONE(callee.r13, &cregs->r13);
-
- REG_O_ONE(efa, &target->thread.fault_address);
-
- if (!ret) {
- if (in_brkpt_trap(ptregs)) {
- stop_pc_val = target->thread.fault_address;
- pr_debug("\t\tstop_pc (brk-pt)\n");
- } else {
- stop_pc_val = ptregs->ret;
- pr_debug("\t\tstop_pc (others)\n");
- }
-
- REG_O_ONE(stop_pc, &stop_pc_val);
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
}
- return ret;
+ return membuf_store(&to, stop_pc_val); // stop_pc
}
static int genregs_set(struct task_struct *target,
@@ -184,25 +157,20 @@
#ifdef CONFIG_ISA_ARCV2
static int arcv2regs_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
- int ret, copy_sz;
if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
- copy_sz = sizeof(struct user_regs_arcv2);
- else
- copy_sz = 4; /* r30 only */
+ /*
+ * itemized copy not needed like above as layout of regs (r30,r58,r59)
+ * is exactly same in kernel (pt_regs) and userspace (user_regs_arcv2)
+ */
+ return membuf_write(&to, &regs->r30, sizeof(struct user_regs_arcv2));
- /*
- * itemized copy not needed like above as layout of regs (r30,r58,r59)
- * is exactly same in kernel (pt_regs) and userspace (user_regs_arcv2)
- */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs->r30,
- 0, copy_sz);
- return ret;
+ membuf_write(&to, &regs->r30, 4); /* r30 only */
+ return membuf_zero(&to, sizeof(struct user_regs_arcv2) - 4);
}
static int arcv2regs_set(struct task_struct *target,
@@ -237,7 +205,7 @@
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
- .get = genregs_get,
+ .regset_get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_ISA_ARCV2
@@ -246,14 +214,14 @@
.n = ELF_ARCV2REG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
- .get = arcv2regs_get,
+ .regset_get = arcv2regs_get,
.set = arcv2regs_set,
},
#endif
};
static const struct user_regset_view user_arc_view = {
- .name = UTS_MACHINE,
+ .name = "arc",
.e_machine = EM_ARC_INUSE,
.regsets = arc_regsets,
.n = ARRAY_SIZE(arc_regsets)
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 23dc002..41f07b3 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -8,18 +8,19 @@
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/cpu.h>
+#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>
#include <uapi/linux/mount.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
+#include <asm/asserts.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -27,6 +28,7 @@
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
+#include <asm/dsp-impl.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
@@ -56,10 +58,12 @@
{ 0x00, NULL }
};
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
/* UARCH.MAJOR, Release */
{ 0, "R3.10a"},
{ 1, "R3.50a"},
+ { 2, "R3.60a"},
+ { 3, "R4.00a"},
{ 0xFF, NULL }
};
@@ -115,12 +119,6 @@
struct bcr_uarch_build_arcv2 uarch;
const struct id_to_str *tbl;
- /*
- * Up until (including) the first core4 release (0x54) things were
- * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
- * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
- */
-
if (cpu->core.family < 0x54) { /* includes arc700 */
for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
@@ -141,11 +139,10 @@
}
/*
- * However the subsequent HS release (same 0x54) allow HS38 or HS48
- * configurations and encode this info in a different BCR.
- * The BCR was introduced in 0x54 so can't be read unconditionally.
+ * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+ * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
+ * releases only update it.
*/
-
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
if (uarch.prod == 4) {
@@ -156,7 +153,7 @@
cpu->name = "HS38";
}
- for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+ for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
cpu->release = tbl->str;
break;
@@ -390,11 +387,24 @@
return buf;
}
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (hw_exists && !opt_ena)
+ pr_warn(" ! Enable %s for working apps\n", opt_name);
+ else if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
static void arc_chk_core_config(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
- int saved = 0, present = 0;
- char *opt_nm = NULL;
+ int present = 0;
if (!cpu->extn.timer0)
panic("Timer0 is not present!\n");
@@ -426,23 +436,16 @@
*/
if (is_isa_arcompact()) {
- opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
- saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
-
/* only DPDP checked since SP has no arch visible regs */
present = cpu->extn.fpu_dp;
+ CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
} else {
- opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
- saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);
-
/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
- }
+ CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
- if (present && !saved)
- pr_warn("Enable %s for working apps\n", opt_nm);
- else if (!present && saved)
- panic("Disable %s, hardware NOT present\n", opt_nm);
+ dsp_config_check();
+ }
}
/*
@@ -573,10 +576,6 @@
*/
root_mountflags &= ~MS_RDONLY;
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
-
arc_unwind_init();
}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 8877de0..9d5996e 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -364,7 +364,7 @@
regs->r0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
/*
@@ -437,6 +437,6 @@
* ASM glue gaurantees that this is only called when returning to
* user mode
*/
- if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ if (test_thread_flag(TIF_NOTIFY_RESUME))
tracehook_notify_resume(regs);
}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index eca35e0..db0e104 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -189,7 +189,6 @@
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
@@ -226,7 +225,7 @@
}
if (!cpu_online(cpu)) {
- pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
+ pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
return -1;
}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index fc3054c..f73da20 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -42,11 +42,23 @@
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
struct unwind_frame_info *frame_info)
{
- /*
- * synchronous unwinding (e.g. dump_stack)
- * - uses current values of SP and friends
- */
- if (regs == NULL && (tsk == NULL || tsk == current)) {
+ if (regs) {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ } else if (tsk == NULL || tsk == current) {
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -63,7 +75,7 @@
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
- } else if (regs == NULL) {
+ } else {
/*
* Asynchronous unwinding of a likely sleeping task
* - first ensure it is actually sleeping
@@ -94,20 +106,7 @@
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
- } else {
- /*
- * Asynchronous unwinding of intr/exception
- * - Just uses the pt_regs passed
- */
- frame_info->task = tsk;
-
- frame_info->regs.r27 = regs->fp;
- frame_info->regs.r28 = regs->sp;
- frame_info->regs.r31 = regs->blink;
- frame_info->regs.r63 = regs->ret;
- frame_info->call_frame = 0;
}
-
return 0;
}
@@ -170,9 +169,11 @@
/* Call-back which plugs into unwinding core to dump the stack in
* case of panic/OOPs/BUG etc
*/
-static int __print_sym(unsigned int address, void *unused)
+static int __print_sym(unsigned int address, void *arg)
{
- printk(" %pS\n", (void *)address);
+ const char *loglvl = arg;
+
+ printk("%s %pS\n", loglvl, (void *)address);
return 0;
}
@@ -229,17 +230,18 @@
*-------------------------------------------------------------------------
*/
-noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl)
{
- pr_info("\nStack Trace:\n");
- arc_unwind_core(tsk, regs, __print_sym, NULL);
+ printk("%s\nStack Trace:\n", loglvl);
+ arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
/* Expected by sched Code */
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- show_stacktrace(tsk, NULL);
+ show_stacktrace(tsk, NULL, loglvl);
}
/* Another API expected by schedular, shows up in "ps" as Wait Channel
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index b79886a..a331bb5 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,44 +18,37 @@
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
-
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
@@ -89,7 +82,7 @@
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
- down_read(&active_mm->mmap_sem);
+ mmap_read_lock(active_mm);
vma = find_vma(active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
@@ -104,15 +97,14 @@
if (IS_ERR(nm))
nm = "?";
}
- pr_info(" @off 0x%lx in [%s]\n"
- " VMA: 0x%08lx to 0x%08lx\n",
+ pr_info(" @off 0x%lx in [%s] VMA: 0x%08lx to 0x%08lx\n",
vma->vm_start < TASK_UNMAPPED_BASE ?
address : address - vma->vm_start,
nm, vma->vm_start, vma->vm_end);
} else
pr_info(" @No matching VMA found\n");
- up_read(&active_mm->mmap_sem);
+ mmap_read_unlock(active_mm);
}
static void show_ecr_verbose(struct pt_regs *regs)
@@ -120,8 +112,6 @@
unsigned int vec, cause_code;
unsigned long address;
- pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
-
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
@@ -130,10 +120,10 @@
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
- pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
+ pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"),
- address, regs->ret);
+ address, (void *)regs->ret);
} else if (vec == ECR_V_ITLB_MISS) {
pr_cont("Insn could not be fetched\n");
} else if (vec == ECR_V_MACH_CHK) {
@@ -178,7 +168,7 @@
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
@@ -191,43 +181,31 @@
show_ecr_verbose(regs);
- pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
- current->thread.fault_address,
- (void *)regs->blink, (void *)regs->ret);
-
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
- pr_info("[STAT32]: 0x%08lx", regs->status32);
+ pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\nSTAT: 0x%08lx",
+ regs->event, current->thread.fault_address, regs->ret,
+ regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
#ifdef CONFIG_ISA_ARCOMPACT
- pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+ pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE),
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" : %2s%2s%2s%2s\n",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
- regs->bta, regs->sp, regs->fp);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
-
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
@@ -245,5 +223,5 @@
/* Show stack trace if this Fatality happened in kernel mode */
if (!user_mode(regs))
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index dc05a63..74ad425 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -42,10 +42,10 @@
#define EXTRA_INFO(f) { \
BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
- % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ % sizeof_field(struct unwind_frame_info, f)) \
+ offsetof(struct unwind_frame_info, f) \
- / FIELD_SIZEOF(struct unwind_frame_info, f), \
- FIELD_SIZEOF(struct unwind_frame_info, f) \
+ / sizeof_field(struct unwind_frame_info, f), \
+ sizeof_field(struct unwind_frame_info, f) \
}
#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
@@ -572,7 +572,7 @@
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
- /* Fall through */
+ fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
@@ -827,7 +827,7 @@
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -835,7 +835,7 @@
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
@@ -1178,11 +1178,9 @@
#endif
/* update frame */
-#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
if (frame->call_frame
&& !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
frame->call_frame = 0;
-#endif
cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 0391b82..f67e4ad 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -97,13 +97,13 @@
_etext = .;
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
/*
* 1. this is .data essentially
* 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
*/
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@@ -120,12 +120,11 @@
/DISCARD/ : { *(.eh_frame) }
#endif
- NOTES
-
. = ALIGN(PAGE_SIZE);
_end = . ;
STABS_DEBUG
+ ELF_DETAILS
DISCARDS
.arcextmap 0 : {