Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
index c5f676c..bbb90f9 100644
--- a/arch/xtensa/kernel/.gitignore
+++ b/arch/xtensa/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 6f62902..d4082c6 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,10 +5,11 @@
extra-y := head.o vmlinux.lds
-obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
vectors.o
+obj-$(CONFIG_MMU) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index d956f87..45cc0ae 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -15,17 +15,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
-#include <asm/processor.h>
#include <asm/coprocessor.h>
-#include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
-#include <asm/unistd.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/signal.h>
-#include <asm/tlbflush.h>
+#include <asm/regs.h>
#if XTENSA_HAVE_COPROCESSORS
@@ -66,6 +58,8 @@
.endif; \
.long THREAD_XTREGS_CP##x
+ __XTENSA_HANDLER
+
SAVE_CP_REGS(0)
SAVE_CP_REGS(1)
SAVE_CP_REGS(2)
@@ -84,7 +78,6 @@
LOAD_CP_REGS(6)
LOAD_CP_REGS(7)
- .section ".rodata", "a"
.align 4
.Lsave_cp_regs_jump_table:
SAVE_CP_REGS_TAB(0)
@@ -106,8 +99,6 @@
LOAD_CP_REGS_TAB(6)
LOAD_CP_REGS_TAB(7)
- .previous
-
/*
* Entry condition:
*
@@ -122,13 +113,6 @@
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-ENTRY(fast_coprocessor_double)
-
- wsr a0, excsave1
- call0 unrecoverable_exception
-
-ENDPROC(fast_coprocessor_double)
-
ENTRY(fast_coprocessor)
/* Save remaining registers a1-a3 and SAR */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 1f07876..703cf62 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
@@ -22,7 +23,6 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
@@ -525,7 +525,7 @@
call4 schedule # void schedule (void)
j 1b
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
6:
_bbci.l a4, TIF_NEED_RESCHED, 4f
@@ -534,7 +534,7 @@
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
call4 preempt_schedule_irq
- j 1b
+ j 4f
#endif
#if XTENSA_FAKE_NMI
@@ -944,6 +944,9 @@
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+ __XTENSA_HANDLER
+ .literal_position
+
/*
* Fast-handler for alloca exceptions
*
@@ -961,14 +964,14 @@
* of the proper size instead.
*
* This algorithm simply backs out the register changes started by the user
- * excpetion handler, makes it appear that we have started a window underflow
+ * exception handler, makes it appear that we have started a window underflow
* by rotating the window back and then setting the old window base (OWB) in
* the 'ps' register with the rolled back window base. The 'movsp' instruction
* will be re-executed and this time since the next window frames is in the
* active AR registers it won't cause an exception.
*
* If the WindowUnderflow code gets a TLB miss the page will get mapped
- * the the partial windeowUnderflow will be handeled in the double exception
+ * the partial WindowUnderflow will be handled in the double exception
* handler.
*
* Entry condition:
@@ -1029,7 +1032,7 @@
ENTRY(fast_illegal_instruction_user)
rsr a0, ps
- bbsi.l a0, PS_WOE_BIT, user_exception
+ bbsi.l a0, PS_WOE_BIT, 1f
s32i a3, a2, PT_AREG3
movi a3, PS_WOE_MASK
or a0, a0, a3
@@ -1038,6 +1041,8 @@
l32i a0, a2, PT_AREG0
rsr a2, depc
rfe
+1:
+ call0 user_exception
ENDPROC(fast_illegal_instruction_user)
#endif
@@ -1076,7 +1081,7 @@
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
- j user_exception
+ call0 user_exception
ENDPROC(fast_syscall_user)
@@ -1767,8 +1772,8 @@
rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
- j _kernel_exception
-1: j _user_exception
+ call0 _kernel_exception
+1: call0 _user_exception
ENDPROC(fast_second_level_miss)
@@ -1864,13 +1869,14 @@
rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
- j _kernel_exception
-1: j _user_exception
+ call0 _kernel_exception
+1: call0 _user_exception
ENDPROC(fast_store_prohibited)
#endif /* CONFIG_MMU */
+ .text
/*
* System Calls.
*
@@ -1881,8 +1887,7 @@
ENTRY(system_call)
- /* reserve 4 bytes on stack for function parameter */
- abi_entry(4)
+ abi_entry_default
/* regs->syscall = regs->areg[2] */
@@ -1901,8 +1906,6 @@
l32i a7, a2, PT_SYSCALL
1:
- s32i a7, a1, 4
-
/* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table
@@ -1922,9 +1925,6 @@
l32i a10, a2, PT_AREG8
l32i a11, a2, PT_AREG9
- /* Pass one additional argument to the syscall: pt_regs (on stack) */
- s32i a2, a1, 0
-
callx4 a4
1: /* regs->areg[2] = return_value */
@@ -1932,16 +1932,12 @@
s32i a6, a2, PT_AREG2
bnez a3, 1f
.Lsyscall_exit:
- abi_ret(4)
+ abi_ret_default
1:
- l32i a4, a1, 4
- l32i a3, a2, PT_SYSCALL
- s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
- s32i a3, a2, PT_SYSCALL
- abi_ret(4)
+ abi_ret_default
ENDPROC(system_call)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 4ae998b..e0c1fac 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -260,6 +260,13 @@
___invalidate_icache_all a2 a3
isync
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup bootstrap CPU stack in XIP kernel */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+#endif
+
movi a6, 0
xsr a6, excsave1
@@ -355,10 +362,10 @@
* DATA section
*/
- .section ".data.init.refok"
- .align 4
+ __REFDATA
+ .align 4
ENTRY(start_info)
- .long init_thread_union + KERNEL_STACK_SIZE
+ .long init_thread_union + KERNEL_STACK_SIZE
/*
* BSS section
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a48bf2d..80cc977 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -145,7 +145,7 @@
void __init init_IRQ(void)
{
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 154979d..94955ca 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -11,8 +11,7 @@
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
-#include <linux/dma-contiguous.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
@@ -44,8 +43,8 @@
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -62,8 +61,8 @@
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -81,122 +80,19 @@
}
}
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
+}
+
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
+ */
#ifdef CONFIG_MMU
-bool platform_vaddr_cached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_CACHED_VADDR &&
- addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
-}
-
-bool platform_vaddr_uncached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_BYPASS_VADDR &&
- addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
-}
-
-void *platform_vaddr_to_uncached(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
{
return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
-
-void *platform_vaddr_to_cached(void *p)
-{
- return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
-#else
-bool __attribute__((weak)) platform_vaddr_cached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return true;
-}
-
-bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return false;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
-}
-#endif
-
-/*
- * Note: We assume that the full memory space is always mapped to 'kseg'
- * Otherwise we have to use page attributes (not implemented).
- */
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t flag, unsigned long attrs)
-{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
- /* ignore region speicifiers */
-
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- flag |= GFP_DMA;
-
- if (gfpflags_allow_blocking(flag))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag & __GFP_NOWARN);
-
- if (!page)
- page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-
- if (!page)
- return NULL;
-
- *handle = phys_to_dma(dev, page_to_phys(page));
-
-#ifdef CONFIG_MMU
- if (PageHighMem(page)) {
- void *p;
-
- p = dma_common_contiguous_remap(page, size,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!p) {
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
- }
- return p;
- }
-#endif
- BUG_ON(!platform_vaddr_cached(page_address(page)));
- __invalidate_dcache_range((unsigned long)page_address(page), size);
- return platform_vaddr_to_uncached(page_address(page));
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page;
-
- if (platform_vaddr_uncached(vaddr)) {
- page = virt_to_page(platform_vaddr_to_cached(vaddr));
- } else {
-#ifdef CONFIG_MMU
- dma_common_free_remap(vaddr, size);
-#endif
- page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
- }
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
-}
+#endif /* CONFIG_MMU */
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 86c9ba9..a0d05c8 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -362,9 +362,7 @@
struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
unsigned i;
- for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
- i < XCHAL_NUM_PERF_COUNTERS;
- i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+ for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
struct perf_event *event = ev->event[i];
struct hw_perf_event *hwc = &event->hw;
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index a95ba05..ac1e0e5 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -12,12 +12,10 @@
* Chris Zankel <chris@zankel.net>
*/
+#include <linux/printk.h>
#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/time.h>
#include <asm/platform.h>
#include <asm/timex.h>
-#include <asm/param.h> /* HZ */
#define _F(r,f,a,b) \
r __platform_##f a b; \
@@ -28,6 +26,7 @@
* (Please, refer to include/asm-xtensa/platform.h for more information)
*/
+_F(void, init, (bp_tag_t *first), { });
_F(void, setup, (char** cmd), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 7cbf8bd..397a7de 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/rcupdate.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -202,7 +201,7 @@
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
unsigned long thread_fn_arg, struct task_struct *p,
unsigned long tls)
{
@@ -265,6 +264,8 @@
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
+ childregs->syscall = regs->syscall;
+
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = tls;
} else {
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 145742d..bb3f479 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -12,6 +12,7 @@
* Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
*/
+#include <linux/audit.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
@@ -21,6 +22,7 @@
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
@@ -33,13 +35,11 @@
#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
struct user_pt_regs newregs = {
@@ -52,6 +52,7 @@
.threadptr = regs->threadptr,
.windowbase = regs->windowbase,
.windowstart = regs->windowstart,
+ .syscall = regs->syscall,
};
memcpy(newregs.a,
@@ -61,8 +62,7 @@
regs->areg,
(WSBITS - regs->windowbase) * 16);
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &newregs, 0, -1);
+ return membuf_write(&to, &newregs, sizeof(newregs));
}
static int gpr_set(struct task_struct *target,
@@ -91,6 +91,9 @@
regs->sar = newregs.sar;
regs->threadptr = newregs.threadptr;
+ if (newregs.syscall)
+ regs->syscall = newregs.syscall;
+
if (newregs.windowbase != regs->windowbase ||
newregs.windowstart != regs->windowstart) {
u32 rotws, wmask;
@@ -116,8 +119,7 @@
static int tie_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
int ret;
struct pt_regs *regs = task_pt_regs(target);
@@ -142,8 +144,7 @@
newregs->cp6 = ti->xtregs_cp.cp6;
newregs->cp7 = ti->xtregs_cp.cp7;
#endif
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- newregs, 0, -1);
+ ret = membuf_write(&to, newregs, sizeof(*newregs));
kfree(newregs);
return ret;
}
@@ -198,7 +199,7 @@
.n = sizeof(struct user_pt_regs) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = gpr_get,
+ .regset_get = gpr_get,
.set = gpr_set,
},
[REGSET_TIE] = {
@@ -206,7 +207,7 @@
.n = sizeof(elf_xtregs_t) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = tie_get,
+ .regset_get = tie_get,
.set = tie_set,
},
};
@@ -555,7 +556,8 @@
return 0;
}
- if (regs->syscall == NO_SYSCALL) {
+ if (regs->syscall == NO_SYSCALL ||
+ secure_computing() == -1) {
do_syscall_trace_leave(regs);
return 0;
}
@@ -563,6 +565,9 @@
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
+ audit_syscall_entry(regs->syscall, regs->areg[6],
+ regs->areg[3], regs->areg[4],
+ regs->areg[5]);
return 1;
}
@@ -570,6 +575,8 @@
{
int step;
+ audit_syscall_exit(regs);
+
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs_return_value(regs));
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index d081721..ee9082a 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -37,7 +37,6 @@
#include <asm/bootparam.h>
#include <asm/kasan.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/platform.h>
@@ -64,7 +63,7 @@
extern int initrd_below_start_ok;
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void *dtb_start = __dtb_start;
#endif
@@ -94,7 +93,7 @@
} tagtable_t;
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
- __attribute__((used, section(".taglist"))) = { tag, fn }
+ __section(".taglist") __attribute__((used)) = { tag, fn }
/* parse current tag */
@@ -126,7 +125,7 @@
#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
static int __init parse_tag_fdt(const bp_tag_t *tag)
{
@@ -136,7 +135,7 @@
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
@@ -184,7 +183,7 @@
}
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -233,7 +232,7 @@
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
/*
* Initialize architecture. (Early stage)
@@ -254,7 +253,7 @@
if (bp_start)
parse_bootparam(bp_start);
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
early_init_devtree(dtb_start);
#endif
@@ -284,6 +283,8 @@
extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_text_start;
extern char _DoubleExceptionVector_text_end;
+extern char _exception_text_start;
+extern char _exception_text_end;
#if XCHAL_EXCM_LEVEL >= 2
extern char _Level2InterruptVector_text_start;
extern char _Level2InterruptVector_text_end;
@@ -308,6 +309,10 @@
extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
+#ifdef CONFIG_XIP_KERNEL
+extern char _xip_start[];
+extern char _xip_end[];
+#endif
static inline int __init_memblock mem_reserve(unsigned long start,
unsigned long end)
@@ -339,8 +344,11 @@
#endif
mem_reserve(__pa(_stext), __pa(_end));
+#ifdef CONFIG_XIP_KERNEL
+ mem_reserve(__pa(_xip_start), __pa(_xip_end));
+#endif
-#ifdef CONFIG_VECTORS_OFFSET
+#ifdef CONFIG_VECTORS_ADDR
mem_reserve(__pa(&_WindowVectors_text_start),
__pa(&_WindowVectors_text_end));
@@ -356,6 +364,8 @@
mem_reserve(__pa(&_DoubleExceptionVector_text_start),
__pa(&_DoubleExceptionVector_text_end));
+ mem_reserve(__pa(&_exception_text_start),
+ __pa(&_exception_text_end));
#if XCHAL_EXCM_LEVEL >= 2
mem_reserve(__pa(&_Level2InterruptVector_text_start),
__pa(&_Level2InterruptVector_text_end));
@@ -377,7 +387,7 @@
__pa(&_Level6InterruptVector_text_end));
#endif
-#endif /* CONFIG_VECTORS_OFFSET */
+#endif /* CONFIG_VECTORS_ADDR */
#ifdef CONFIG_SMP
mem_reserve(__pa(&_SecondaryResetVector_text_start),
@@ -398,8 +408,6 @@
#ifdef CONFIG_VT
# if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
-# elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
# endif
#endif
}
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index dae83cd..1fb1047 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -236,9 +236,9 @@
* Do a signal return; undo the signal stack.
*/
-asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
- long a4, long a5, struct pt_regs *regs)
+asmlinkage long xtensa_rt_sigreturn(void)
{
+ struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t set;
int ret;
@@ -448,7 +448,7 @@
regs->areg[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
@@ -501,6 +501,6 @@
if (test_thread_flag(TIF_SIGPENDING))
do_signal(regs);
- if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ if (test_thread_flag(TIF_NOTIFY_RESUME))
tracehook_notify_resume(regs);
}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 83b244c..1254da0 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -53,16 +53,12 @@
#define IPI_IRQ 0
static irqreturn_t ipi_interrupt(int irq, void *dev_id);
-static struct irqaction ipi_irqaction = {
- .handler = ipi_interrupt,
- .flags = IRQF_PERCPU,
- .name = "ipi",
-};
void ipi_init(void)
{
unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
- setup_irq(irq, &ipi_irqaction);
+ if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL))
+ pr_err("Failed to request irq %u (ipi)\n", irq);
}
static inline unsigned int get_core_count(void)
@@ -149,7 +145,6 @@
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
- preempt_disable();
trace_hardirqs_off();
calibrate_delay();
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 25f4de7..b070f27 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -222,7 +222,7 @@
204 common quotactl sys_quotactl
# 205 was old nfsservctl
205 common nfsservctl sys_ni_syscall
-206 common _sysctl sys_sysctl
+206 common _sysctl sys_ni_syscall
207 common bdflush sys_bdflush
208 common uname sys_newuname
209 common sysinfo sys_sysinfo
@@ -406,3 +406,8 @@
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
diff --git a/arch/xtensa/kernel/syscalls/syscallhdr.sh b/arch/xtensa/kernel/syscalls/syscallhdr.sh
index d37db64..eebfb8a 100644
--- a/arch/xtensa/kernel/syscalls/syscallhdr.sh
+++ b/arch/xtensa/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 69db8c9..77971fe 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -128,12 +128,6 @@
return IRQ_HANDLED;
}
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
- .flags = IRQF_TIMER,
- .name = "timer",
-};
-
void local_timer_setup(unsigned cpu)
{
struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
@@ -184,6 +178,8 @@
void __init time_init(void)
{
+ int irq;
+
of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
pr_info("Calibrating CPU frequency ");
@@ -199,7 +195,9 @@
__func__);
clocksource_register_hz(&ccount_clocksource, ccount_freq);
local_timer_setup(0);
- setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+ irq = this_cpu_ptr(&ccount_timer)->evt.irq;
+ if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
timer_probe();
}
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4a6c495..efc3a29 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -34,12 +34,12 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>
@@ -479,44 +479,43 @@
static int show_trace_cb(struct stackframe *frame, void *data)
{
+ const char *loglvl = data;
+
if (kernel_text_address(frame->pc))
- pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+ printk("%s [<%08lx>] %pB\n",
+ loglvl, frame->pc, (void *)frame->pc);
return 0;
}
-void show_trace(struct task_struct *task, unsigned long *sp)
+static void show_trace(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
{
if (!sp)
sp = stack_pointer(task);
- pr_info("Call Trace:\n");
- walk_stackframe(sp, show_trace_cb, NULL);
-#ifndef CONFIG_KALLSYMS
- pr_cont("\n");
-#endif
+ printk("%sCall Trace:\n", loglvl);
+ walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}
-static int kstack_depth_to_print = 24;
+#define STACK_DUMP_ENTRY_SIZE 4
+#define STACK_DUMP_LINE_SIZE 32
+static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
- int i = 0;
- unsigned long *stack;
+ size_t len;
if (!sp)
sp = stack_pointer(task);
- stack = sp;
- pr_info("Stack:\n");
+ len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
+ kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(sp))
- break;
- pr_cont(" %08lx", *sp++);
- if (i % 8 == 7)
- pr_cont("\n");
- }
- show_trace(task, stack);
+ printk("%sStack:\n", loglvl);
+ print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
+ STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+ sp, len, false);
+ show_trace(task, sp, loglvl);
}
DEFINE_SPINLOCK(die_lock);
@@ -524,15 +523,18 @@
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
+ const char *pr = "";
+
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
console_verbose();
spin_lock_irq(&die_lock);
- pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
+ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
show_regs(regs);
if (!user_mode(regs))
- show_stack(NULL, (unsigned long*)regs->areg[1]);
+ show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 841503d..1a7538c 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -43,10 +43,11 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/asmmacro.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
@@ -477,7 +478,6 @@
ENDPROC(_DoubleExceptionVector)
- .text
/*
* Fixup handler for TLB miss in double exception handler for window owerflow.
* We get here with windowbase set to the window that was being spilled and
@@ -505,6 +505,7 @@
* a3: exctable, original value in excsave1
*/
+ __XTENSA_HANDLER
.literal_position
ENTRY(window_overflow_restore_a0_fixup)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 943f106..d23a6e3 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -14,6 +14,8 @@
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>
@@ -45,15 +47,20 @@
LONG(sym ## _end); \
LONG(LOADADDR(section))
+#if !defined(CONFIG_VECTORS_ADDR) && XCHAL_HAVE_VECBASE
+#define MERGED_VECTORS 1
+#else
+#define MERGED_VECTORS 0
+#endif
+
/*
- * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
- * defined code for every vector is located with other init data. At startup
+ * Macro to define a section for a vector. When MERGED_VECTORS is 0
+ * code for every vector is located with other init data. At startup
* time head.S copies code for every vector to its final position according
* to description recorded in the corresponding RELOCATE_ENTRY.
*/
-#ifdef CONFIG_VECTORS_OFFSET
-#define SECTION_VECTOR(sym, section, addr, prevsec) \
+#define SECTION_VECTOR4(sym, section, addr, prevsec) \
section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
{ \
. = ALIGN(4); \
@@ -61,11 +68,10 @@
*(section) \
sym ## _end = ABSOLUTE(.); \
}
-#else
-#define SECTION_VECTOR(section, addr) \
+
+#define SECTION_VECTOR2(section, addr) \
. = addr; \
*(section)
-#endif
/*
* Mapping of input sections to output sections when linking.
@@ -84,30 +90,32 @@
/* The HEAD_TEXT section must be the first section! */
HEAD_TEXT
-#ifndef CONFIG_VECTORS_OFFSET
- . = ALIGN(PAGE_SIZE);
- _vecbase = .;
+#if MERGED_VECTORS
+ . = ALIGN(PAGE_SIZE);
+ _vecbase = .;
- SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR)
+ SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR)
#if XCHAL_EXCM_LEVEL >= 2
- SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
+ SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
#endif
#if XCHAL_EXCM_LEVEL >= 3
- SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
+ SECTION_VECTOR2 (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
#endif
#if XCHAL_EXCM_LEVEL >= 4
- SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
+ SECTION_VECTOR2 (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
#endif
#if XCHAL_EXCM_LEVEL >= 5
- SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
+ SECTION_VECTOR2 (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
#endif
#if XCHAL_EXCM_LEVEL >= 6
- SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
+ SECTION_VECTOR2 (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
#endif
- SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
- SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
- SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
- SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+ SECTION_VECTOR2 (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
+ SECTION_VECTOR2 (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
+ SECTION_VECTOR2 (.UserExceptionVector.text, USER_VECTOR_VADDR)
+ SECTION_VECTOR2 (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+
+ *(.exception.text)
#endif
IRQENTRY_TEXT
@@ -117,25 +125,22 @@
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
-
+ *(.fixup)
}
_etext = .;
PROVIDE (etext = .);
. = ALIGN(16);
- RODATA
+ RO_DATA(4096)
- /* Relocation table */
-
- .fixup : { *(.fixup) }
-
- EXCEPTION_TABLE(16)
- NOTES
/* Data section */
+#ifdef CONFIG_XIP_KERNEL
+ INIT_TEXT_SECTION(PAGE_SIZE)
+#else
_sdata = .;
- RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
/* Initialization code and data: */
@@ -147,6 +152,11 @@
.init.data :
{
INIT_DATA
+ }
+#endif
+
+ .init.rodata :
+ {
. = ALIGN(0x4);
__tagtable_begin = .;
*(.taglist)
@@ -155,7 +165,7 @@
. = ALIGN(16);
__boot_reloc_table_start = ABSOLUTE(.);
-#ifdef CONFIG_VECTORS_OFFSET
+#if !MERGED_VECTORS
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
#if XCHAL_EXCM_LEVEL >= 2
@@ -186,13 +196,18 @@
.DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
+ RELOCATE_ENTRY(_exception_text,
+ .exception.text);
+#endif
+#ifdef CONFIG_XIP_KERNEL
+ RELOCATE_ENTRY(_xip_data, .data);
+ RELOCATE_ENTRY(_xip_init_data, .init.data);
#endif
#if defined(CONFIG_SMP)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
-
__boot_reloc_table_end = ABSOLUTE(.) ;
INIT_SETUP(XCHAL_ICACHE_LINESIZE)
@@ -208,21 +223,24 @@
. = ALIGN(4);
.dummy : { LONG(0) }
-#ifdef CONFIG_VECTORS_OFFSET
+#undef LAST
+#define LAST .dummy
+
+#if !MERGED_VECTORS
/* The vectors are relocated to the real position at startup time */
- SECTION_VECTOR (_WindowVectors_text,
+ SECTION_VECTOR4 (_WindowVectors_text,
.WindowVectors.text,
WINDOW_VECTORS_VADDR,
.dummy)
- SECTION_VECTOR (_DebugInterruptVector_text,
+ SECTION_VECTOR4 (_DebugInterruptVector_text,
.DebugInterruptVector.text,
DEBUG_VECTOR_VADDR,
.WindowVectors.text)
#undef LAST
#define LAST .DebugInterruptVector.text
#if XCHAL_EXCM_LEVEL >= 2
- SECTION_VECTOR (_Level2InterruptVector_text,
+ SECTION_VECTOR4 (_Level2InterruptVector_text,
.Level2InterruptVector.text,
INTLEVEL2_VECTOR_VADDR,
LAST)
@@ -230,7 +248,7 @@
# define LAST .Level2InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 3
- SECTION_VECTOR (_Level3InterruptVector_text,
+ SECTION_VECTOR4 (_Level3InterruptVector_text,
.Level3InterruptVector.text,
INTLEVEL3_VECTOR_VADDR,
LAST)
@@ -238,7 +256,7 @@
# define LAST .Level3InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 4
- SECTION_VECTOR (_Level4InterruptVector_text,
+ SECTION_VECTOR4 (_Level4InterruptVector_text,
.Level4InterruptVector.text,
INTLEVEL4_VECTOR_VADDR,
LAST)
@@ -246,7 +264,7 @@
# define LAST .Level4InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 5
- SECTION_VECTOR (_Level5InterruptVector_text,
+ SECTION_VECTOR4 (_Level5InterruptVector_text,
.Level5InterruptVector.text,
INTLEVEL5_VECTOR_VADDR,
LAST)
@@ -254,49 +272,95 @@
# define LAST .Level5InterruptVector.text
#endif
#if XCHAL_EXCM_LEVEL >= 6
- SECTION_VECTOR (_Level6InterruptVector_text,
+ SECTION_VECTOR4 (_Level6InterruptVector_text,
.Level6InterruptVector.text,
INTLEVEL6_VECTOR_VADDR,
LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
- SECTION_VECTOR (_KernelExceptionVector_text,
+ SECTION_VECTOR4 (_KernelExceptionVector_text,
.KernelExceptionVector.text,
KERNEL_VECTOR_VADDR,
LAST)
#undef LAST
- SECTION_VECTOR (_UserExceptionVector_text,
+ SECTION_VECTOR4 (_UserExceptionVector_text,
.UserExceptionVector.text,
USER_VECTOR_VADDR,
.KernelExceptionVector.text)
- SECTION_VECTOR (_DoubleExceptionVector_text,
+ SECTION_VECTOR4 (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
.UserExceptionVector.text)
-
- . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+#define LAST .DoubleExceptionVector.text
#endif
#if defined(CONFIG_SMP)
- SECTION_VECTOR (_SecondaryResetVector_text,
+ SECTION_VECTOR4 (_SecondaryResetVector_text,
.SecondaryResetVector.text,
RESET_VECTOR1_VADDR,
- .DoubleExceptionVector.text)
-
- . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+ LAST)
+#undef LAST
+#define LAST .SecondaryResetVector.text
#endif
+#if !MERGED_VECTORS
+ SECTION_VECTOR4 (_exception_text,
+ .exception.text,
+ ,
+ LAST)
+#undef LAST
+#define LAST .exception.text
+#endif
+ . = (LOADADDR(LAST) + SIZEOF(LAST) + 3) & ~ 3;
+
+ .dummy1 : AT(ADDR(.dummy1)) { LONG(0) }
. = ALIGN(PAGE_SIZE);
+#ifndef CONFIG_XIP_KERNEL
__init_end = .;
BSS_SECTION(0, 8192, 0)
+#endif
_end = .;
+#ifdef CONFIG_XIP_KERNEL
+ . = CONFIG_XIP_DATA_ADDR;
+
+ _xip_start = .;
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET \
+ (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy1) + SIZEOF(.dummy1) + 3) & ~ 3)
+
+ _xip_data_start = .;
+ _sdata = .;
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+ _xip_data_end = .;
+
+ /* Initialization data: */
+
+ STRUCT_ALIGN();
+
+ _xip_init_data_start = .;
+ __init_begin = .;
+ .init.data :
+ {
+ INIT_DATA
+ }
+ _xip_init_data_end = .;
+ __init_end = .;
+ BSS_SECTION(0, 8192, 0)
+
+ _xip_end = .;
+
+#undef LOAD_OFFSET
+#endif
+
DWARF_DEBUG
.xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 24cf697..415fe7f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -25,7 +25,6 @@
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>