v4.19.13 snapshot.
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/xtensa/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 0000000..9190759
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/Xtensa kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+	 ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+	 vectors.o
+
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
+
+# In the Xtensa architecture, assembly generates literals which must always
+# precede the L32R instruction with a relative offset less than 256 kB.
+# Therefore, the .text and .literal sections must be combined in parentheses
+# in the linker script, such as: *(.literal .text).
+#
+# We need to post-process the generated vmlinux.lds scripts to convert
+# *(xxx.text) to  *(xxx.literal xxx.text) for the following text sections:
+#   .text .ref.text .*init.text .*exit.text .text.*
+#
+# Replicate rules in scripts/Makefile.build
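+#
+# For example (illustrative), a generated line such as
+#   *(.ref.text)
+# comes out of the sed pass below as
+#   *(.ref.literal .ref.text)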
+
+sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \
+	-e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \
+	-e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
+	-e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \
+	-e 's/\.{text}/.text/g'
+
+quiet_cmd__cpp_lds_S = LDS     $@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $<    \
+                 | sed $(sed-y) >$@
+
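+# The $(obj)/vmlinux.lds rule below uses if_changed_dep, so the linker
+# script is regenerated whenever vmlinux.lds.S, any header it includes,
+# or the command line changes.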
+$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
+	$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 0000000..9301452
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,485 @@
+/*
+ * arch/xtensa/kernel/align.S
+ *
+ * Handle unalignment exceptions in kernel space.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica, Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ *
+ * Rewritten by Chris Zankel <chris@zankel.net>
+ *
+ * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
+ */
+
+#include <linux/linkage.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
+/*  First-level exception handler for unaligned exceptions.
+ *
+ *  Note: This handler works only for kernel exceptions.  Unaligned user
+ *        access should get a seg fault.
+ */
+
+/* Big and little endian 16-bit values are located in
+ * different halves of a register.  HWORD_START helps to
+ * abstract the notion of extracting a 16-bit value from a
+ * register.
+ * We also have to define new shifting instructions because
+ * lsb and msb are on 'opposite' ends in a register for
+ * different endian machines.
+ *
+ * Assume a memory region in ascending address:
+ *   	0 1 2 3|4 5 6 7
+ *
+ * When loading one word into a register, the content of that register is:
+ *  LE	3 2 1 0, 7 6 5 4
+ *  BE  0 1 2 3, 4 5 6 7
+ *
+ * Masking the bits of the higher/lower address means:
+ *  LE  X X 0 0, 0 0 X X
+ *  BE	0 0 X X, X X 0 0
+ *
+ * Shifting to higher/lower addresses, means:
+ *  LE  shift left / shift right
+ *  BE  shift right / shift left
+ *
+ * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
+ *  LE  mask 0 0 X X / shift left
+ *  BE  shift left / mask 0 0 X X
+ */
+
+#define UNALIGNED_USER_EXCEPTION
+
+#if XCHAL_HAVE_BE
+
+#define HWORD_START	16
+#define	INSN_OP0	28
+#define	INSN_T		24
+#define	INSN_OP1	16
+
+.macro __ssa8r	r;		ssa8l	\r;		.endm
+.macro __sh	r, s;		srl	\r, \s;		.endm
+.macro __sl	r, s;		sll	\r, \s;		.endm
+.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
+.macro __extl	r, s;		slli	\r, \s, 16;	.endm
+
+#else
+
+#define HWORD_START	0
+#define	INSN_OP0	0
+#define	INSN_T		4
+#define	INSN_OP1	12
+
+.macro __ssa8r	r;		ssa8b	\r;		.endm
+.macro __sh	r, s;		sll	\r, \s;		.endm
+.macro __sl	r, s;		srl	\r, \s;		.endm
+.macro __exth	r, s;		slli	\r, \s, 16;	.endm
+.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm
+
+#endif
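+
+/* Summary of the helpers above (cf. the endianness note below): __sh/__sl
+ * shift a value towards the higher/lower memory address, __exth/__extl
+ * extract the 16-bit value destined for the higher/lower address, and
+ * __ssa8r sets up SAR for the reverse (store-side) funnel shift.
+ */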
+
+/*
+ *	xxxx xxxx = imm8 field
+ *	     yyyy = imm4 field
+ *	     ssss = s field
+ *	     tttt = t field
+ *
+ *	       		 16		    0
+ *		          -------------------
+ *	L32I.N		  yyyy ssss tttt 1000
+ *	S32I.N	          yyyy ssss tttt 1001
+ *
+ *	       23			    0
+ *		-----------------------------
+ *	res	          0000           0010
+ *	L16UI	xxxx xxxx 0001 ssss tttt 0010
+ *	L32I	xxxx xxxx 0010 ssss tttt 0010
+ *	XXX	          0011 ssss tttt 0010
+ *	XXX	          0100 ssss tttt 0010
+ *	S16I	xxxx xxxx 0101 ssss tttt 0010
+ *	S32I	xxxx xxxx 0110 ssss tttt 0010
+ *	XXX	          0111 ssss tttt 0010
+ *	XXX	          1000 ssss tttt 0010
+ *	L16SI	xxxx xxxx 1001 ssss tttt 0010
+ *	XXX	          1010           0010
+ *      **L32AI	xxxx xxxx 1011 ssss tttt 0010 unsupported
+ *	XXX	          1100           0010
+ *	XXX	          1101           0010
+ *	XXX	          1110           0010
+ *	**S32RI	xxxx xxxx 1111 ssss tttt 0010 unsupported
+ *		-----------------------------
+ *                           ^         ^    ^
+ *    sub-opcode (NIBBLE_R) -+         |    |
+ *       t field (NIBBLE_T) -----------+    |
+ *  major opcode (NIBBLE_OP0) --------------+
+ */
+
+#define OP0_L32I_N	0x8		/* load immediate narrow */
+#define OP0_S32I_N	0x9		/* store immediate narrow */
+#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
+#define OP1_SI_BIT	2		/* OP1 bit number for stores */
+
+#define OP1_L32I	0x2
+#define OP1_L16UI	0x1
+#define OP1_L16SI	0x9
+#define OP1_L32AI	0xb
+
+#define OP1_S32I	0x6
+#define OP1_S16I	0x5
+#define OP1_S32RI	0xf
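+
+/* Example: a little-endian L32I instruction carries 0x2 in its INSN_OP0
+ * nibble and OP1_L32I (0x2) in its INSN_OP1 nibble. The code below first
+ * tests INSN_OP0 for the narrow L32I.N/S32I.N forms (when the density
+ * option is available) and then dispatches on INSN_OP1 for the 24-bit
+ * load/store forms.
+ */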
+
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+	.literal_position
+ENTRY(fast_unaligned)
+
+	/* Note: We don't expect the address to be aligned on a word
+	 *       boundary. After all, the processor generated that exception;
+	 *       if the address were aligned, that would indicate a hardware
+	 *       fault.
+	 */
+
+	/* Save some working register */
+
+	s32i	a4, a2, PT_AREG4
+	s32i	a5, a2, PT_AREG5
+	s32i	a6, a2, PT_AREG6
+	s32i	a7, a2, PT_AREG7
+	s32i	a8, a2, PT_AREG8
+
+	rsr	a0, depc
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+
+	rsr	a3, excsave1
+	movi	a4, fast_unaligned_fixup
+	s32i	a4, a3, EXC_TABLE_FIXUP
+
+	/* Keep value of SAR in a0 */
+
+	rsr	a0, sar
+	rsr	a8, excvaddr		# load unaligned memory address
+
+	/* Now, identify one of the following load/store instructions.
+	 *
+	 * The only possible danger of a double exception on the
+	 * following l32i instructions is kernel code in vmalloc
+	 * memory. The processor was just executing at the EPC_1
+	 * address, and indeed, already fetched the instruction.  That
+	 * guarantees a TLB mapping, which hasn't been replaced by
+	 * this unaligned exception handler that uses only static TLB
+	 * mappings. However, high-level interrupt handlers might
+	 * modify TLB entries, so for the generic case, we register a
+	 * TABLE_FIXUP handler here, too.
+	 */
+
+	/* a3...a6 saved on stack, a2 = SP */
+
+	/* Extract the instruction that caused the unaligned access. */
+
+	rsr	a7, epc1	# load exception address
+	movi	a3, ~3
+	and	a3, a3, a7	# mask lower bits
+
+	l32i	a4, a3, 0	# load 2 words
+	l32i	a5, a3, 4
+
+	__ssa8	a7
+	__src_b	a4, a4, a5	# a4 has the instruction
+
+	/* Analyze the instruction (load or store?). */
+
+	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble
+
+#if XCHAL_HAVE_DENSITY
+	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
+	addi	a6, a5, -OP0_S32I_N
+	_beqz	a6, .Lstore		# S32I.N, do a store
+#endif
+	/* 'store indicator bit' not set, jump */
+	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload
+
+	/* Store: Jump to table entry to get the value in the source register.*/
+
+.Lstore:movi	a5, .Lstore_table	# table
+	extui	a6, a4, INSN_T, 4	# get source register
+	addx8	a5, a6, a5
+	jx	a5			# jump into table
+
+	/* Load: Load memory address. */
+
+.Lload: movi	a3, ~3
+	and	a3, a3, a8		# align memory address
+
+	__ssa8	a8
+#ifdef UNALIGNED_USER_EXCEPTION
+	addi	a3, a3, 8
+	l32e	a5, a3, -8
+	l32e	a6, a3, -4
+#else
+	l32i	a5, a3, 0
+	l32i	a6, a3, 4
+#endif
+	__src_b	a3, a5, a6		# a3 has the data word
+
+#if XCHAL_HAVE_DENSITY
+	addi	a7, a7, 2		# increment PC (assume 16-bit insn)
+
+	extui	a5, a4, INSN_OP0, 4
+	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump
+
+	addi	a7, a7, 1
+#else
+	addi	a7, a7, 3
+#endif
+
+	extui	a5, a4, INSN_OP1, 4
+	_beqi	a5, OP1_L32I, 1f	# l32i: jump
+
+	extui	a3, a3, 0, 16		# extract lower 16 bits
+	_beqi	a5, OP1_L16UI, 1f
+	addi	a5, a5, -OP1_L16SI
+	_bnez	a5, .Linvalid_instruction_load
+
+	/* sign extend value */
+
+	slli	a3, a3, 16
+	srai	a3, a3, 16
+
+	/* Set target register. */
+
+1:
+	extui	a4, a4, INSN_T, 4	# extract target register
+	movi	a5, .Lload_table
+	addx8	a4, a4, a5
+	jx	a4			# jump to entry for target register
+
+	.align	8
+.Lload_table:
+	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
+	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
+	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
+	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
+	mov	a9, a3		;	_j .Lexit;	.align 8
+	mov	a10, a3		;	_j .Lexit;	.align 8
+	mov	a11, a3		;	_j .Lexit;	.align 8
+	mov	a12, a3		;	_j .Lexit;	.align 8
+	mov	a13, a3		;	_j .Lexit;	.align 8
+	mov	a14, a3		;	_j .Lexit;	.align 8
+	mov	a15, a3		;	_j .Lexit;	.align 8
+
+.Lstore_table:
+	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
+	mov	a3, a1;			_j 1f;	.align 8	# fishy??
+	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
+	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
+	mov	a3, a9		;	_j 1f;	.align 8
+	mov	a3, a10		;	_j 1f;	.align 8
+	mov	a3, a11		;	_j 1f;	.align 8
+	mov	a3, a12		;	_j 1f;	.align 8
+	mov	a3, a13		;	_j 1f;	.align 8
+	mov	a3, a14		;	_j 1f;	.align 8
+	mov	a3, a15		;	_j 1f;	.align 8
+
+	/* We cannot handle this exception. */
+
+	.extern _kernel_exception
+.Linvalid_instruction_load:
+.Linvalid_instruction_store:
+
+	movi	a4, 0
+	rsr	a3, excsave1
+	s32i	a4, a3, EXC_TABLE_FIXUP
+
+	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+
+	l32i	a8, a2, PT_AREG8
+	l32i	a7, a2, PT_AREG7
+	l32i	a6, a2, PT_AREG6
+	l32i	a5, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	wsr	a0, sar
+	mov	a1, a2
+
+	rsr	a0, ps
+	bbsi.l  a0, PS_UM_BIT, 2f     # jump if user mode
+
+	movi	a0, _kernel_exception
+	jx	a0
+
+2:	movi	a0, _user_exception
+	jx	a0
+
+1: 	# a7: instruction pointer, a4: instruction, a3: value
+
+	movi	a6, 0			# mask: ffffffff:00000000
+
+#if XCHAL_HAVE_DENSITY
+	addi	a7, a7, 2		# incr. PC,assume 16-bit instruction
+
+	extui	a5, a4, INSN_OP0, 4	# extract OP0
+	addi	a5, a5, -OP0_S32I_N
+	_beqz	a5, 1f			# s32i.n: jump
+
+	addi	a7, a7, 1		# increment PC, 32-bit instruction
+#else
+	addi	a7, a7, 3		# increment PC, 32-bit instruction
+#endif
+
+	extui	a5, a4, INSN_OP1, 4	# extract OP1
+	_beqi	a5, OP1_S32I, 1f	# jump if 32 bit store
+	_bnei	a5, OP1_S16I, .Linvalid_instruction_store
+
+	movi	a5, -1
+	__extl	a3, a3			# get 16-bit value
+	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000
+
+	/* Get memory address */
+
+1:
+	movi	a4, ~3
+	and	a4, a4, a8		# align memory address
+
+	/* Insert value into memory */
+
+	movi	a5, -1			# mask: ffffffff:XXXX0000
+#ifdef UNALIGNED_USER_EXCEPTION
+	addi	a4, a4, 8
+#endif
+
+	__ssa8r a8
+	__src_b	a8, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
+	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
+#ifdef UNALIGNED_USER_EXCEPTION
+	l32e	a5, a4, -8
+#else
+	l32i	a5, a4, 0		# load lower address word
+#endif
+	and	a5, a5, a8		# mask
+	__sh	a8, a3 			# shift value
+	or	a5, a5, a8		# or with original value
+#ifdef UNALIGNED_USER_EXCEPTION
+	s32e	a5, a4, -8
+	l32e	a8, a4, -4
+#else
+	s32i	a5, a4, 0		# store
+	l32i	a8, a4, 4		# same for upper address word
+#endif
+	__sl	a5, a3
+	and	a6, a8, a6
+	or	a6, a6, a5
+#ifdef UNALIGNED_USER_EXCEPTION
+	s32e	a6, a4, -4
+#else
+	s32i	a6, a4, 4
+#endif
+
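+	/* If the emulated instruction was the last one of a zero-overhead
+	 * loop (PC == LEND and LCOUNT != 0), wind the PC back to LBEG and
+	 * decrement LCOUNT, as the hardware would have done.
+	 */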
+.Lexit:
+#if XCHAL_HAVE_LOOPS
+	rsr	a4, lend		# check if we reached LEND
+	bne	a7, a4, 1f
+	rsr	a4, lcount		# and LCOUNT != 0
+	beqz	a4, 1f
+	addi	a4, a4, -1		# decrement LCOUNT and set
+	rsr	a7, lbeg		# set PC to LBEGIN
+	wsr	a4, lcount
+#endif
+
+1:	wsr	a7, epc1		# skip emulated instruction
+
+	/* Update icount if we're single-stepping in userspace. */
+	rsr	a4, icountlevel
+	beqz	a4, 1f
+	bgeui	a4, LOCKLEVEL + 1, 1f
+	rsr	a4, icount
+	addi	a4, a4, 1
+	wsr	a4, icount
+1:
+	movi	a4, 0
+	rsr	a3, excsave1
+	s32i	a4, a3, EXC_TABLE_FIXUP
+
+	/* Restore working register */
+
+	l32i	a8, a2, PT_AREG8
+	l32i	a7, a2, PT_AREG7
+	l32i	a6, a2, PT_AREG6
+	l32i	a5, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG3
+
+	/* restore SAR and return */
+
+	wsr	a0, sar
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_AREG2
+	rfe
+
+ENDPROC(fast_unaligned)
+
+ENTRY(fast_unaligned_fixup)
+
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	wsr	a3, excsave1
+
+	l32i	a8, a2, PT_AREG8
+	l32i	a7, a2, PT_AREG7
+	l32i	a6, a2, PT_AREG6
+	l32i	a5, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
+	l32i	a0, a2, PT_AREG2
+	xsr	a0, depc			# restore depc and a0
+	wsr	a0, sar
+
+	rsr	a0, exccause
+	s32i	a0, a2, PT_DEPC			# mark as a regular exception
+
+	rsr	a0, ps
+	bbsi.l  a0, PS_UM_BIT, 1f		# jump if user mode
+
+	rsr	a0, exccause
+	addx4	a0, a0, a3              	# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_KERNEL   # load handler
+	l32i	a3, a2, PT_AREG3
+	jx	a0
+1:
+	rsr	a0, exccause
+	addx4	a0, a0, a3              	# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
+	l32i	a3, a2, PT_AREG3
+	jx	a0
+
+ENDPROC(fast_unaligned_fixup)
+
+#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 0000000..120dd74
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,149 @@
+/*
+ * arch/xtensa/kernel/asm-offsets.c
+ *
+ * Generates definitions from c-type structures used by assembly sources.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
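+/* Each DEFINE()/OFFSET() below is emitted by the kbuild asm-offsets
+ * machinery as a #define in include/generated/asm-offsets.h (e.g.
+ * "#define PT_PC 0", pc being the first pt_regs member), so assembly
+ * sources can refer to structure offsets symbolically.
+ */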
+int main(void)
+{
+	/* struct pt_regs */
+	DEFINE(PT_PC, offsetof (struct pt_regs, pc));
+	DEFINE(PT_PS, offsetof (struct pt_regs, ps));
+	DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
+	DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
+	DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
+	DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
+	DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
+	DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
+	DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
+	DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
+	DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+	DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
+	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+	DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+	DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
+	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
+	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
+	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
+	DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
+	DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
+	DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
+	DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
+	DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
+	DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
+	DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
+	DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
+	DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
+	DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
+	DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
+	DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
+	DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
+	DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
+	DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
+	DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
+	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+	DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+	DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
+
+	/* struct task_struct */
+	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
+	DEFINE(TASK_MM, offsetof (struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
+	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
+	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
+	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+	DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
+	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+
+	/* offsets in thread_info struct */
+	OFFSET(TI_TASK, thread_info, task);
+	OFFSET(TI_FLAGS, thread_info, flags);
+	OFFSET(TI_STSTUS, thread_info, status);
+	OFFSET(TI_CPU, thread_info, cpu);
+	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+
+	/* struct thread_info (offset from start_struct) */
+	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+	DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+	DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+	DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+	DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+	DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+	DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+	DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+	DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
+#endif
+	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
+	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+	       thread.current_ds));
+
+	/* struct mm_struct */
+	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
+	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
+	DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
+
+	/* struct page */
+	DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
+
+	/* constants */
+	DEFINE(_CLONE_VM, CLONE_VM);
+	DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+	DEFINE(PG_ARCH_1, PG_arch_1);
+
+	/* struct debug_table */
+	DEFINE(DT_DEBUG_EXCEPTION,
+	       offsetof(struct debug_table, debug_exception));
+	DEFINE(DT_DEBUG_SAVE, offsetof(struct debug_table, debug_save));
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	DEFINE(DT_DBREAKC_SAVE, offsetof(struct debug_table, dbreakc_save));
+	DEFINE(DT_ICOUNT_SAVE, offsetof(struct debug_table, icount_save));
+	DEFINE(DT_ICOUNT_LEVEL_SAVE,
+	       offsetof(struct debug_table, icount_level_save));
+#endif
+
+	/* struct exc_table */
+	DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
+	DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
+	DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
+	DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+	DEFINE(EXC_TABLE_SYSCALL_SAVE,
+	       offsetof(struct exc_table, syscall_save));
+	DEFINE(EXC_TABLE_FAST_USER,
+	       offsetof(struct exc_table, fast_user_handler));
+	DEFINE(EXC_TABLE_FAST_KERNEL,
+	       offsetof(struct exc_table, fast_kernel_handler));
+	DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+
+	return 0;
+}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 0000000..4f8b52d
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,330 @@
+/*
+ * arch/xtensa/kernel/coprocessor.S
+ *
+ * Xtensa processor configuration-specific table of coprocessor and
+ * other custom register layout information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+
+#if XTENSA_HAVE_COPROCESSORS
+
+/*
+ * Macros for lazy context switch. 
+ */
+
+#define SAVE_CP_REGS(x)							\
+	.align 4;							\
+	.Lsave_cp_regs_cp##x:						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
+	.endif;								\
+	jx	a0
+
+#define SAVE_CP_REGS_TAB(x)						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
+	.else;								\
+		.long 0;						\
+	.endif;								\
+	.long THREAD_XTREGS_CP##x
+
+
+#define LOAD_CP_REGS(x)							\
+	.align 4;							\
+	.Lload_cp_regs_cp##x:						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
+	.endif;								\
+	jx	a0
+
+#define LOAD_CP_REGS_TAB(x)						\
+	.if XTENSA_HAVE_COPROCESSOR(x);					\
+		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
+	.else;								\
+		.long 0;						\
+	.endif;								\
+	.long THREAD_XTREGS_CP##x
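+
+/* Each *_CP_REGS_TAB(x) entry is two words: the offset of the save/load
+ * routine (0 if coprocessor x is not configured) followed by
+ * THREAD_XTREGS_CPx. That 8-byte stride is why the lookups below index
+ * the jump tables with addx8.
+ */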
+
+	SAVE_CP_REGS(0)
+	SAVE_CP_REGS(1)
+	SAVE_CP_REGS(2)
+	SAVE_CP_REGS(3)
+	SAVE_CP_REGS(4)
+	SAVE_CP_REGS(5)
+	SAVE_CP_REGS(6)
+	SAVE_CP_REGS(7)
+
+	LOAD_CP_REGS(0)
+	LOAD_CP_REGS(1)
+	LOAD_CP_REGS(2)
+	LOAD_CP_REGS(3)
+	LOAD_CP_REGS(4)
+	LOAD_CP_REGS(5)
+	LOAD_CP_REGS(6)
+	LOAD_CP_REGS(7)
+
+	.align 4
+.Lsave_cp_regs_jump_table:
+	SAVE_CP_REGS_TAB(0)
+	SAVE_CP_REGS_TAB(1)
+	SAVE_CP_REGS_TAB(2)
+	SAVE_CP_REGS_TAB(3)
+	SAVE_CP_REGS_TAB(4)
+	SAVE_CP_REGS_TAB(5)
+	SAVE_CP_REGS_TAB(6)
+	SAVE_CP_REGS_TAB(7)
+
+.Lload_cp_regs_jump_table:
+	LOAD_CP_REGS_TAB(0)
+	LOAD_CP_REGS_TAB(1)
+	LOAD_CP_REGS_TAB(2)
+	LOAD_CP_REGS_TAB(3)
+	LOAD_CP_REGS_TAB(4)
+	LOAD_CP_REGS_TAB(5)
+	LOAD_CP_REGS_TAB(6)
+	LOAD_CP_REGS_TAB(7)
+
+/*
+ * coprocessor_save(buffer, index) 
+ *                    a2      a3
+ * coprocessor_load(buffer, index)
+ *                    a2      a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'. 
+ * The register values are saved to or loaded from the 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_save)
+
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENDPROC(coprocessor_save)
+
+ENTRY(coprocessor_load)
+
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENDPROC(coprocessor_load)
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                               a2         a3
+ * coprocessor_restore(struct thread_info*, index)
+ *                                a2          a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the thread_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+
+ENTRY(coprocessor_flush)
+
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENDPROC(coprocessor_flush)
+
+ENTRY(coprocessor_restore)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENDPROC(coprocessor_restore)
+
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor_double)
+
+	wsr	a0, excsave1
+	call0	unrecoverable_exception
+
+ENDPROC(fast_coprocessor_double)
+
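+/* Lazy coprocessor context switch: on a coprocessor-disabled exception,
+ * enable the coprocessor, save its registers into the previous owner's
+ * thread_info (if any), disable it for that owner, then reload the
+ * current thread's registers and record it as the new owner. Coprocessor
+ * state is only moved when another task actually uses the unit.
+ */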
+ENTRY(fast_coprocessor)
+
+	/* Save remaining registers a1-a3 and SAR */
+
+	s32i	a3, a2, PT_AREG3
+	rsr	a3, sar
+	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_SAR
+	mov	a1, a2
+	rsr	a2, depc
+	s32i	a2, a1, PT_AREG2
+
+	/*
+	 * The hal macros require up to 4 temporary registers. We use a3..a6.
+	 */
+
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+
+	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+	rsr	a3, exccause
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+	ssl	a3			# SAR: 32 - coprocessor_number
+	movi	a2, 1
+	rsr	a0, cpenable
+	sll	a2, a2
+	or	a0, a0, a2
+	wsr	a0, cpenable
+	rsync
+
+	/* Retrieve previous owner. (a3 still holds CP number) */
+
+	movi	a0, coprocessor_owner	# list of owners
+	addx4	a0, a3, a0		# entry for CP
+	l32i	a4, a0, 0
+
+	beqz	a4, 1f			# skip 'save' if no previous owner
+
+	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+	l32i	a5, a4, THREAD_CPENABLE
+	xor	a5, a5, a2		# (1 << cp-id) still in a2
+	s32i	a5, a4, THREAD_CPENABLE
+
+	/*
+	 * Get context save area and 'call' save routine. 
+	 * (a4 still holds previous owner (thread_info), a3 CP number)
+	 */
+
+	movi	a5, .Lsave_cp_regs_jump_table
+	movi	a0, 2f			# a0: 'return' address
+	addx8	a3, a3, a5		# a3: coprocessor number
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5		# a4: address of save routine
+	jx	a4
+
+	/* Note that only a0 and a1 were preserved. */
+
+2:	rsr	a3, exccause
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+	movi	a0, coprocessor_owner
+	addx4	a0, a3, a0
+
+	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1:	GET_THREAD_INFO (a4, a1)
+	s32i	a4, a0, 0
+
+	/* Get context save area and 'call' load routine. */
+
+	movi	a5, .Lload_cp_regs_jump_table
+	movi	a0, 1f
+	addx8	a3, a3, a5
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5
+	jx	a4
+
+	/* Restore all registers and return from exception handler. */
+
+1:	l32i	a6, a1, PT_AREG6
+	l32i	a5, a1, PT_AREG5
+	l32i	a4, a1, PT_AREG4
+
+	l32i	a0, a1, PT_SAR
+	l32i	a3, a1, PT_AREG3
+	l32i	a2, a1, PT_AREG2
+	wsr	a0, sar
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+
+	rfe
+
+ENDPROC(fast_coprocessor)
+
+	.data
+
+ENTRY(coprocessor_owner)
+
+	.fill XCHAL_CP_MAX, 4, 0
+
+END(coprocessor_owner)
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 0000000..9cbc380
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,2047 @@
+/*
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+#include <variant/tie-asm.h>
+
+/* Unimplemented features. */
+
+#undef KERNEL_STACK_OVERFLOW_CHECK
+
+/* Not well tested.
+ *
+ * - fast_coprocessor
+ */
+
+/*
+ * Macro to find first bit set in WINDOWBASE from the left + 1
+ *
+ * 100....0 -> 1
+ * 010....0 -> 2
+ * 000....1 -> WSBITS
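+ *
+ * E.g. with WSBITS == 16: a mask of 0x8000 yields 1, 0x0001 yields 16.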
+ */
+
+	.macro ffs_ws bit mask
+
+#if XCHAL_HAVE_NSA
+	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
+	addi    \bit, \bit, WSBITS - 32 + 1   	# uppermost bit set -> return 1
+#else
+	movi    \bit, WSBITS
+#if WSBITS > 16
+	_bltui  \mask, 0x10000, 99f
+	addi    \bit, \bit, -16
+	extui   \mask, \mask, 16, 16
+#endif
+#if WSBITS > 8
+99:	_bltui  \mask, 0x100, 99f
+	addi    \bit, \bit, -8
+	srli    \mask, \mask, 8
+#endif
+99:	_bltui  \mask, 0x10, 99f
+	addi    \bit, \bit, -4
+	srli    \mask, \mask, 4
+99:	_bltui  \mask, 0x4, 99f
+	addi    \bit, \bit, -2
+	srli    \mask, \mask, 2
+99:	_bltui  \mask, 0x2, 99f
+	addi    \bit, \bit, -1
+99:
+
+#endif
+	.endm
+
+
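+/* irq_save: disable interrupts up to LOCKLEVEL and return the previous
+ * PS in \flags. Under XTENSA_FAKE_NMI this is done without ever lowering
+ * PS.INTLEVEL, since the current level may already be above LOCKLEVEL.
+ */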
+	.macro	irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	rsr	\flags, ps
+	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	\tmp, LOCKLEVEL, 99f
+	rsil	\tmp, LOCKLEVEL
+99:
+#else
+	movi	\tmp, LOCKLEVEL
+	rsr	\flags, ps
+	or	\flags, \flags, \tmp
+	xsr	\flags, ps
+	rsync
+#endif
+#else
+	rsil	\flags, LOCKLEVEL
+#endif
+	.endm
+
+/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
+
+/*
+ * First-level exception handler for user exceptions.
+ * Save some special registers, extra states and all registers in the AR
+ * register file that were in use in the user task, and jump to the common
+ * exception code.
+ * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
+ * save them for kernel exceptions).
+ *
+ * Entry condition for user_exception:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original value in depc
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _user_exception:
+ *
+ *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ *   excsave has been restored, and
+ *   stack pointer (a1) has been set.
+ *
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
+ */
+	.literal_position
+
+ENTRY(user_exception)
+
+	/* Save a1, a2, a3, and set SP. */
+
+	rsr	a0, depc
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	mov	a1, a2
+
+	.globl _user_exception
+_user_exception:
+
+	/* Save SAR and turn off single stepping */
+
+	movi	a2, 0
+	wsr	a2, depc		# terminate user stack trace with 0
+	rsr	a3, sar
+	xsr	a2, icountlevel
+	s32i	a3, a1, PT_SAR
+	s32i	a2, a1, PT_ICOUNTLEVEL
+
+#if XCHAL_HAVE_THREADPTR
+	rur	a2, threadptr
+	s32i	a2, a1, PT_THREADPTR
+#endif
+
+	/* Rotate ws so that the current windowbase is at bit0. */
+	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+	rsr	a2, windowbase
+	rsr	a3, windowstart
+	ssr	a2
+	s32i	a2, a1, PT_WINDOWBASE
+	s32i	a3, a1, PT_WINDOWSTART
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2
+	srli	a2, a2, 32-WSBITS
+	s32i	a2, a1, PT_WMASK	# needed for restoring registers
+
+	/* Save only live registers. */
+
+	_bbsi.l	a2, 1, 1f
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+	s32i	a7, a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	s32i	a8, a1, PT_AREG8
+	s32i	a9, a1, PT_AREG9
+	s32i	a10, a1, PT_AREG10
+	s32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	s32i	a12, a1, PT_AREG12
+	s32i	a13, a1, PT_AREG13
+	s32i	a14, a1, PT_AREG14
+	s32i	a15, a1, PT_AREG15
+	_bnei	a2, 1, 1f		# only one valid frame?
+
+	/* Only one valid frame, skip saving regs. */
+
+	j	2f
+
+	/* Save the remaining registers.
+	 * We have to save all registers up to the first '1' from
+	 * the right, except the current frame (bit 0).
+	 * Assume a2 is:  001001000110001
+	 * All register frames starting from the top field to the marked '1'
+	 * must be saved.
+	 */
+
+1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
+	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
+	and	a3, a3, a2		# max. only one bit is set
+
+	/* Find number of frames to save */
+
+	ffs_ws	a0, a3			# number of frames to the '1' from left
+
+	/* Store information into WMASK:
+	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
+	 * bits 4...: number of valid 4-register frames
+	 */
+
+	slli	a3, a0, 4		# number of frames to save in bits 8..4
+	extui	a2, a2, 0, 4		# mask for the first 16 registers
+	or	a2, a3, a2
+	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
+
+	/* Save 4 registers at a time */
+
+1:	rotw	-1
+	s32i	a0, a5, PT_AREG_END - 16
+	s32i	a1, a5, PT_AREG_END - 12
+	s32i	a2, a5, PT_AREG_END - 8
+	s32i	a3, a5, PT_AREG_END - 4
+	addi	a0, a4, -1
+	addi	a1, a5, -16
+	_bnez	a0, 1b
+
+	/* WINDOWBASE still in SAR! */
+
+	rsr	a2, sar			# original WINDOWBASE
+	movi	a3, 1
+	ssl	a2
+	sll	a3, a3
+	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
+	wsr	a2, windowbase		# and WINDOWSTART
+	rsync
+
+	/* We are back to the original stack pointer (a1) */
+
+2:	/* Now, jump to the common exception handler. */
+
+	j	common_exception
+
+ENDPROC(user_exception)
+
+/*
+ * First-level exit handler for kernel exceptions
+ * Save special registers and the live window frame.
+ * Note: Even though we change the stack pointer, we don't have to do a
+ *	 MOVSP here, as we do that when we return from the exception.
+ *	 (See comment in the kernel exception exit code)
+ *
+ * Entry condition for kernel_exception:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _kernel_exception:
+ *
+ *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ *   excsave has been restored, and
+ *   stack pointer (a1) has been set.
+ *
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(kernel_exception)
+
+	/* Save a1, a2, a3, and set SP. */
+
+	rsr	a0, depc		# get a2
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_AREG2
+	s32i	a3, a2, PT_AREG3
+	mov	a1, a2
+
+	.globl _kernel_exception
+_kernel_exception:
+
+	/* Save SAR and turn off single stepping */
+
+	movi	a2, 0
+	rsr	a3, sar
+	xsr	a2, icountlevel
+	s32i	a3, a1, PT_SAR
+	s32i	a2, a1, PT_ICOUNTLEVEL
+
+	/* Rotate ws so that the current windowbase is at bit0. */
+	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+	rsr	a2, windowbase		# don't need to save these, we only
+	rsr	a3, windowstart		# need shifted windowstart: windowmask
+	ssr	a2
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2
+	srli	a2, a2, 32-WSBITS
+	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
+
+	/* Save only the live window-frame */
+
+	_bbsi.l	a2, 1, 1f
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+	s32i	a7, a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	s32i	a8, a1, PT_AREG8
+	s32i	a9, a1, PT_AREG9
+	s32i	a10, a1, PT_AREG10
+	s32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	s32i	a12, a1, PT_AREG12
+	s32i	a13, a1, PT_AREG13
+	s32i	a14, a1, PT_AREG14
+	s32i	a15, a1, PT_AREG15
+
+	_bnei	a2, 1, 1f
+
+	/* Copy spill slots of a0 and a1 to imitate movsp
+	 * in order to keep exception stack continuous
+	 */
+	l32i	a3, a1, PT_SIZE
+	l32i	a0, a1, PT_SIZE + 4
+	s32e	a3, a1, -16
+	s32e	a0, a1, -12
+1:
+	l32i	a0, a1, PT_AREG0	# restore saved a0
+	wsr	a0, depc
+
+#ifdef KERNEL_STACK_OVERFLOW_CHECK
+
+	/*  Stack overflow check, for debugging  */
+	extui	a2, a1, TASK_SIZE_BITS,XX
+	movi	a3, SIZE??
+	_bge	a2, a3, out_of_stack_panic
+
+#endif
+
+/*
+ * This is the common exception handler.
+ * We get here from the user exception handler or simply by falling through
+ * from the kernel exception handler.
+ * Save the remaining special registers, switch to kernel mode, and jump
+ * to the second-level exception handler.
+ *
+ */
+
+common_exception:
+
+	/* Save some registers, disable loops and clear the syscall flag. */
+
+	rsr	a2, debugcause
+	rsr	a3, epc1
+	s32i	a2, a1, PT_DEBUGCAUSE
+	s32i	a3, a1, PT_PC
+
+	movi	a2, -1
+	rsr	a3, excvaddr
+	s32i	a2, a1, PT_SYSCALL
+	movi	a2, 0
+	s32i	a3, a1, PT_EXCVADDR
+#if XCHAL_HAVE_LOOPS
+	xsr	a2, lcount
+	s32i	a2, a1, PT_LCOUNT
+#endif
+
+	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+	rsr	a2, exccause
+	movi	a3, 0
+	rsr	a0, excsave1
+	s32i	a2, a1, PT_EXCCAUSE
+	s32i	a3, a0, EXC_TABLE_FIXUP
+
+	/* All unrecoverable states are saved on stack, now, and a1 is valid.
+	 * Now we can allow exceptions again. In case we've got an interrupt
+	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
+	 * otherwise it's left unchanged.
+	 *
+	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+	 */
+
+	rsr	a3, ps
+	s32i	a3, a1, PT_PS		# save ps
+
+#if XTENSA_FAKE_NMI
+	/* Correct PS needs to be saved in the PT_PS:
+	 * - in case of exception or level-1 interrupt it's in the PS,
+	 *   and is already saved.
+	 * - in case of medium level interrupt it's in the excsave2.
+	 */
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beq	a2, a0, .Lmedium_level_irq
+	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+	rsr	a0, excsave2
+	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
+	bgei	a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+	movi	a3, LOCKLEVEL
+
+.Lexception:
+	movi	a0, 1 << PS_WOE_BIT
+	or	a3, a3, a0
+#else
+	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+	movi	a0, LOCKLEVEL
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+					# a3 = PS.INTLEVEL
+	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
+	movi	a2, 1 << PS_WOE_BIT
+	or	a3, a3, a2
+	rsr	a2, exccause
+#endif
+
+	/* restore return address (or 0 if return to userspace) */
+	rsr	a0, depc
+	wsr	a3, ps
+	rsync				# PS.WOE => rsync => overflow
+
+	/* Save lbeg, lend */
+#if XCHAL_HAVE_LOOPS
+	rsr	a4, lbeg
+	rsr	a3, lend
+	s32i	a4, a1, PT_LBEG
+	s32i	a3, a1, PT_LEND
+#endif
+
+	/* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+	rsr     a3, scompare1
+	s32i    a3, a1, PT_SCOMPARE1
+#endif
+
+	/* Save optional registers. */
+
+	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+	
+	/* Go to second-level dispatcher. Set up parameters to pass to the
+	 * exception handler and call the exception handler.
+	 */
+
+	rsr	a4, excsave1
+	mov	a6, a1			# pass stack frame
+	mov	a7, a2			# pass EXCCAUSE
+	addx4	a4, a2, a4
+	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
+
+	/* Call the second-level handler */
+
+	callx4	a4
+
+	/* Jump here for exception exit */
+	.global common_exception_return
+common_exception_return:
+
+#if XTENSA_FAKE_NMI
+	l32i	a2, a1, PT_EXCCAUSE
+	movi	a3, EXCCAUSE_MAPPED_NMI
+	beq	a2, a3, .LNMIexit
+#endif
+1:
+	irq_save a2, a3
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call4	trace_hardirqs_off
+#endif
+
+	/* Jump if we are returning from kernel exceptions. */
+
+	l32i	a3, a1, PT_PS
+	GET_THREAD_INFO(a2, a1)
+	l32i	a4, a2, TI_FLAGS
+	_bbci.l	a3, PS_UM_BIT, 6f
+
+	/* Specific to a user exception exit:
+	 * We need to check some flags for signal handling and rescheduling,
+	 * and have to restore WB and WS, extra states, and all registers
+	 * in the register file that were in use in the user task.
+	 * Note that we don't disable interrupts here. 
+	 */
+
+	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
+	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
+	_bbci.l	a4, TIF_SIGPENDING, 5f
+
+2:	l32i	a4, a1, PT_DEPC
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+
+	/* Call do_signal() */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call4	trace_hardirqs_on
+#endif
+	rsil	a2, 0
+	mov	a6, a1
+	call4	do_notify_resume	# int do_notify_resume(struct pt_regs*)
+	j	1b
+
+3:	/* Reschedule */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call4	trace_hardirqs_on
+#endif
+	rsil	a2, 0
+	call4	schedule	# void schedule (void)
+	j	1b
+
+#ifdef CONFIG_PREEMPT
+6:
+	_bbci.l	a4, TIF_NEED_RESCHED, 4f
+
+	/* Check current_thread_info->preempt_count */
+
+	l32i	a4, a2, TI_PRE_COUNT
+	bnez	a4, 4f
+	call4	preempt_schedule_irq
+	j	1b
+#endif
+
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+	l32i	a3, a1, PT_PS
+	_bbci.l	a3, PS_UM_BIT, 4f
+#endif
+
+5:
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	_bbci.l	a4, TIF_DB_DISABLED, 7f
+	call4	restore_dbreak
+7:
+#endif
+#ifdef CONFIG_DEBUG_TLB_SANITY
+	l32i	a4, a1, PT_DEPC
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+	call4	check_tlb_sanity
+#endif
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	a4, LOCKLEVEL, 1f
+	call4	trace_hardirqs_on
+1:
+#endif
+	/* Restore optional registers. */
+
+	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
+	/* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+	l32i    a2, a1, PT_SCOMPARE1
+	wsr     a2, scompare1
+#endif
+	wsr	a3, ps		/* disable interrupts */
+
+	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
+
+user_exception_exit:
+
+	/* Restore the state of the task and return from the exception. */
+
+	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
+
+	l32i	a2, a1, PT_WINDOWBASE
+	l32i	a3, a1, PT_WINDOWSTART
+	wsr	a1, depc		# use DEPC as temp storage
+	wsr	a3, windowstart		# restore WINDOWSTART
+	ssr	a2			# preserve user's WB in the SAR
+	wsr	a2, windowbase		# switch to user's saved WB
+	rsync
+	rsr	a1, depc		# restore stack pointer
+	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
+	rotw	-1			# we restore a4..a7
+	_bltui	a6, 16, 1f		# only have to restore current window?
+
+	/* The working registers are a0 and a3.  We are restoring to
+	 * a4..a7.  Be careful not to destroy what we have just restored.
+	 * Note: wmask has the format YYYYM:
+	 *       Y: number of registers saved in groups of 4
+	 *       M: 4 bit mask of first 16 registers
+	 */
+
+	mov	a2, a6
+	mov	a3, a5
+
+2:	rotw	-1			# a0..a3 become a4..a7
+	addi	a3, a7, -4*4		# next iteration
+	addi	a2, a6, -16		# decrementing Y in WMASK
+	l32i	a4, a3, PT_AREG_END + 0
+	l32i	a5, a3, PT_AREG_END + 4
+	l32i	a6, a3, PT_AREG_END + 8
+	l32i	a7, a3, PT_AREG_END + 12
+	_bgeui	a2, 16, 2b
+
+	/* Clear unrestored registers (don't leak anything to user-land) */
+
+1:	rsr	a0, windowbase
+	rsr	a3, sar
+	sub	a3, a0, a3
+	beqz	a3, 2f
+	extui	a3, a3, 0, WBBITS
+
+1:	rotw	-1
+	addi	a3, a7, -1
+	movi	a4, 0
+	movi	a5, 0
+	movi	a6, 0
+	movi	a7, 0
+	bgei	a3, 1, 1b
+
+	/* We are back where we were when we started.
+	 * Note: a2 still contains WMASK (if we've returned to the original
+	 *	 frame where we had loaded a2), or at least the lower 4 bits
+	 *	 (if we have restored WSBITS-1 frames).
+	 */
+
+2:
+#if XCHAL_HAVE_THREADPTR
+	l32i	a3, a1, PT_THREADPTR
+	wur	a3, threadptr
+#endif
+
+	j	common_exception_exit
+
+	/* This is the kernel exception exit.
+	 * We avoided to do a MOVSP when we entered the exception, but we
+	 * have to do it here.
+	 */
+
+kernel_exception_exit:
+
+	/* Check if we have to do a movsp.
+	 *
+	 * We only have to do a movsp if the previous window-frame has
+	 * been spilled to the *temporary* exception stack instead of the
+	 * task's stack. This is the case if the corresponding bit in
+	 * WINDOWSTART for the previous window-frame was set before
+	 * (not spilled) but is zero now (spilled).
+	 * If this bit is zero, all other bits except the one for the
+	 * current window frame are also zero. So, we can use a simple test:
+	 * 'and' WINDOWSTART and WINDOWSTART-1:
+	 *
+	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
+	 *
+	 * The result is zero only if one bit was set.
+	 *
+	 * (Note: We might have gone through several task switches before
+	 *        we come back to the current task, so WINDOWBASE might be
+	 *        different from the time the exception occurred.)
+	 */
+
+	/* Test WINDOWSTART before and after the exception.
+	 * We actually have WMASK, so we only have to test if it is 1 or not.
+	 */
+
+	l32i	a2, a1, PT_WMASK
+	_beqi	a2, 1, common_exception_exit	# Spilled before exception,jump
+
+	/* Test WINDOWSTART now. If spilled, do the movsp */
+
+	rsr     a3, windowstart
+	addi	a0, a3, -1
+	and     a3, a3, a0
+	_bnez	a3, common_exception_exit
+
+	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */
+
+	addi    a0, a1, -16
+	l32i    a3, a0, 0
+	l32i    a4, a0, 4
+	s32i    a3, a1, PT_SIZE+0
+	s32i    a4, a1, PT_SIZE+4
+	l32i    a3, a0, 8
+	l32i    a4, a0, 12
+	s32i    a3, a1, PT_SIZE+8
+	s32i    a4, a1, PT_SIZE+12
+
+	/* Common exception exit.
+	 * We restore the special register and the current window frame, and
+	 * return from the exception.
+	 *
+	 * Note: We expect a2 to hold PT_WMASK
+	 */
+
+common_exception_exit:
+
+	/* Restore address registers. */
+
+	_bbsi.l	a2, 1, 1f
+	l32i	a4,  a1, PT_AREG4
+	l32i	a5,  a1, PT_AREG5
+	l32i	a6,  a1, PT_AREG6
+	l32i	a7,  a1, PT_AREG7
+	_bbsi.l	a2, 2, 1f
+	l32i	a8,  a1, PT_AREG8
+	l32i	a9,  a1, PT_AREG9
+	l32i	a10, a1, PT_AREG10
+	l32i	a11, a1, PT_AREG11
+	_bbsi.l	a2, 3, 1f
+	l32i	a12, a1, PT_AREG12
+	l32i	a13, a1, PT_AREG13
+	l32i	a14, a1, PT_AREG14
+	l32i	a15, a1, PT_AREG15
+
+	/* Restore PC, SAR */
+
+1:	l32i	a2, a1, PT_PC
+	l32i	a3, a1, PT_SAR
+	wsr	a2, epc1
+	wsr	a3, sar
+
+	/* Restore LBEG, LEND, LCOUNT */
+#if XCHAL_HAVE_LOOPS
+	l32i	a2, a1, PT_LBEG
+	l32i	a3, a1, PT_LEND
+	wsr	a2, lbeg
+	l32i	a2, a1, PT_LCOUNT
+	wsr	a3, lend
+	wsr	a2, lcount
+#endif
+
+	/* We control single stepping through the ICOUNTLEVEL register. */
+
+	l32i	a2, a1, PT_ICOUNTLEVEL
+	movi	a3, -2
+	wsr	a2, icountlevel
+	wsr	a3, icount
+
+	/* Check if it was double exception. */
+
+	l32i	a0, a1, PT_DEPC
+	l32i	a3, a1, PT_AREG3
+	l32i	a2, a1, PT_AREG2
+	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	/* Restore a0...a3 and return */
+
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfe
+
+1: 	wsr	a0, depc
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfde
+
+ENDPROC(kernel_exception)
+
+/*
+ * Debug exception handler.
+ *
+ * Currently, we don't support KGDB, so only user applications can be debugged.
+ *
+ * When we get here,  a0 is trashed and saved to excsave[debuglevel]
+ */
+
+	.literal_position
+
+ENTRY(debug_exception)
+
+	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
+	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
+
+	/* Set EPC1 and EXCCAUSE */
+
+	wsr	a2, depc		# save a2 temporarily
+	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
+	wsr	a2, epc1
+
+	movi	a2, EXCCAUSE_MAPPED_DEBUG
+	wsr	a2, exccause
+
+	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
+
+	movi	a2, 1 << PS_EXCM_BIT
+	or	a2, a0, a2
+	wsr	a2, ps
+
+	/* Switch to kernel/user stack, restore jump vector, and save a0 */
+
+	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
+
+	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
+3:
+	l32i	a0, a3, DT_DEBUG_SAVE
+	s32i	a1, a2, PT_AREG1
+	s32i	a0, a2, PT_AREG0
+	movi	a0, 0
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+	xsr	a0, depc
+	s32i	a3, a2, PT_AREG3
+	s32i	a0, a2, PT_AREG2
+	mov	a1, a2
+
+	/* Debug exception is handled as an exception, so interrupts will
+	 * likely be enabled in the common exception handler. Disable
+	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
+	 * meaning.
+	 */
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
+	GET_THREAD_INFO(a2, a1)
+	l32i	a3, a2, TI_PRE_COUNT
+	addi	a3, a3, 1
+	s32i	a3, a2, TI_PRE_COUNT
+#endif
+
+	rsr	a2, ps
+	bbsi.l	a2, PS_UM_BIT, _user_exception
+	j	_kernel_exception
+
+2:	rsr	a2, excsave1
+	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
+	j	3b
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	/* Debug exception while in exception mode. This may happen when
+	 * window overflow/underflow handler or fast exception handler hits
+	 * data breakpoint, in which case save and disable all data
+	 * breakpoints, single-step faulting instruction and restore data
+	 * breakpoints.
+	 */
+1:
+	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode
+
+	rsr	a0, debugcause
+	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
+
+	.set	_index, 0
+	.rept	XCHAL_NUM_DBREAK
+	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
+	wsr	a0, SREG_DBREAKC + _index
+	.set	_index, _index + 1
+	.endr
+
+	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
+	wsr	a0, icountlevel
+
+	l32i	a0, a3, DT_ICOUNT_SAVE
+	xsr	a0, icount
+
+	l32i	a0, a3, DT_DEBUG_SAVE
+	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+	rfi	XCHAL_DEBUGLEVEL
+
+.Ldebug_save_dbreak:
+	.set	_index, 0
+	.rept	XCHAL_NUM_DBREAK
+	movi	a0, 0
+	xsr	a0, SREG_DBREAKC + _index
+	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
+	.set	_index, _index + 1
+	.endr
+
+	movi	a0, XCHAL_EXCM_LEVEL + 1
+	xsr	a0, icountlevel
+	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
+
+	movi	a0, 0xfffffffe
+	xsr	a0, icount
+	s32i	a0, a3, DT_ICOUNT_SAVE
+
+	l32i	a0, a3, DT_DEBUG_SAVE
+	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+	rfi	XCHAL_DEBUGLEVEL
+#else
+	/* Debug exception while in exception mode. Should not happen. */
+1:	j	1b	// FIXME!!
+#endif
+
+ENDPROC(debug_exception)
+
+/*
+ * We get here in case of an unrecoverable exception.
+ * The only thing we can do is to be nice and print a panic message.
+ * We only produce a single stack frame for panic, so ???
+ *
+ *
+ * Entry conditions:
+ *
+ *   - a0 contains the caller address; original value saved in excsave1.
+ *   - the original a0 contains a valid return address (backtrace) or 0.
+ *   - a2 contains a valid stackpointer
+ *
+ * Notes:
+ *
+ *   - If the stack pointer could be invalid, the caller has to set up a
+ *     dummy stack pointer (e.g. the stack of the init_task)
+ *
+ *   - If the return address could be invalid, the caller has to set it
+ *     to 0, so the backtrace would stop.
+ *
+ */
+	.align 4
+unrecoverable_text:
+	.ascii "Unrecoverable error in exception handler\0"
+
+	.literal_position
+
+ENTRY(unrecoverable_exception)
+
+	movi	a0, 1
+	movi	a1, 0
+
+	wsr	a0, windowstart
+	wsr	a1, windowbase
+	rsync
+
+	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+	movi	a1, init_task
+	movi	a0, 0
+	addi	a1, a1, PT_REGS_OFFSET
+
+	movi	a6, unrecoverable_text
+	call4	panic
+
+1:	j	1b
+
+ENDPROC(unrecoverable_exception)
+
+/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+/*
+ * Fast-handler for alloca exceptions
+ *
+ *  The ALLOCA handler is entered when user code executes the MOVSP
+ *  instruction and the caller's frame is not in the register file.
+ *
+ * This algorithm was taken from the Ross Morley's RTOS Porting Layer:
+ *
+ *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow exception
+ * of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed and this time, since the next window frame is in the
+ * active AR registers, it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss, the page will get mapped
+ * and the partial WindowUnderflow will be handled in the double exception
+ * handler.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_alloca)
+	rsr	a0, windowbase
+	rotw	-1
+	rsr	a2, ps
+	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+	xor	a3, a3, a4
+	l32i	a4, a6, PT_AREG0
+	l32i	a1, a6, PT_DEPC
+	rsr	a6, depc
+	wsr	a1, depc
+	slli	a3, a3, PS_OWB_SHIFT
+	xor	a2, a2, a3
+	wsr	a2, ps
+	rsync
+
+	_bbci.l	a4, 31, 4f
+	rotw	-1
+	_bbci.l	a8, 30, 8f
+	rotw	-1
+	j	_WindowUnderflow12
+8:	j	_WindowUnderflow8
+4:	j	_WindowUnderflow4
+ENDPROC(fast_alloca)
+
+/*
+ * fast system calls.
+ *
+ * WARNING:  The kernel doesn't save the entire user context before
+ * handling a fast system call.  These functions are small and short,
+ * usually offering some functionality not available to user tasks.
+ *
+ * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ */
+
+ENTRY(fast_syscall_kernel)
+
+	/* Skip the 3-byte syscall instruction. */
+
+	rsr	a0, epc1
+	addi	a0, a0, 3
+	wsr	a0, epc1
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+	rsr	a0, depc			# get syscall-nr
+	_beqz	a0, fast_syscall_spill_registers
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
+
+	j	kernel_exception
+
+ENDPROC(fast_syscall_kernel)
+
+ENTRY(fast_syscall_user)
+
+	/* Skip the 3-byte syscall instruction. */
+
+	rsr	a0, epc1
+	addi	a0, a0, 3
+	wsr	a0, epc1
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+	rsr	a0, depc			# get syscall-nr
+	_beqz	a0, fast_syscall_spill_registers
+	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
+
+	j	user_exception
+
+ENDPROC(fast_syscall_user)
+
+ENTRY(fast_syscall_unrecoverable)
+
+	/* Restore all states. */
+
+	l32i    a0, a2, PT_AREG0        # restore a0
+	xsr     a2, depc                # restore a2, depc
+
+	wsr     a0, excsave1
+	call0	unrecoverable_exception
+
+ENDPROC(fast_syscall_unrecoverable)
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ *        a2            a6                   a3    a4      a5
+ *
+ * Entry condition:
+ *
+ *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in a0 and DEPC
+ *   a3:	a3
+ *   a4..a15:	unchanged
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: we don't have to save a2; a2 holds the return value
+ */
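+
+/* Illustrative user-space view (not part of this file; the libc
+ * syscall(2) plumbing is an assumption):
+ *
+ *	int r = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
+ *			&word, oldval, newval);
+ *
+ * r is 1 if word matched oldval and was replaced by newval, 0 on
+ * mismatch; bad pointers or opcodes yield -EFAULT/-EINVAL (surfaced
+ * through errno by libc).
+ */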
+
+	.literal_position
+
+#ifdef CONFIG_FAST_SYSCALL_XTENSA
+
+ENTRY(fast_syscall_xtensa)
+
+	s32i	a7, a2, PT_AREG7	# we need an additional register
+	movi	a7, 4			# sizeof(unsigned int)
+	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
+
+	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
+	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
+
+	/* Fall through for ATOMIC_CMP_SWP. */
+
+.Lswp:	/* Atomic compare and swap */
+
+EX(.Leac) l32i	a0, a3, 0		# read old value
+	bne	a0, a4, 1f		# different from expected? jump
+EX(.Leac) s32i	a5, a3, 0		# matches, store new value
+	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 1			# and return 1
+	rfe
+
+1:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, 0			# return 0
+	rfe
+
+.Lnswp:	/* Atomic set, add, and exg_add. */
+
+EX(.Leac) l32i	a7, a3, 0		# orig
+	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
+	add	a0, a4, a7		# + arg
+	moveqz	a0, a4, a6		# set
+	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
+EX(.Leac) s32i	a0, a3, 0		# write new value
+
+	mov	a0, a2
+	mov	a2, a7
+	l32i	a7, a0, PT_AREG7	# restore a7
+	l32i	a0, a0, PT_AREG0	# restore a0
+	rfe
+
+.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EFAULT
+	rfe
+
+.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
+	l32i	a0, a2, PT_AREG0	# restore a0
+	movi	a2, -EINVAL
+	rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#else /* CONFIG_FAST_SYSCALL_XTENSA */
+
+ENTRY(fast_syscall_xtensa)
+
+	l32i    a0, a2, PT_AREG0        # restore a0
+	movi	a2, -ENOSYS
+	rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#endif /* CONFIG_FAST_SYSCALL_XTENSA */
+
+
+/* fast_syscall_spill_registers.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
+ */
+
+#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
+
+ENTRY(fast_syscall_spill_registers)
+
+	/* Register a FIXUP handler (pass current wb as a parameter) */
+
+	xsr	a3, excsave1
+	movi	a0, fast_syscall_spill_registers_fixup
+	s32i	a0, a3, EXC_TABLE_FIXUP
+	rsr	a0, windowbase
+	s32i	a0, a3, EXC_TABLE_PARAM
+	xsr	a3, excsave1		# restore a3 and excsave_1
+
+	/* Save a3, a4 and SAR on stack. */
+
+	rsr	a0, sar
+	s32i	a3, a2, PT_AREG3
+	s32i	a0, a2, PT_SAR
+
+	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+
+	s32i	a4, a2, PT_AREG4
+	s32i	a7, a2, PT_AREG7
+	s32i	a8, a2, PT_AREG8
+	s32i	a11, a2, PT_AREG11
+	s32i	a12, a2, PT_AREG12
+	s32i	a15, a2, PT_AREG15
+
+	/*
+	 * Rotate ws so that the current windowbase is at bit 0.
+	 * Assume ws = xxxwww1yy (www1 current window frame).
+	 * Rotate ws right so that a3 = yyxxxwww1.
+	 */
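+
+	/* The sequence below computes, in C terms (illustrative):
+	 *
+	 *	a3 = ((ws << WSBITS) | ws) >> wb;
+	 *
+	 * i.e. a double-width copy of ws shifted right by the windowbase.
+	 */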
+
+	rsr	a0, windowbase
+	rsr	a3, windowstart		# a3 = xxxwww1yy
+	ssr	a0			# holds WB
+	slli	a0, a3, WSBITS
+	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
+	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
+
+	/* We are done if there is no frame other than the current one. */
+
+	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
+	movi	a0, (1 << (WSBITS-1))
+	_beqz	a3, .Lnospill		# only one active frame? jump
+
+	/* We want 1 at the top, so that we return to the current windowbase */
+
+	or	a3, a3, a0		# 1yyxxxwww
+
+	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+	wsr	a3, windowstart		# save shifted windowstart
+	neg	a0, a3
+	and	a3, a0, a3		# first bit set from right: 000010000
+
+	ffs_ws	a0, a3			# a0: shifts to skip empty frames
+	movi	a3, WSBITS
+	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
+	ssr	a0			# save in SAR for later.
+
+	rsr	a3, windowbase
+	add	a3, a3, a0
+	wsr	a3, windowbase
+	rsync
+
+	rsr	a3, windowstart
+	srl	a3, a3			# shift windowstart
+
+	/* WB is now just one frame below the oldest frame in the register
+	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
+	   and WS differ by one 4-register frame. */
+
+	/* Save frames. Depending on which call was used (call4, call8, or
+	 * call12), we have to save 4, 8, or 12 registers.
+	 */
+
+
+.Lloop: _bbsi.l	a3, 1, .Lc4
+	_bbci.l	a3, 2, .Lc12
+
+.Lc8:	s32e	a4, a13, -16
+	l32e	a4, a5, -12
+	s32e	a8, a4, -32
+	s32e	a5, a13, -12
+	s32e	a6, a13, -8
+	s32e	a7, a13, -4
+	s32e	a9, a4, -28
+	s32e	a10, a4, -24
+	s32e	a11, a4, -20
+	srli	a11, a3, 2		# shift windowstart by 2
+	rotw	2
+	_bnei	a3, 1, .Lloop
+	j	.Lexit
+
+.Lc4:	s32e	a4, a9, -16
+	s32e	a5, a9, -12
+	s32e	a6, a9, -8
+	s32e	a7, a9, -4
+
+	srli	a7, a3, 1
+	rotw	1
+	_bnei	a3, 1, .Lloop
+	j	.Lexit
+
+.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
+
+	/* 12-register frame (call12) */
+
+	l32e	a0, a5, -12
+	s32e	a8, a0, -48
+	mov	a8, a0
+
+	s32e	a9, a8, -44
+	s32e	a10, a8, -40
+	s32e	a11, a8, -36
+	s32e	a12, a8, -32
+	s32e	a13, a8, -28
+	s32e	a14, a8, -24
+	s32e	a15, a8, -20
+	srli	a15, a3, 3
+
+	/* The stack pointer for a4..a7 is out of reach, so we rotate the
+	 * window, grab the stackpointer, and rotate back.
+	 * Alternatively, we could also use the following approach, but that
+	 * makes the fixup routine much more complicated:
+	 * rotw	1
+	 * s32e	a0, a13, -16
+	 * ...
+	 * rotw 2
+	 */
+
+	rotw	1
+	mov	a4, a13
+	rotw	-1
+
+	s32e	a4, a8, -16
+	s32e	a5, a8, -12
+	s32e	a6, a8, -8
+	s32e	a7, a8, -4
+
+	rotw	3
+
+	_beqi	a3, 1, .Lexit
+	j	.Lloop
+
+.Lexit:
+
+	/* Done. Do the final rotation and set WS */
+
+	rotw	1
+	rsr	a3, windowbase
+	ssl	a3
+	movi	a3, 1
+	sll	a3, a3
+	wsr	a3, windowstart
+.Lnospill:
+
+	/* Advance PC, restore registers and SAR, and return from exception. */
+
+	l32i	a3, a2, PT_SAR
+	l32i	a0, a2, PT_AREG0
+	wsr	a3, sar
+	l32i	a3, a2, PT_AREG3
+
+	/* Restore clobbered registers. */
+
+	l32i	a4, a2, PT_AREG4
+	l32i	a7, a2, PT_AREG7
+	l32i	a8, a2, PT_AREG8
+	l32i	a11, a2, PT_AREG11
+	l32i	a12, a2, PT_AREG12
+	l32i	a15, a2, PT_AREG15
+
+	movi	a2, 0
+	rfe
+
+.Linvalid_mask:
+
+	/* We get here because of an unrecoverable error in the window
+	 * registers, so set up a dummy frame and kill the user application.
+	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+	 */
+
+	movi	a0, 1
+	movi	a1, 0
+
+	wsr	a0, windowstart
+	wsr	a1, windowbase
+	rsync
+
+	movi	a0, 0
+
+	rsr	a3, excsave1
+	l32i	a1, a3, EXC_TABLE_KSTK
+
+	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+	wsr	a4, ps
+	rsync
+
+	movi	a6, SIGSEGV
+	call4	do_exit
+
+	/* shouldn't return, so panic */
+
+	wsr	a0, excsave1
+	call0	unrecoverable_exception		# should not return
+1:	j	1b
+
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
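+
+/* Rough C sketch of that reconstruction (illustrative; rotl_ws() is a
+ * hypothetical WSBITS-wide left rotation):
+ *
+ *	windowstart = rotl_ws((spill_mask << 1) | 1, windowbase);
+ */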
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+	rsr	a2, windowbase	# get current windowbase (a2 is saved)
+	xsr	a0, depc	# restore depc and a0
+	ssl	a2		# set shift (32 - WB)
+
+	/* We need to make sure the current registers (a0-a3) are preserved.
+	 * To do this, we simply set the bit for the current window frame
+	 * in WS, so that the exception handlers save them to the task stack.
+	 *
+	 * Note: we use a3 to set the windowbase, so we take a special care
+	 * of it, saving it in the original _spill_registers frame across
+	 * the exception handler call.
+	 */
+
+	xsr	a3, excsave1	# get spill-mask
+	slli	a3, a3, 1	# shift left by one
+	addi	a3, a3, 1	# set the bit for the current window frame
+
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
+	wsr	a2, windowstart	# set corrected windowstart
+
+	srli	a3, a3, 1
+	rsr	a2, excsave1
+	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
+	xsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
+	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
+	xsr	a2, excsave1
+
+	/* Return to the original (user task) WINDOWBASE.
+	 * We leave the following frame behind:
+	 * a0, a1, a2	same
+	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+	 * depc:	depc (we have to return to that address)
+	 * excsave_1:	exctable
+	 */
+
+	wsr	a3, windowbase
+	rsync
+
+	/* We are now in the original frame when we entered _spill_registers:
+	 *  a0: return address
+	 *  a1: used, stack pointer
+	 *  a2: kernel stack pointer
+	 *  a3: available
+	 *  depc: exception address
+	 *  excsave: exctable
+	 * Note: This frame might be the same as above.
+	 */
+
+	/* Setup stack pointer. */
+
+	addi	a2, a2, -PT_USER_SIZE
+	s32i	a0, a2, PT_AREG0
+
+	/* Make sure we return to this fixup handler. */
+
+	movi	a3, fast_syscall_spill_registers_fixup_return
+	s32i	a3, a2, PT_DEPC		# setup depc
+
+	/* Jump to the exception handler. */
+
+	rsr	a3, excsave1
+	rsr	a0, exccause
+	addx4	a0, a0, a3              	# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+	jx	a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+	/* When we return here, all registers have been restored (a2: DEPC) */
+
+	wsr	a2, depc		# exception address
+
+	/* Restore fixup handler. */
+
+	rsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
+	movi	a3, fast_syscall_spill_registers_fixup
+	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a3, windowbase
+	s32i	a3, a2, EXC_TABLE_PARAM
+	l32i	a2, a2, EXC_TABLE_KSTK
+
+	/* Load WB at the time the exception occurred. */
+
+	rsr	a3, sar			# WB is still in SAR
+	neg	a3, a3
+	wsr	a3, windowbase
+	rsync
+
+	rsr	a3, excsave1
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+	rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
+#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+ENTRY(fast_syscall_spill_registers)
+
+	l32i    a0, a2, PT_AREG0        # restore a0
+	movi	a2, -ENOSYS
+	rfe
+
+ENDPROC(fast_syscall_spill_registers)
+
+#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+#ifdef CONFIG_MMU
+/*
+ * We should never get here. Bail out!
+ */
+
+ENTRY(fast_second_level_miss_double_kernel)
+
+1:
+	call0	unrecoverable_exception		# should not return
+1:	j	1b
+
+ENDPROC(fast_second_level_miss_double_kernel)
+
+/* First-level entry handler for user, kernel, and double 2nd-level
+ * TLB miss exceptions.  Note that for now, user and kernel miss
+ * exceptions share the same entry point and are handled identically.
+ *
+ * An old, less-efficient C version of this function used to exist.
+ * We include it below, interleaved as comments, for reference.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_second_level_miss)
+
+	/* Save a1 and a3. Note: we don't expect a double exception. */
+
+	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_AREG3
+
+	/* We need to map the page of PTEs for the user task.  Find
+	 * the pointer to that page.  Also, it's possible for tsk->mm
+	 * to be NULL while tsk->active_mm is nonzero if we faulted on
+	 * a vmalloc address.  In that rare case, we must use
+	 * active_mm instead to avoid a fault in this handler.  See
+	 *
+	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
+	 *   (or search Internet on "mm vs. active_mm")
+	 *
+	 *	if (!mm)
+	 *		mm = tsk->active_mm;
+	 *	pgd = pgd_offset (mm, regs->excvaddr);
+	 *	pmd = pmd_offset (pgd, regs->excvaddr);
+	 *	pmdval = *pmd;
+	 */
+
+	GET_CURRENT(a1,a2)
+	l32i	a0, a1, TASK_MM		# tsk->mm
+	beqz	a0, 9f
+
+8:	rsr	a3, excvaddr		# fault address
+	_PGD_OFFSET(a0, a3, a1)
+	l32i	a0, a0, 0		# read pmdval
+	beqz	a0, 2f
+
+	/* Read ptevaddr and convert to top of page-table page.
+	 *
+	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
+	 * 	vpnval += DTLB_WAY_PGTABLE;
+	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
+	 *	write_dtlb_entry (pteval, vpnval);
+	 *
+	 * The messy computation for 'pteval' above really simplifies
+	 * into the following:
+	 *
+	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
+	 *                 | PAGE_DIRECTORY
+	 */
+
+	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
+	add	a0, a0, a1		# pmdval - PAGE_OFFSET
+	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
+	xor	a0, a0, a1
+
+	movi	a1, _PAGE_DIRECTORY
+	or	a0, a0, a1		# ... | PAGE_DIRECTORY
+
+	/*
+	 * We utilize all three wired-ways (7-9) to hold pmd translations.
+	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+	 * This allows mapping the three most common regions to three different
+	 * DTLBs:
+	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
+	 *  2   -> way 8	shared libraries (2000.0000)
+	 *  3   -> way 9	stack (3000.0000)
+	 */
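+
+	/* Way selection below, in C terms (illustrative):
+	 *
+	 *	region = (excvaddr >> 28) & 3;
+	 *	way = DTLB_WAY_PGD + (region * 3) / 4;	// 0,1 -> +0, 2 -> +1, 3 -> +2
+	 *	wdtlb(pteval, (ptevaddr & PAGE_MASK) + way);
+	 */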
+
+	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
+	rsr	a1, ptevaddr
+	addx2	a3, a3, a3		# ->			0,3,6,9
+	srli	a1, a1, PAGE_SHIFT
+	extui	a3, a3, 2, 2		# ->			0,0,1,2
+	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
+	addi	a3, a3, DTLB_WAY_PGD
+	add	a1, a1, a3		# ... + way_number
+
+3:	wdtlb	a0, a1
+	dsync
+
+	/* Exit critical section. */
+
+4:	rsr	a3, excsave1
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+
+	/* Restore the working registers, and return. */
+
+	l32i	a0, a2, PT_AREG0
+	l32i	a1, a2, PT_AREG1
+	l32i	a3, a2, PT_AREG3
+	l32i	a2, a2, PT_DEPC
+
+	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	/* Restore excsave1 and return. */
+
+	rsr	a2, depc
+	rfe
+
+	/* Return from double exception. */
+
+1:	xsr	a2, depc
+	esync
+	rfde
+
+9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, 8b
+
+	/* Even more unlikely case: active_mm == 0.
+	 * We can get here with an NMI in the middle of a context_switch that
+	 * touches the vmalloc area.
+	 */
+	movi	a0, init_mm
+	j	8b
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+2:	/* Special case for cache aliasing.
+	 * We (should) only get here if clear_user_page, copy_user_page, or
+	 * one of the aliased cache flush functions was preempted by another
+	 * task. Re-establish a temporary mapping to the TLBTEMP_BASE areas.
+	 */
+
+	/* We shouldn't be in a double exception */
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+
+	/* Make sure the exception originated in the special functions */
+
+	movi	a0, __tlbtemp_mapping_start
+	rsr	a3, epc1
+	bltu	a3, a0, 2f
+	movi	a0, __tlbtemp_mapping_end
+	bgeu	a3, a0, 2f
+
+	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+	movi	a3, TLBTEMP_BASE_1
+	rsr	a0, excvaddr
+	bltu	a0, a3, 2f
+
+	addi	a1, a0, -TLBTEMP_SIZE
+	bgeu	a1, a3, 2f
+
+	/* Check if we have to restore an ITLB mapping. */
+
+	movi	a1, __tlbtemp_mapping_itlb
+	rsr	a3, epc1
+	sub	a3, a3, a1
+
+	/* Calculate VPN */
+
+	movi	a1, PAGE_MASK
+	and	a1, a1, a0
+
+	/* Jump for ITLB entry */
+
+	bgez	a3, 1f
+
+	/* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+	add	a1, a3, a1
+
+	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+	mov	a0, a6
+	movnez	a0, a7, a3
+	j	3b
+
+	/* ITLB entry. We only use dst in a6. */
+
+1:	witlb	a6, a1
+	isync
+	j	4b
+
+
+#endif	// DCACHE_WAY_SIZE > PAGE_SIZE
+
+
+2:	/* Invalid PGD, default exception handling */
+
+	rsr	a1, depc
+	s32i	a1, a2, PT_AREG2
+	mov	a1, a2
+
+	rsr	a2, ps
+	bbsi.l	a2, PS_UM_BIT, 1f
+	j	_kernel_exception
+1:	j	_user_exception
+
+ENDPROC(fast_second_level_miss)
+
+/*
+ * StoreProhibitedException
+ *
+ * Update the pte and invalidate the itlb mapping for this pte.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_store_prohibited)
+
+	/* Save a1 and a3. */
+
+	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_AREG3
+
+	GET_CURRENT(a1,a2)
+	l32i	a0, a1, TASK_MM		# tsk->mm
+	beqz	a0, 9f
+
+8:	rsr	a1, excvaddr		# fault address
+	_PGD_OFFSET(a0, a1, a3)
+	l32i	a0, a0, 0
+	beqz	a0, 2f
+
+	/*
+	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
+	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
+	 */
+
+	_PTE_OFFSET(a0, a1, a3)
+	l32i	a3, a0, 0		# read pteval
+	movi	a1, _PAGE_CA_INVALID
+	ball	a3, a1, 2f
+	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
+
+	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
+	or	a3, a3, a1
+	rsr	a1, excvaddr
+	s32i	a3, a0, 0
+
+	/* We need to flush the cache if we have page coloring. */
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+	dhwb	a0, 0
+#endif
+	pdtlb	a0, a1
+	wdtlb	a3, a0
+
+	/* Exit critical section. */
+
+	movi	a0, 0
+	rsr	a3, excsave1
+	s32i	a0, a3, EXC_TABLE_FIXUP
+
+	/* Restore the working registers, and return. */
+
+	l32i	a3, a2, PT_AREG3
+	l32i	a1, a2, PT_AREG1
+	l32i	a0, a2, PT_AREG0
+	l32i	a2, a2, PT_DEPC
+
+	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+	rsr	a2, depc
+	rfe
+
+	/* Double exception. Restore FIXUP handler and return. */
+
+1:	xsr	a2, depc
+	esync
+	rfde
+
+9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	j	8b
+
+2:	/* If there was a problem, handle fault in C */
+
+	rsr	a3, depc	# still holds a2
+	s32i	a3, a2, PT_AREG2
+	mov	a1, a2
+
+	rsr	a2, ps
+	bbsi.l	a2, PS_UM_BIT, 1f
+	j	_kernel_exception
+1:	j	_user_exception
+
+ENDPROC(fast_store_prohibited)
+
+#endif /* CONFIG_MMU */
+
+/*
+ * System Calls.
+ *
+ * void system_call (struct pt_regs* regs, int exccause)
+ *                            a2                 a3
+ */
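+
+/* Windowed-ABI note (a sketch of the convention, not new behavior):
+ * call4/callx4 rotate the register window by four, so values staged in
+ * a6..a11 below arrive in the callee as a2..a7; the extra pt_regs
+ * argument travels at the top of the stack.
+ */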
+	.literal_position
+
+ENTRY(system_call)
+
+	entry	a1, 32
+
+	/* regs->syscall = regs->areg[2] */
+
+	l32i	a3, a2, PT_AREG2
+	mov	a6, a2
+	s32i	a3, a2, PT_SYSCALL
+	call4	do_syscall_trace_enter
+	mov	a3, a6
+
+	/* syscall = sys_call_table[syscall_nr] */
+
+	movi	a4, sys_call_table
+	movi	a5, __NR_syscall_count
+	movi	a6, -ENOSYS
+	bgeu	a3, a5, 1f
+
+	addx4	a4, a3, a4
+	l32i	a4, a4, 0
+	movi	a5, sys_ni_syscall;
+	beq	a4, a5, 1f
+
+	/* Load args: arg0 - arg5 are passed via regs. */
+
+	l32i	a6, a2, PT_AREG6
+	l32i	a7, a2, PT_AREG3
+	l32i	a8, a2, PT_AREG4
+	l32i	a9, a2, PT_AREG5
+	l32i	a10, a2, PT_AREG8
+	l32i	a11, a2, PT_AREG9
+
+	/* Pass one additional argument to the syscall: pt_regs (on stack) */
+	s32i	a2, a1, 0
+
+	callx4	a4
+
+1:	/* regs->areg[2] = return_value */
+
+	s32i	a6, a2, PT_AREG2
+	mov	a6, a2
+	call4	do_syscall_trace_leave
+	retw
+
+ENDPROC(system_call)
+
+/*
+ * Spill live registers on the kernel stack macro.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+	.macro	spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+	call12	1f
+	_j	2f
+	retw
+	.align	4
+1:
+	_entry	a1, 48
+	addi	a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+	.rept	(XCHAL_NUM_AREGS - 32) / 12
+	_entry	a1, 48
+	mov	a12, a0
+	.endr
+#endif
+	_entry	a1, 16
+#if XCHAL_NUM_AREGS % 12 == 0
+	mov	a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+	mov	a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+	mov	a4, a4
+#endif
+	retw
+2:
+#else
+	mov	a12, a12
+#endif
+	.endm
+
+/*
+ * Task switch.
+ *
+ * struct task*  _switch_to (struct task* prev, struct task* next)
+ *         a2                              a2                 a3
+ */
+
+ENTRY(_switch_to)
+
+	entry	a1, 48
+
+	mov	a11, a3			# preserve 'next' (a3)
+
+	l32i	a4, a2, TASK_THREAD_INFO
+	l32i	a5, a3, TASK_THREAD_INFO
+
+	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+#if THREAD_RA > 1020 || THREAD_SP > 1020
+	addi	a10, a2, TASK_THREAD
+	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
+	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
+#else
+	s32i	a0, a2, THREAD_RA	# save return address
+	s32i	a1, a2, THREAD_SP	# save stack pointer
+#endif
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	movi	a6, __stack_chk_guard
+	l32i	a8, a3, TASK_STACK_CANARY
+	s32i	a8, a6, 0
+#endif
+
+	/* Disable ints while we manipulate the stack pointer. */
+
+	irq_save a14, a3
+	rsync
+
+	/* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	l32i	a3, a5, THREAD_CPENABLE
+	xsr	a3, cpenable
+	s32i	a3, a4, THREAD_CPENABLE
+#endif
+
+	/* Flush register file. */
+
+	spill_registers_kernel
+
+	/* Set kernel stack (and leave critical section)
+	 * Note: It's safe to set it here. The stack will not be overwritten
+	 *       because the kernel stack will only be loaded again after
+	 *       we return from kernel space.
+	 */
+
+	rsr	a3, excsave1		# exc_table
+	addi	a7, a5, PT_REGS_OFFSET
+	s32i	a7, a3, EXC_TABLE_KSTK
+
+	/* restore context of the task 'next' */
+
+	l32i	a0, a11, THREAD_RA	# restore return address
+	l32i	a1, a11, THREAD_SP	# restore stack pointer
+
+	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+	wsr	a14, ps
+	rsync
+
+	retw
+
+ENDPROC(_switch_to)
+
+ENTRY(ret_from_fork)
+
+	/* void schedule_tail (struct task_struct *prev)
+	 * Note: prev is still in a6 (return value from fake call4 frame)
+	 */
+	call4	schedule_tail
+
+	mov	a6, a1
+	call4	do_syscall_trace_leave
+
+	j	common_exception_return
+
+ENDPROC(ret_from_fork)
+
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ *           left from _switch_to: a6 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+	call4	schedule_tail
+	mov	a6, a3
+	callx4	a2
+	j	common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 0000000..9053a56
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,376 @@
+/*
+ * arch/xtensa/kernel/head.S
+ *
+ * Xtensa Processor startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+/*
+ * This module contains the entry code for kernel images. It performs the
+ * minimal setup needed to call the generic C routines.
+ *
+ * Prerequisites:
+ *
+ * - The kernel image has been loaded to the actual address where it was
+ *   compiled to.
+ * - a2 contains either 0 or a pointer to a list of boot parameters.
+ *   (see setup.c for more details)
+ *
+ */
+
+/*
+ *  _start
+ *
+ *  The bootloader passes a pointer to a list of boot parameters in a2.
+ */
+
+	/* The first bytes of the kernel image must be an instruction, so we
+	 * manually allocate and define the literal constant we need for a jx
+	 * instruction.
+	 */
+
+	__HEAD
+	.begin	no-absolute-literals
+
+ENTRY(_start)
+
+	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+	wsr     a2, excsave1
+	_j	_SetupOCD
+
+	.align	4
+	.literal_position
+.Lstartup:
+	.word	_startup
+
+	.align	4
+_SetupOCD:
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+	.global _SetupMMU
+_SetupMMU:
+	Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+	rsr	a2, excsave1
+	movi	a3, XCHAL_KSEG_PADDR
+	bltu	a2, a3, 1f
+	sub	a2, a2, a3
+	movi	a3, XCHAL_KSEG_SIZE
+	bgeu	a2, a3, 1f
+	movi	a3, XCHAL_KSEG_CACHED_VADDR
+	add	a2, a2, a3
+	wsr	a2, excsave1
+1:
+#endif
+#endif
+	.end	no-absolute-literals
+
+	l32r	a0, .Lstartup
+	jx	a0
+
+ENDPROC(_start)
+
+	__REF
+	.literal_position
+
+ENTRY(_startup)
+
+	/* Set a0 to 0 for the remaining initialization. */
+
+	movi	a0, 0
+
+#if XCHAL_HAVE_VECBASE
+	movi    a2, VECBASE_VADDR
+	wsr	a2, vecbase
+#endif
+
+	/* Clear debugging registers. */
+
+#if XCHAL_HAVE_DEBUG
+#if XCHAL_NUM_IBREAK > 0
+	wsr	a0, ibreakenable
+#endif
+	wsr	a0, icount
+	movi	a1, 15
+	wsr	a0, icountlevel
+
+	.set	_index, 0
+	.rept	XCHAL_NUM_DBREAK
+	wsr	a0, SREG_DBREAKC + _index
+	.set	_index, _index + 1
+	.endr
+#endif
+
+	/* Clear CCOUNT (not really necessary, but nice) */
+
+	wsr	a0, ccount
+
+	/* Disable zero-loops. */
+
+#if XCHAL_HAVE_LOOPS
+	wsr	a0, lcount
+#endif
+
+	/* Disable all timers. */
+
+	.set	_index, 0
+	.rept	XCHAL_NUM_TIMERS
+	wsr	a0, SREG_CCOMPARE + _index
+	.set	_index, _index + 1
+	.endr
+
+	/* Interrupt initialization. */
+
+	movi	a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
+	wsr	a0, intenable
+	wsr	a2, intclear
+
+	/* Disable coprocessors. */
+
+#if XCHAL_HAVE_CP
+	wsr	a0, cpenable
+#endif
+
+	/*  Initialize the caches.
+	 *  a2, a3 are just working registers (clobbered).
+	 */
+
+#if XCHAL_DCACHE_LINE_LOCKABLE
+	___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+	___unlock_icache_all a2 a3
+#endif
+
+	___invalidate_dcache_all a2 a3
+	___invalidate_icache_all a2 a3
+
+	isync
+
+	initialize_cacheattr
+
+#ifdef CONFIG_HAVE_SMP
+	movi	a2, CCON	# MX External Register to Configure Cache
+	movi	a3, 1
+	wer	a3, a2
+#endif
+
+	/* Setup stack and enable window exceptions (keep irqs disabled) */
+
+	movi	a1, start_info
+	l32i	a1, a1, 0
+
+	movi	a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+					# WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+	wsr	a2, ps			# (enable reg-windows; progmode stack)
+	rsync
+
+#ifdef CONFIG_SMP
+	/*
+	 * Note that for SMP we assume that the cores support the
+	 * PRID register.
+	 */
+	rsr	a2, prid
+	bnez	a2, .Lboot_secondary
+
+#endif  /* CONFIG_SMP */
+
+	/* Unpack data sections
+	 *
+	 * The linker script used to build the Linux kernel image
+	 * creates a table located at __boot_reloc_table_start
+	 * that contains the information about what data needs to be unpacked.
+	 *
+	 * Uses a2-a7.
+	 */
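+
+	/* Each table entry is three words; in C terms (illustrative):
+	 *
+	 *	struct boot_reloc { u32 dst_start, dst_end, src; };
+	 *
+	 *	for (e = table_start; e != table_end; ++e)
+	 *		if (e->dst_start != e->dst_end && e->dst_start != e->src)
+	 *			memcpy((void *)e->dst_start, (void *)e->src,
+	 *			       e->dst_end - e->dst_start);
+	 */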
+
+	movi	a2, __boot_reloc_table_start
+	movi	a3, __boot_reloc_table_end
+
+1:	beq	a2, a3, 3f	# no more entries?
+	l32i	a4, a2, 0	# start destination (in RAM)
+	l32i	a5, a2, 4	# end destination (in RAM)
+	l32i	a6, a2, 8	# start source (in ROM)
+	addi	a2, a2, 12	# next entry
+	beq	a4, a5, 1b	# skip, empty entry
+	beq	a4, a6, 1b	# skip, source and dest. are the same
+
+2:	l32i	a7, a6, 0	# load word
+	addi	a6, a6, 4
+	s32i	a7, a4, 0	# store word
+	addi	a4, a4, 4
+	bltu	a4, a5, 2b
+	j	1b
+
+3:
+	/* All code and initialized data segments have been copied.
+	 * Now clear the BSS segment.
+	 */
+
+	movi	a2, __bss_start	# start of BSS
+	movi	a3, __bss_stop	# end of BSS
+
+	__loopt	a2, a3, a4, 2
+	s32i	a0, a2, 0
+	__endla	a2, a3, 4
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+	/* After unpacking, flush the writeback cache to memory so the
+	 * instructions/data are available.
+	 */
+
+	___flush_dcache_all a2 a3
+#endif
+	memw
+	isync
+	___invalidate_icache_all a2 a3
+	isync
+
+	movi	a6, 0
+	xsr	a6, excsave1
+
+	/* init_arch kick-starts the linux kernel */
+
+	call4	init_arch
+	call4	start_kernel
+
+should_never_return:
+	j	should_never_return
+
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+	movi	a2, cpu_start_ccount
+1:
+	l32i	a3, a2, 0
+	beqi	a3, 0, 1b
+	movi	a3, 0
+	s32i	a3, a2, 0
+	memw
+1:
+	l32i	a3, a2, 0
+	beqi	a3, 0, 1b
+	wsr	a3, ccount
+	movi	a3, 0
+	s32i	a3, a2, 0
+	memw
+
+	movi	a6, 0
+	wsr	a6, excsave1
+
+	call4	secondary_start_kernel
+	j	should_never_return
+
+#endif  /* CONFIG_SMP */
+
+ENDPROC(_startup)
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+	___flush_invalidate_dcache_all a2 a3
+#else
+	___invalidate_dcache_all a2 a3
+#endif
+	memw
+	movi	a2, CCON	# MX External Register to Configure Cache
+	movi	a3, 0
+	wer	a3, a2
+	extw
+
+	rsr	a0, prid
+	neg	a2, a0
+	movi	a3, cpu_start_id
+	s32i	a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+	dhwbi	a3, 0
+#endif
+1:
+	l32i	a2, a3, 0
+	dhi	a3, 0
+	bne	a2, a0, 1b
+
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+	j	_startup
+
+ENDPROC(cpu_restart)
+
+#endif  /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+        .section ".data.init.refok"
+        .align  4
+ENTRY(start_info)
+        .long   init_thread_union + KERNEL_STACK_SIZE
+
+/*
+ * BSS section
+ */
+
+__PAGE_ALIGNED_BSS
+#ifdef CONFIG_MMU
+ENTRY(swapper_pg_dir)
+	.fill	PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
+#endif
+ENTRY(empty_zero_page)
+	.fill	PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
new file mode 100644
index 0000000..c2e387c
--- /dev/null
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -0,0 +1,307 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/log2.h>
+#include <linux/percpu.h>
+#include <linux/perf_event.h>
+#include <variant/core.h>
+
+/* Breakpoint currently in use for each IBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
+
+/* Watchpoint currently in use for each DBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);
+
+int hw_breakpoint_slots(int type)
+{
+	switch (type) {
+	case TYPE_INST:
+		return XCHAL_NUM_IBREAK;
+	case TYPE_DATA:
+		return XCHAL_NUM_DBREAK;
+	default:
+		pr_warn("unknown slot type: %d\n", type);
+		return 0;
+	}
+}
+
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+	unsigned int len;
+	unsigned long va;
+
+	va = hw->address;
+	len = hw->len;
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+			     const struct perf_event_attr *attr,
+			     struct arch_hw_breakpoint *hw)
+{
+	/* Type */
+	switch (attr->bp_type) {
+	case HW_BREAKPOINT_X:
+		hw->type = XTENSA_BREAKPOINT_EXECUTE;
+		break;
+	case HW_BREAKPOINT_R:
+		hw->type = XTENSA_BREAKPOINT_LOAD;
+		break;
+	case HW_BREAKPOINT_W:
+		hw->type = XTENSA_BREAKPOINT_STORE;
+		break;
+	case HW_BREAKPOINT_RW:
+		hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	hw->len = attr->bp_len;
+	if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
+		return -EINVAL;
+
+	/* Address */
+	hw->address = attr->bp_addr;
+	if (hw->address & (hw->len - 1))
+		return -EINVAL;
+
+	return 0;
+}
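+
+/*
+ * Illustrative sketch (assumes the generic hw_breakpoint API from
+ * kernel/events/hw_breakpoint.c): a 4-byte write watchpoint that this
+ * parser would accept.
+ *
+ *	struct perf_event_attr attr;
+ *
+ *	hw_breakpoint_init(&attr);
+ *	attr.bp_addr = (unsigned long)&watched_var;	// aligned to bp_len
+ *	attr.bp_len = HW_BREAKPOINT_LEN_4;		// power of two, at most 64
+ *	attr.bp_type = HW_BREAKPOINT_W;
+ *	bp = register_user_hw_breakpoint(&attr, handler, NULL, task);
+ */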
+
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+				    unsigned long val, void *data)
+{
+	return NOTIFY_DONE;
+}
+
+static void xtensa_wsr(unsigned long v, u8 sr)
+{
+	/* We don't have indexed wsr and creating instruction dynamically
+	 * doesn't seem worth it given how small XCHAL_NUM_IBREAK and
+	 * XCHAL_NUM_DBREAK are. Thus the switch. In case build breaks here
+	 * the switch below needs to be extended.
+	 */
+	BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
+	BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);
+
+	switch (sr) {
+#if XCHAL_NUM_IBREAK > 0
+	case SREG_IBREAKA + 0:
+		WSR(v, SREG_IBREAKA + 0);
+		break;
+#endif
+#if XCHAL_NUM_IBREAK > 1
+	case SREG_IBREAKA + 1:
+		WSR(v, SREG_IBREAKA + 1);
+		break;
+#endif
+
+#if XCHAL_NUM_DBREAK > 0
+	case SREG_DBREAKA + 0:
+		WSR(v, SREG_DBREAKA + 0);
+		break;
+	case SREG_DBREAKC + 0:
+		WSR(v, SREG_DBREAKC + 0);
+		break;
+#endif
+#if XCHAL_NUM_DBREAK > 1
+	case SREG_DBREAKA + 1:
+		WSR(v, SREG_DBREAKA + 1);
+		break;
+
+	case SREG_DBREAKC + 1:
+		WSR(v, SREG_DBREAKC + 1);
+		break;
+#endif
+	}
+}
+
+static int alloc_slot(struct perf_event **slot, size_t n,
+		      struct perf_event *bp)
+{
+	size_t i;
+
+	for (i = 0; i < n; ++i) {
+		if (!slot[i]) {
+			slot[i] = bp;
+			return i;
+		}
+	}
+	return -EBUSY;
+}
+
+static void set_ibreak_regs(int reg, struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned long ibreakenable;
+
+	xtensa_wsr(info->address, SREG_IBREAKA + reg);
+	RSR(ibreakenable, SREG_IBREAKENABLE);
+	WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
+}
+
+static void set_dbreak_regs(int reg, struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;
+
+	if (info->type & XTENSA_BREAKPOINT_LOAD)
+		dbreakc |= DBREAKC_LOAD_MASK;
+	if (info->type & XTENSA_BREAKPOINT_STORE)
+		dbreakc |= DBREAKC_STOR_MASK;
+
+	xtensa_wsr(info->address, SREG_DBREAKA + reg);
+	xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
+}
+
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	int i;
+
+	if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+		if (i < 0)
+			return i;
+		set_ibreak_regs(i, bp);
+
+	} else {
+		/* Watchpoint */
+		i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+		if (i < 0)
+			return i;
+		set_dbreak_regs(i, bp);
+	}
+	return 0;
+}
+
+static int free_slot(struct perf_event **slot, size_t n,
+		     struct perf_event *bp)
+{
+	size_t i;
+
+	for (i = 0; i < n; ++i) {
+		if (slot[i] == bp) {
+			slot[i] = NULL;
+			return i;
+		}
+	}
+	return -EBUSY;
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int i;
+
+	if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
+		unsigned long ibreakenable;
+
+		/* Breakpoint */
+		i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+		if (i >= 0) {
+			RSR(ibreakenable, SREG_IBREAKENABLE);
+			WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
+		}
+	} else {
+		/* Watchpoint */
+		i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+		if (i >= 0)
+			xtensa_wsr(0, SREG_DBREAKC + i);
+	}
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+		if (t->ptrace_bp[i]) {
+			unregister_hw_breakpoint(t->ptrace_bp[i]);
+			t->ptrace_bp[i] = NULL;
+		}
+	}
+	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+		if (t->ptrace_wp[i]) {
+			unregister_hw_breakpoint(t->ptrace_wp[i]);
+			t->ptrace_wp[i] = NULL;
+		}
+	}
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
+	memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
+}
+
+void restore_dbreak(void)
+{
+	int i;
+
+	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+		struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];
+
+		if (bp)
+			set_dbreak_regs(i, bp);
+	}
+	clear_thread_flag(TIF_DB_DISABLED);
+}
+
+int check_hw_breakpoint(struct pt_regs *regs)
+{
+	if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
+		int i;
+		struct perf_event **bp = this_cpu_ptr(bp_on_reg);
+
+		for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+			if (bp[i] && !bp[i]->attr.disabled &&
+			    regs->pc == bp[i]->attr.bp_addr)
+				perf_bp_event(bp[i], regs);
+		}
+		return 0;
+	} else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
+		struct perf_event **bp = this_cpu_ptr(wp_on_reg);
+		int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
+			DEBUGCAUSE_DBNUM_SHIFT;
+
+		if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
+			if (user_mode(regs)) {
+				perf_bp_event(bp[dbnum], regs);
+			} else {
+				set_thread_flag(TIF_DB_DISABLED);
+				xtensa_wsr(0, SREG_DBREAKC + dbnum);
+			}
+		} else {
+			WARN_ONCE(1,
+				  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
+				  dbnum);
+		}
+		return 0;
+	}
+	return -ENOENT;
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 0000000..a48bf2d
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/xtensa/kernel/irq.c
+ *
+ * Xtensa built-in interrupt controller and some generic functions copied
+ * from i386.
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+#include <linux/uaccess.h>
+#include <asm/platform.h>
+
+DECLARE_PER_CPU(unsigned long, nmi_count);
+
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
+{
+	int irq = irq_find_mapping(NULL, hwirq);
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	/* Debugging check for stack overflow: is there less than 1KB free? */
+	{
+		unsigned long sp;
+
+		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
+		sp &= THREAD_SIZE - 1;
+
+		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
+			printk("Stack overflow in do_IRQ: %ld\n",
+			       sp - sizeof(struct thread_info));
+	}
+#endif
+	generic_handle_irq(irq);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	unsigned cpu __maybe_unused;
+#ifdef CONFIG_SMP
+	show_ipi_list(p, prec);
+#endif
+#if XTENSA_FAKE_NMI
+	seq_printf(p, "%*s:", prec, "NMI");
+	for_each_online_cpu(cpu)
+		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+	seq_puts(p, "   Non-maskable interrupts\n");
+#endif
+	return 0;
+}
+
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+		unsigned long int_irq, unsigned long ext_irq,
+		unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1 || intsize > 2))
+		return -EINVAL;
+	if (intsize == 2 && intspec[1] == 1) {
+		int_irq = xtensa_map_ext_irq(ext_irq);
+		if (int_irq < XCHAL_NUM_INTERRUPTS)
+			*out_hwirq = int_irq;
+		else
+			return -EINVAL;
+	} else {
+		*out_hwirq = int_irq;
+	}
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
+}
+
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+		irq_hw_number_t hw)
+{
+	struct irq_chip *irq_chip = d->host_data;
+	u32 mask = 1 << hw;
+
+	if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_simple_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_edge_irq, "edge");
+		irq_clear_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_level_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_percpu_irq, "timer");
+		irq_clear_status_flags(irq, IRQ_LEVEL);
+#ifdef XCHAL_INTTYPE_MASK_PROFILING
+	} else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_percpu_irq, "profiling");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+#endif
+	} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+		/* XCHAL_INTTYPE_MASK_NMI */
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_level_irq, "level");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+	}
+	return 0;
+}
+
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
+{
+	unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+		XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+	unsigned i;
+
+	for (i = 0; mask; ++i, mask >>= 1) {
+		if ((mask & 1) && ext_irq-- == 0)
+			return i;
+	}
+	return XCHAL_NUM_INTERRUPTS;
+}
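+
+/*
+ * Worked example (hypothetical mask): if the combined EDGE|LEVEL mask
+ * is 0b1011, external IRQs 0, 1 and 2 map to interrupts 0, 1 and 3.
+ * xtensa_get_ext_irq_no() below is the inverse, counting the external
+ * interrupt bits strictly below a given interrupt number.
+ */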
+
+unsigned xtensa_get_ext_irq_no(unsigned irq)
+{
+	unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+		XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+		((1u << irq) - 1);
+	return hweight32(mask);
+}
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_OF
+	irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+	xtensa_mx_init_legacy(NULL);
+#else
+	xtensa_pic_init_legacy(NULL);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+	ipi_init();
+#endif
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i, cpu = smp_processor_id();
+
+	for_each_active_irq(i) {
+		struct irq_data *data = irq_get_irq_data(i);
+		struct cpumask *mask;
+		unsigned int newcpu;
+
+		if (irqd_is_per_cpu(data))
+			continue;
+
+		mask = irq_data_get_affinity_mask(data);
+		if (!cpumask_test_cpu(cpu, mask))
+			continue;
+
+		newcpu = cpumask_any_and(mask, cpu_online_mask);
+
+		if (newcpu >= nr_cpu_ids) {
+			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, cpu);
+
+			cpumask_setall(mask);
+		}
+		irq_set_affinity(i, mask);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S
new file mode 100644
index 0000000..0eeda2e
--- /dev/null
+++ b/arch/xtensa/kernel/mcount.S
@@ -0,0 +1,50 @@
+/*
+ * arch/xtensa/kernel/mcount.S
+ *
+ * Xtensa specific mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+/*
+ * Entry condition:
+ *
+ *   a2:	a0 of the caller
+ */
+
+ENTRY(_mcount)
+
+	entry	a1, 16
+
+	movi	a4, ftrace_trace_function
+	l32i	a4, a4, 0
+	movi	a3, ftrace_stub
+	bne	a3, a4, 1f
+	retw
+
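+	/* Reconstruct real PCs from windowed return addresses: the top two
+	 * bits of a0/a2 encode the window increment, so splice the low 30
+	 * bits of the return address with the top two bits of the stack
+	 * pointer: pc = (ra & 0x3fffffff) | (sp & 0xc0000000)
+	 * (cf. MAKE_PC_FROM_RA in <asm/processor.h>).
+	 */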
+1: 	xor	a7, a2, a1
+	movi	a3, 0x3fffffff
+	and	a7, a7, a3
+	xor	a7, a7, a1
+
+	xor	a6, a0, a1
+	and	a6, a6, a3
+	xor	a6, a6, a1
+	addi	a6, a6, -MCOUNT_INSN_SIZE
+	callx4	a4
+
+	retw
+
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+	entry	a1, 16
+	retw
+ENDPROC(ftrace_stub)
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 0000000..902845d
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,189 @@
+/*
+ * arch/xtensa/kernel/module.c
+ *
+ * Module support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+static int
+decode_calln_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x5;
+#endif
+}
+
+static int
+decode_l32r_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x1;
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *mod)
+{
+	unsigned int i;
+	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	unsigned char *location;
+	uint32_t value;
+
+	pr_debug("Applying relocate section %u to %u\n", relsec,
+		 sechdrs[relsec].sh_info);
+
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+		location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rela[i].r_offset;
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rela[i].r_info);
+		value = sym->st_value + rela[i].r_addend;
+
+		switch (ELF32_R_TYPE(rela[i].r_info)) {
+		case R_XTENSA_NONE:
+		case R_XTENSA_DIFF8:
+		case R_XTENSA_DIFF16:
+		case R_XTENSA_DIFF32:
+		case R_XTENSA_ASM_EXPAND:
+			break;
+
+		case R_XTENSA_32:
+		case R_XTENSA_PLT:
+			*(uint32_t *)location += value;
+			break;
+
+		case R_XTENSA_SLOT0_OP:
+			if (decode_calln_opcode(location)) {
+				value -= ((unsigned long)location & -4) + 4;
+				if ((value & 3) != 0 ||
+				    ((value + (1 << 19)) >> 20) != 0) {
+					pr_err("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+				location[0] = ((location[0] & ~0x3) |
+					    ((value >> 16) & 0x3));
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[0] = ((location[0] & ~0xc0) |
+					    ((value << 6) & 0xc0));
+				location[1] = (value >> 2) & 0xff;
+				location[2] = (value >> 10) & 0xff;
+#endif
+			} else if (decode_l32r_opcode(location)) {
+				value -= (((unsigned long)location + 3) & -4);
+				if ((value & 3) != 0 ||
+				    (signed int)value >> 18 != -1) {
+					pr_err("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[1] = value & 0xff;
+				location[2] = (value >> 8) & 0xff;
+#endif
+			}
+			/* FIXME: Ignore any other opcodes.  The Xtensa
+			   assembler currently assumes that the linker will
+			   always do relaxation and so all PC-relative
+			   operands need relocations.  (The assembler also
+			   writes out the tentative PC-relative values,
+			   assuming no link-time relaxation, so it is usually
+			   safe to ignore the relocations.)  If the
+			   assembler's "--no-link-relax" flag can be made to
+			   work, and if all kernel modules can be assembled
+			   with that flag, then unexpected relocations could
+			   be detected here.  */
+			break;
+
+		case R_XTENSA_SLOT1_OP:
+		case R_XTENSA_SLOT2_OP:
+		case R_XTENSA_SLOT3_OP:
+		case R_XTENSA_SLOT4_OP:
+		case R_XTENSA_SLOT5_OP:
+		case R_XTENSA_SLOT6_OP:
+		case R_XTENSA_SLOT7_OP:
+		case R_XTENSA_SLOT8_OP:
+		case R_XTENSA_SLOT9_OP:
+		case R_XTENSA_SLOT10_OP:
+		case R_XTENSA_SLOT11_OP:
+		case R_XTENSA_SLOT12_OP:
+		case R_XTENSA_SLOT13_OP:
+		case R_XTENSA_SLOT14_OP:
+			pr_err("%s: unexpected FLIX relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		case R_XTENSA_SLOT0_ALT:
+		case R_XTENSA_SLOT1_ALT:
+		case R_XTENSA_SLOT2_ALT:
+		case R_XTENSA_SLOT3_ALT:
+		case R_XTENSA_SLOT4_ALT:
+		case R_XTENSA_SLOT5_ALT:
+		case R_XTENSA_SLOT6_ALT:
+		case R_XTENSA_SLOT7_ALT:
+		case R_XTENSA_SLOT8_ALT:
+		case R_XTENSA_SLOT9_ALT:
+		case R_XTENSA_SLOT10_ALT:
+		case R_XTENSA_SLOT11_ALT:
+		case R_XTENSA_SLOT12_ALT:
+		case R_XTENSA_SLOT13_ALT:
+		case R_XTENSA_SLOT14_ALT:
+			pr_err("%s: unexpected ALT relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		default:
+			pr_err("%s: unexpected relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 0000000..9f38437
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,62 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+	.section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+	_j _SetupOCD
+
+	.begin  no-absolute-literals
+	.literal_position
+
+_SetupOCD:
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+_SetupMMU:
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+
+	/*
+	 * Start Secondary Processors with NULL pointer to boot params.
+	 */
+	movi	a2, 0				#  a2 == NULL
+	movi	a3, _startup
+	jx	a3
+
+	.end    no-absolute-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 0000000..1fc138b
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,212 @@
+/*
+ * DMA coherent memory allocation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Based on version for i386.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-direct.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/platform.h>
+
+static void do_cache_op(phys_addr_t paddr, size_t size,
+			void (*fn)(unsigned long, unsigned long))
+{
+	unsigned long off = paddr & (PAGE_SIZE - 1);
+	unsigned long pfn = PFN_DOWN(paddr);
+	struct page *page = pfn_to_page(pfn);
+
+	if (!PageHighMem(page))
+		fn((unsigned long)phys_to_virt(paddr), size);
+	else
+		while (size > 0) {
+			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
+			void *vaddr = kmap_atomic(page);
+
+			fn((unsigned long)vaddr + off, sz);
+			kunmap_atomic(vaddr);
+			off = 0;
+			++page;
+			size -= sz;
+		}
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_FROM_DEVICE:
+		do_cache_op(paddr, size, __invalidate_dcache_range);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_TO_DEVICE:
+		if (XCHAL_DCACHE_IS_WRITEBACK)
+			do_cache_op(paddr, size, __flush_dcache_range);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+#ifdef CONFIG_MMU
+bool platform_vaddr_cached(const void *p)
+{
+	unsigned long addr = (unsigned long)p;
+
+	return addr >= XCHAL_KSEG_CACHED_VADDR &&
+	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
+}
+
+bool platform_vaddr_uncached(const void *p)
+{
+	unsigned long addr = (unsigned long)p;
+
+	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
+}
+
+void *platform_vaddr_to_uncached(void *p)
+{
+	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+}
+
+void *platform_vaddr_to_cached(void *p)
+{
+	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+}
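+
+/*
+ * Example (default core configuration, an assumption): with
+ * XCHAL_KSEG_CACHED_VADDR = 0xd0000000 and XCHAL_KSEG_BYPASS_VADDR =
+ * 0xd8000000, the cached kernel address 0xd0123000 translates to the
+ * uncached alias 0xd8123000, and back.
+ */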
+#else
+bool __attribute__((weak)) platform_vaddr_cached(const void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return true;
+}
+
+bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return false;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return p;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return p;
+}
+#endif
+
+/*
+ * Note: We assume that the full memory space is always mapped to 'kseg'.
+ *	 Otherwise we have to use page attributes (not implemented).
+ */
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t flag, unsigned long attrs)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
+
+	/* ignore region specifiers */
+
+	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+		flag |= GFP_DMA;
+
+	if (gfpflags_allow_blocking(flag))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 flag & __GFP_NOWARN);
+
+	if (!page)
+		page = alloc_pages(flag, get_order(size));
+
+	if (!page)
+		return NULL;
+
+	*handle = phys_to_dma(dev, page_to_phys(page));
+
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		return page;
+	}
+
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
+
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	BUG_ON(!platform_vaddr_cached(page_address(page)));
+	__invalidate_dcache_range((unsigned long)page_address(page), size);
+	return platform_vaddr_to_uncached(page_address(page));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page;
+
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		page = vaddr;
+	} else if (platform_vaddr_uncached(vaddr)) {
+		page = virt_to_page(platform_vaddr_to_cached(vaddr));
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
+
+	if (!dma_release_from_contiguous(dev, page, count))
+		__free_pages(page, get_order(size));
+}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 0000000..21f13e9
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,216 @@
+/*
+ * arch/xtensa/kernel/pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ *
+ * Based largely on work from Cort (ppc/kernel/pci.c)
+ * IO functions copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/platform.h>
+
+/* PCI Controller */
+
+
+/*
+ * pcibios_alloc_controller
+ * pcibios_enable_device
+ * pcibios_fixups
+ * pcibios_align_resource
+ * pcibios_fixup_bus
+ * pci_bus_add_device
+ */
+
+static struct pci_controller *pci_ctrl_head;
+static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
+
+static int pci_bus_count;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+		       resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO) {
+		if (size > 0x100) {
+			pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
+					pci_name(dev), res - dev->resource,
+					size);
+		}
+
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+	}
+
+	return start;
+}
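+
+/*
+ * Worked example (illustrative only): a request with start == 0x2910
+ * has (start & 0x300) != 0, so it is rounded up to
+ * (0x2910 + 0x3ff) & ~0x3ff == 0x2c00, i.e. back into the safe
+ * 0x000-0x0ff region modulo 0x400.
+ */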
+
+static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
+					    struct list_head *resources)
+{
+	struct resource *res;
+	unsigned long io_offset;
+	int i;
+
+	io_offset = (unsigned long)pci_ctrl->io_space.base;
+	res = &pci_ctrl->io_resource;
+	if (!res->flags) {
+		if (io_offset)
+			pr_err("I/O resource not set for host bridge %d\n",
+			       pci_ctrl->index);
+		res->start = 0;
+		res->end = IO_SPACE_LIMIT;
+		res->flags = IORESOURCE_IO;
+	}
+	res->start += io_offset;
+	res->end += io_offset;
+	pci_add_resource_offset(resources, res, io_offset);
+
+	for (i = 0; i < 3; i++) {
+		res = &pci_ctrl->mem_resources[i];
+		if (!res->flags) {
+			if (i > 0)
+				continue;
+			pr_err("Memory resource not set for host bridge %d\n",
+			       pci_ctrl->index);
+			res->start = 0;
+			res->end = ~0U;
+			res->flags = IORESOURCE_MEM;
+		}
+		pci_add_resource(resources, res);
+	}
+}
+
+static int __init pcibios_init(void)
+{
+	struct pci_controller *pci_ctrl;
+	struct list_head resources;
+	struct pci_bus *bus;
+	int next_busno = 0, ret;
+
+	pr_info("PCI: Probing PCI hardware\n");
+
+	/* Scan all of the recorded PCI controllers.  */
+	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+		pci_ctrl->last_busno = 0xff;
+		INIT_LIST_HEAD(&resources);
+		pci_controller_apertures(pci_ctrl, &resources);
+		bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
+					pci_ctrl->ops, pci_ctrl, &resources);
+		if (!bus)
+			continue;
+
+		pci_ctrl->bus = bus;
+		pci_ctrl->last_busno = bus->busn_res.end;
+		if (next_busno <= pci_ctrl->last_busno)
+			next_busno = pci_ctrl->last_busno+1;
+	}
+	pci_bus_count = next_busno;
+	ret = platform_pcibios_fixup();
+	if (ret)
+		return ret;
+
+	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+		if (pci_ctrl->bus)
+			pci_bus_add_devices(pci_ctrl->bus);
+	}
+
+	return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	if (bus->parent) {
+		/* This is a subordinate bridge */
+		pci_read_bridge_bases(bus);
+	}
+}
+
+void pcibios_set_master(struct pci_dev *dev)
+{
+	/* No special bus mastering setup handling */
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx = 0; idx < 6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			pci_err(dev, "can't enable device: resource collisions\n");
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (cmd != old_cmd) {
+		pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+
+	return 0;
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
+ *  -- paulus.
+ */
+
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
+{
+	struct pci_controller *pci_ctrl = (struct pci_controller *)pdev->sysdata;
+	resource_size_t ioaddr = pci_resource_start(pdev, bar);
+
+	if (!pci_ctrl)
+		return -EINVAL;		/* should never happen */
+
+	/* Convert to an offset within this PCI controller */
+	ioaddr -= (unsigned long)pci_ctrl->io_space.base;
+
+	vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
+	return 0;
+}
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
new file mode 100644
index 0000000..ff1d813
--- /dev/null
+++ b/arch/xtensa/kernel/perf_event.c
@@ -0,0 +1,446 @@
+/*
+ * Xtensa Performance Monitor Module driver
+ * See Tensilica Debug User's Guide for PMU registers documentation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+
+/* Global control/status for all perf counters */
+#define XTENSA_PMU_PMG			0x1000
+/* Perf counter values */
+#define XTENSA_PMU_PM(i)		(0x1080 + (i) * 4)
+/* Perf counter control registers */
+#define XTENSA_PMU_PMCTRL(i)		(0x1100 + (i) * 4)
+/* Perf counter status registers */
+#define XTENSA_PMU_PMSTAT(i)		(0x1180 + (i) * 4)
+
+#define XTENSA_PMU_PMG_PMEN		0x1
+
+#define XTENSA_PMU_COUNTER_MASK		0xffffffffULL
+#define XTENSA_PMU_COUNTER_MAX		0x7fffffff
+
+#define XTENSA_PMU_PMCTRL_INTEN		0x00000001
+#define XTENSA_PMU_PMCTRL_KRNLCNT	0x00000008
+#define XTENSA_PMU_PMCTRL_TRACELEVEL	0x000000f0
+#define XTENSA_PMU_PMCTRL_SELECT_SHIFT	8
+#define XTENSA_PMU_PMCTRL_SELECT	0x00001f00
+#define XTENSA_PMU_PMCTRL_MASK_SHIFT	16
+#define XTENSA_PMU_PMCTRL_MASK		0xffff0000
+
+#define XTENSA_PMU_MASK(select, mask) \
+	(((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
+	 ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
+	 XTENSA_PMU_PMCTRL_TRACELEVEL | \
+	 XTENSA_PMU_PMCTRL_INTEN)
+
+#define XTENSA_PMU_PMSTAT_OVFL		0x00000001
+#define XTENSA_PMU_PMSTAT_INTASRT	0x00000010
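+
+/*
+ * Example (illustrative): XTENSA_PMU_MASK(2, 0x490), used below for
+ * PERF_COUNT_HW_BRANCH_INSTRUCTIONS, expands to
+ * (2 << 8) | (0x490 << 16) | XTENSA_PMU_PMCTRL_TRACELEVEL |
+ * XTENSA_PMU_PMCTRL_INTEN == 0x049002f1: select 2, count mask 0x490,
+ * all trace levels counted, overflow interrupt enabled.
+ */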
+
+struct xtensa_pmu_events {
+	/* Array of events currently on this core */
+	struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
+	/* Bitmap of used hardware counters */
+	unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
+};
+static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
+
+static const u32 xtensa_hw_ctl[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= XTENSA_PMU_MASK(0, 0x1),
+	[PERF_COUNT_HW_INSTRUCTIONS]		= XTENSA_PMU_MASK(2, 0xffff),
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= XTENSA_PMU_MASK(10, 0x1),
+	[PERF_COUNT_HW_CACHE_MISSES]		= XTENSA_PMU_MASK(12, 0x1),
+	/* Taken and non-taken branches + taken loop ends */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XTENSA_PMU_MASK(2, 0x490),
+	/* Instruction-related + other global stall cycles */
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XTENSA_PMU_MASK(4, 0x1ff),
+	/* Data-related global stall cycles */
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= XTENSA_PMU_MASK(3, 0x1ff),
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(10, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(10, 0x2),
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(11, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(11, 0x2),
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(8, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(8, 0x2),
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(9, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(9, 0x8),
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(7, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(7, 0x8),
+		},
+	},
+};
+
+static int xtensa_pmu_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	int ret;
+
+	cache_type = (config >>  0) & 0xff;
+	cache_op = (config >>  8) & 0xff;
+	cache_result = (config >> 16) & 0xff;
+
+	if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) ||
+	    cache_op >= C(OP_MAX) ||
+	    cache_result >= C(RESULT_MAX))
+		return -EINVAL;
+
+	ret = xtensa_cache_ctl[cache_type][cache_op][cache_result];
+
+	if (ret == 0)
+		return -EINVAL;
+
+	return ret;
+}
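+
+/*
+ * Example (illustrative): an L1D read miss arrives from the perf core
+ * as config == C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
+ * == 0x10000, which the table above maps to XTENSA_PMU_MASK(10, 0x2).
+ */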
+
+static inline uint32_t xtensa_pmu_read_counter(int idx)
+{
+	return get_er(XTENSA_PMU_PM(idx));
+}
+
+static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
+{
+	set_er(v, XTENSA_PMU_PM(idx));
+}
+
+static void xtensa_perf_event_update(struct perf_event *event,
+				     struct hw_perf_event *hwc, int idx)
+{
+	uint64_t prev_raw_count, new_raw_count;
+	int64_t delta;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+				 new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+}
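+
+/*
+ * Illustrative: the 32-bit wrap-around is absorbed by the mask above;
+ * e.g. prev_raw_count == 0xfffffff0 and new_raw_count == 0x00000010
+ * yield delta == 0x20.
+ */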
+
+static bool xtensa_perf_event_set_period(struct perf_event *event,
+					 struct hw_perf_event *hwc, int idx)
+{
+	bool rc = false;
+	s64 left;
+
+	if (!is_sampling_event(event)) {
+		left = XTENSA_PMU_COUNTER_MAX;
+	} else {
+		s64 period = hwc->sample_period;
+
+		left = local64_read(&hwc->period_left);
+		if (left <= -period) {
+			left = period;
+			local64_set(&hwc->period_left, left);
+			hwc->last_period = period;
+			rc = true;
+		} else if (left <= 0) {
+			left += period;
+			local64_set(&hwc->period_left, left);
+			hwc->last_period = period;
+			rc = true;
+		}
+		if (left > XTENSA_PMU_COUNTER_MAX)
+			left = XTENSA_PMU_COUNTER_MAX;
+	}
+
+	local64_set(&hwc->prev_count, -left);
+	xtensa_pmu_write_counter(idx, -left);
+	perf_event_update_userpage(event);
+
+	return rc;
+}
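+
+/*
+ * Illustrative: the counter is preloaded with -left so that it
+ * overflows after exactly 'left' events; e.g. left == 1000 writes
+ * 0xfffffc18 (-1000 as a u32) into the counter.
+ */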
+
+static void xtensa_pmu_enable(struct pmu *pmu)
+{
+	set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static void xtensa_pmu_disable(struct pmu *pmu)
+{
+	set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static int xtensa_pmu_event_init(struct perf_event *event)
+{
+	int ret;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
+		    xtensa_hw_ctl[event->attr.config] == 0)
+			return -EINVAL;
+		event->hw.config = xtensa_hw_ctl[event->attr.config];
+		return 0;
+
+	case PERF_TYPE_HW_CACHE:
+		ret = xtensa_pmu_cache_event(event->attr.config);
+		if (ret < 0)
+			return ret;
+		event->hw.config = ret;
+		return 0;
+
+	case PERF_TYPE_RAW:
+		/* Not 'previous counter' select */
+		if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
+		    (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
+			return -EINVAL;
+		event->hw.config = (event->attr.config &
+				    (XTENSA_PMU_PMCTRL_KRNLCNT |
+				     XTENSA_PMU_PMCTRL_TRACELEVEL |
+				     XTENSA_PMU_PMCTRL_SELECT |
+				     XTENSA_PMU_PMCTRL_MASK)) |
+			XTENSA_PMU_PMCTRL_INTEN;
+		return 0;
+
+	default:
+		return -ENOENT;
+	}
+}
+
+/*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+static void xtensa_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		xtensa_perf_event_set_period(event, hwc, idx);
+	}
+
+	hwc->state = 0;
+
+	set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
+}
+
+static void xtensa_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		set_er(0, XTENSA_PMU_PMCTRL(idx));
+		set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
+		       XTENSA_PMU_PMSTAT(idx));
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) &&
+	    !(event->hw.state & PERF_HES_UPTODATE)) {
+		xtensa_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+/*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+static int xtensa_pmu_add(struct perf_event *event, int flags)
+{
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (__test_and_set_bit(idx, ev->used_mask)) {
+		idx = find_first_zero_bit(ev->used_mask,
+					  XCHAL_NUM_PERF_COUNTERS);
+		if (idx == XCHAL_NUM_PERF_COUNTERS)
+			return -EAGAIN;
+
+		__set_bit(idx, ev->used_mask);
+		hwc->idx = idx;
+	}
+	ev->event[idx] = event;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_START)
+		xtensa_pmu_start(event, PERF_EF_RELOAD);
+
+	perf_event_update_userpage(event);
+	return 0;
+}
+
+static void xtensa_pmu_del(struct perf_event *event, int flags)
+{
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+
+	xtensa_pmu_stop(event, PERF_EF_UPDATE);
+	__clear_bit(event->hw.idx, ev->used_mask);
+	perf_event_update_userpage(event);
+}
+
+static void xtensa_pmu_read(struct perf_event *event)
+{
+	xtensa_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry_ctx *entry = data;
+
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
+{
+	xtensa_backtrace_kernel(regs, entry->max_stack,
+				callchain_trace, NULL, entry);
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
+{
+	xtensa_backtrace_user(regs, entry->max_stack,
+			      callchain_trace, entry);
+}
+
+void perf_event_print_debug(void)
+{
+	unsigned long flags;
+	unsigned i;
+
+	local_irq_save(flags);
+	pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
+		get_er(XTENSA_PMU_PMG));
+	for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
+		pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
+			i, get_er(XTENSA_PMU_PM(i)),
+			i, get_er(XTENSA_PMU_PMCTRL(i)),
+			i, get_er(XTENSA_PMU_PMSTAT(i)));
+	local_irq_restore(flags);
+}
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t rc = IRQ_NONE;
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+	unsigned i;
+
+	for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
+	     i < XCHAL_NUM_PERF_COUNTERS;
+	     i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+		uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
+		struct perf_event *event = ev->event[i];
+		struct hw_perf_event *hwc = &event->hw;
+		u64 last_period;
+
+		if (!(v & XTENSA_PMU_PMSTAT_OVFL))
+			continue;
+
+		set_er(v, XTENSA_PMU_PMSTAT(i));
+		xtensa_perf_event_update(event, hwc, i);
+		last_period = hwc->last_period;
+		if (xtensa_perf_event_set_period(event, hwc, i)) {
+			struct perf_sample_data data;
+			struct pt_regs *regs = get_irq_regs();
+
+			perf_sample_data_init(&data, 0, last_period);
+			if (perf_event_overflow(event, &data, regs))
+				xtensa_pmu_stop(event, 0);
+		}
+
+		rc = IRQ_HANDLED;
+	}
+	return rc;
+}
+
+static struct pmu xtensa_pmu = {
+	.pmu_enable = xtensa_pmu_enable,
+	.pmu_disable = xtensa_pmu_disable,
+	.event_init = xtensa_pmu_event_init,
+	.add = xtensa_pmu_add,
+	.del = xtensa_pmu_del,
+	.start = xtensa_pmu_start,
+	.stop = xtensa_pmu_stop,
+	.read = xtensa_pmu_read,
+};
+
+static int xtensa_pmu_setup(int cpu)
+{
+	unsigned i;
+
+	set_er(0, XTENSA_PMU_PMG);
+	for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
+		set_er(0, XTENSA_PMU_PMCTRL(i));
+		set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
+	}
+	return 0;
+}
+
+static int __init xtensa_pmu_init(void)
+{
+	int ret;
+	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
+
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+				"perf/xtensa:starting", xtensa_pmu_setup,
+				NULL);
+	if (ret) {
+		pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
+		return ret;
+	}
+#if XTENSA_FAKE_NMI
+	enable_irq(irq);
+#else
+	ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
+			  "pmu", NULL);
+	if (ret < 0)
+		return ret;
+#endif
+
+	ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
+	if (ret)
+		free_irq(irq, NULL);
+
+	return ret;
+}
+early_initcall(xtensa_pmu_init);
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 0000000..1cf0082
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,46 @@
+/*
+ * arch/xtensa/kernel/platform.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <asm/platform.h>
+#include <asm/timex.h>
+#include <asm/param.h>		/* HZ */
+
+#define _F(r,f,a,b)							\
+	r __platform_##f a b;                                   	\
+	r platform_##f a __attribute__((weak, alias("__platform_"#f)))
+
+/*
+ * Default functions that are used if no platform specific function is defined.
+ * (Please, refer to include/asm-xtensa/platform.h for more information)
+ */
+
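+/*
+ * Illustrative expansion: _F(void, halt, (void), { while (1); })
+ * becomes
+ *
+ *	void __platform_halt(void) { while (1); }
+ *	void platform_halt(void) __attribute__((weak, alias("__platform_halt")));
+ *
+ * so a platform may override platform_halt() with a strong definition.
+ */
+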
+_F(void, setup, (char** cmd), { });
+_F(void, restart, (void), { while(1); });
+_F(void, halt, (void), { while(1); });
+_F(void, power_off, (void), { while(1); });
+_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
+_F(void, heartbeat, (void), { });
+_F(int,  pcibios_fixup, (void), { return 0; });
+_F(void, pcibios_init, (void), { });
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+_F(void, calibrate_ccount, (void),
+{
+	pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
+	ccount_freq = 10 * 1000000UL;
+});
+#endif
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 0000000..4bb6813
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,373 @@
+/*
+ * arch/xtensa/kernel/process.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <asm/pgtable.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/mmu.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/asm-offsets.h>
+#include <asm/regs.h>
+#include <asm/hw_breakpoint.h>
+
+extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
+
+struct task_struct *current_set[NR_CPUS] = {&init_task, };
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+#if XTENSA_HAVE_COPROCESSORS
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+	unsigned long cpenable;
+	int i;
+
+	/* Make sure we don't switch tasks during this operation. */
+
+	preempt_disable();
+
+	/* Walk through all cp owners and release it for the requested one. */
+
+	cpenable = ti->cpenable;
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if (coprocessor_owner[i] == ti) {
+			coprocessor_owner[i] = NULL;
+			cpenable &= ~(1 << i);
+		}
+	}
+
+	ti->cpenable = cpenable;
+	coprocessor_clear_cpenable();
+
+	preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+	unsigned long cpenable, old_cpenable;
+	int i;
+
+	preempt_disable();
+
+	RSR_CPENABLE(old_cpenable);
+	cpenable = ti->cpenable;
+	WSR_CPENABLE(cpenable);
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+			coprocessor_flush(ti, i);
+		cpenable >>= 1;
+	}
+	WSR_CPENABLE(old_cpenable);
+
+	preempt_enable();
+}
+
+#endif
+
+
+/*
+ * Power-management idle function, if any is provided by the platform.
+ */
+void arch_cpu_idle(void)
+{
+	platform_idle();
+}
+
+/*
+ * This is called when the thread calls exit().
+ */
+void exit_thread(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_release_all(task_thread_info(tsk));
+#endif
+}
+
+/*
+ * Flush thread state. This is called when a thread does an execve().
+ * Note that we flush the coprocessor registers in case execve() fails.
+ */
+void flush_thread(void)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	struct thread_info *ti = current_thread_info();
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+#endif
+	flush_ptrace_hw_breakpoint(current);
+}
+
+/*
+ * This gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_flush_all(task_thread_info(src));
+#endif
+	*dst = *src;
+	return 0;
+}
+
+/*
+ * Copy thread.
+ *
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ *    regs != NULL, usp_thread_fn is the userspace stack pointer.
+ *    It is expected to copy parent regs (in case CLONE_VM is not set
+ *    in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ *    regs == NULL, usp_thread_fn is the function to run in the new thread
+ *    and thread_fn_arg is its parameter.
+ *    childregs are not used for the kernel threads.
+ *
+ * The stack layout for the new thread looks like this:
+ *
+ *	+------------------------+
+ *	|       childregs        |
+ *	+------------------------+ <- thread.sp = sp in dummy-frame
+ *	|      dummy-frame       |    (saved in dummy-frame spill-area)
+ *	+------------------------+
+ *
+ * We create a dummy frame to return to either ret_from_fork or
+ *   ret_from_kernel_thread:
+ *   a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
+ *   sp points to itself (thread.sp)
+ *   a2, a3 are unused for userspace threads,
+ *   a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
+ *
+ * Note: This is a pristine frame, so we don't need any spill region on top of
+ *       childregs.
+ *
+ * The fun part:  if we're keeping the same VM (i.e. cloning a thread,
+ * not an entire process), we're normally given a new usp, and we CANNOT share
+ * any live address register windows.  If we just copy those live frames over,
+ * the two threads (parent and child) will overflow the same frames onto the
+ * parent stack at different times, likely corrupting the parent stack (esp.
+ * if the parent returns from functions that called clone() and calls new
+ * ones, before the child overflows its now old copies of its parent windows).
+ * One solution is to spill windows to the parent stack, but that's fairly
+ * involved.  Much simpler to just not copy those live frames across.
+ */
+
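+/*
+ * Illustrative: MAKE_RA_FOR_CALL(addr, winc), used below, replaces the
+ * top two bits of addr with the window increment, i.e. the return
+ * address a call4 (winc == 1) would have left in a0, so that retw in
+ * the new thread lands in ret_from_fork/ret_from_kernel_thread.
+ */
+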
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+		unsigned long thread_fn_arg, struct task_struct *p)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	struct thread_info *ti;
+#endif
+
+	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
+	SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
+	SPILL_SLOT(childregs, 0) = 0;
+
+	p->thread.sp = (unsigned long)childregs;
+
+	if (!(p->flags & PF_KTHREAD)) {
+		struct pt_regs *regs = current_pt_regs();
+		unsigned long usp = usp_thread_fn ?
+			usp_thread_fn : regs->areg[1];
+
+		p->thread.ra = MAKE_RA_FOR_CALL(
+				(unsigned long)ret_from_fork, 0x1);
+
+		/* This does not copy all the regs.
+		 * In a bout of brilliance or madness,
+		 * ARs beyond a0-a15 exist past the end of the struct.
+		 */
+		*childregs = *regs;
+		childregs->areg[1] = usp;
+		childregs->areg[2] = 0;
+
+		/* When sharing memory with the parent thread, the child
+		   usually starts on a pristine stack, so we have to reset
+		   windowbase, windowstart and wmask.
+		   (Note that such a new thread is required to always create
+		   an initial call4 frame)
+		   The exception is vfork, where the new thread continues to
+		   run on the parent's stack until it calls execve. This could
+		   be a call8 or call12, which requires a legal stack frame
+		   of the previous caller for the overflow handlers to work.
+		   (Note that it's always legal to overflow live registers).
+		   In this case, make sure to spill at least the stack
+		   pointer of that frame. */
+
+		if (clone_flags & CLONE_VM) {
+			/* check that caller window is live and same stack */
+			int len = childregs->wmask & ~0xf;
+			if (regs->areg[1] == usp && len != 0) {
+				int callinc = (regs->areg[0] >> 30) & 3;
+				int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+				put_user(regs->areg[caller_ars+1],
+					 (unsigned __user*)(usp - 12));
+			}
+			childregs->wmask = 1;
+			childregs->windowstart = 1;
+			childregs->windowbase = 0;
+		} else {
+			int len = childregs->wmask & ~0xf;
+			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+		}
+
+		/* The thread pointer is passed in the '4th argument' (= a5) */
+		if (clone_flags & CLONE_SETTLS)
+			childregs->threadptr = childregs->areg[5];
+	} else {
+		p->thread.ra = MAKE_RA_FOR_CALL(
+				(unsigned long)ret_from_kernel_thread, 1);
+
+		/* pass parameters to ret_from_kernel_thread:
+		 * a2 = thread_fn, a3 = thread_fn arg
+		 */
+		SPILL_SLOT(childregs, 3) = thread_fn_arg;
+		SPILL_SLOT(childregs, 2) = usp_thread_fn;
+
+		/* Childregs are only used when we're going to userspace
+		 * in which case start_thread will set them up.
+		 */
+	}
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	ti = task_thread_info(p);
+	ti->cpenable = 0;
+#endif
+
+	clear_ptrace_hw_breakpoint(p);
+
+	return 0;
+}
+
+
+/*
+ * These bracket the sleeping functions..
+ */
+
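+/*
+ * Illustrative: under the windowed ABI a frame's a0 (return address)
+ * and a1 (caller's stack pointer) are spilled to the save area just
+ * below its stack pointer, at word offsets -4 and -3; get_wchan()
+ * walks the call chain through those two slots.
+ */
+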
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long sp, pc;
+	unsigned long stack_page = (unsigned long) task_stack_page(p);
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	sp = p->thread.sp;
+	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+
+	do {
+		if (sp < stack_page + sizeof(struct task_struct) ||
+		    sp >= (stack_page + THREAD_SIZE) ||
+		    pc == 0)
+			return 0;
+		if (!in_sched_functions(pc))
+			return pc;
+
+		/* Stack layout: sp-4 (words): ra, sp-3 (words): sp' */
+
+		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
+		sp = *((unsigned long *)sp - 3);
+	} while (count++ < 16);
+	return 0;
+}
+
+/*
+ * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
+ * of processor registers.  Besides different ordering,
+ * xtensa_gregset_t contains non-live register information that
+ * 'struct pt_regs' does not.  Exception handling (primarily) uses
+ * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
+ *
+ */
+void xtensa_elf_core_copy_regs(xtensa_gregset_t *elfregs, struct pt_regs *regs)
+{
+	unsigned long wb, ws, wm;
+	int live, last;
+
+	wb = regs->windowbase;
+	ws = regs->windowstart;
+	wm = regs->wmask;
+	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
+
+	/* Don't leak any random bits. */
+
+	memset(elfregs, 0, sizeof(*elfregs));
+
+	/* Note:  PS.EXCM is not set while the user task is running; it
+	 * is set in regs->ps only for exception-handling convenience.
+	 */
+
+	elfregs->pc		= regs->pc;
+	elfregs->ps		= (regs->ps & ~(1 << PS_EXCM_BIT));
+	elfregs->lbeg		= regs->lbeg;
+	elfregs->lend		= regs->lend;
+	elfregs->lcount		= regs->lcount;
+	elfregs->sar		= regs->sar;
+	elfregs->windowstart	= ws;
+
+	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+	memcpy(elfregs->a, regs->areg, live * 4);
+	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
+}
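+
+/*
+ * Illustrative: the rotation above normalizes windowstart so that the
+ * current frame's start bit becomes bit 0; e.g. with WSBITS == 16,
+ * wb == 2 and ws == 0x5 the stored value is 0x4001.
+ */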
+
+int dump_fpu(void)
+{
+	return 0;
+}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 0000000..d9541be
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,517 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007  Tensilica Inc.
+ *
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Scott Foehner <sfoehner@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+
+#include <asm/coprocessor.h>
+#include <asm/elf.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+
+
+void user_enable_single_step(struct task_struct *child)
+{
+	child->ptrace |= PT_SINGLESTEP;
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	child->ptrace &= ~PT_SINGLESTEP;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
+ */
+
+void ptrace_disable(struct task_struct *child)
+{
+	/* Nothing to do.. */
+}
+
+static int ptrace_getregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t __user *gregset = uregs;
+	unsigned long wb = regs->windowbase;
+	int i;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__put_user(regs->pc, &gregset->pc);
+	__put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+	__put_user(regs->lbeg, &gregset->lbeg);
+	__put_user(regs->lend, &gregset->lend);
+	__put_user(regs->lcount, &gregset->lcount);
+	__put_user(regs->windowstart, &gregset->windowstart);
+	__put_user(regs->windowbase, &gregset->windowbase);
+	__put_user(regs->threadptr, &gregset->threadptr);
+
+	for (i = 0; i < XCHAL_NUM_AREGS; i++)
+		__put_user(regs->areg[i],
+			   gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
+
+	return 0;
+}
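+
+/*
+ * Illustrative: the index (wb * 4 + i) % XCHAL_NUM_AREGS above converts
+ * the window-relative areg[] layout to physical AR numbering; e.g. with
+ * wb == 2 the current a0 (regs->areg[0]) is physical AR8 and is stored
+ * at gregset->a[8].
+ */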
+
+static int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t *gregset = uregs;
+	const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+	unsigned long ps;
+	unsigned long wb, ws;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__get_user(regs->pc, &gregset->pc);
+	__get_user(ps, &gregset->ps);
+	__get_user(regs->lbeg, &gregset->lbeg);
+	__get_user(regs->lend, &gregset->lend);
+	__get_user(regs->lcount, &gregset->lcount);
+	__get_user(ws, &gregset->windowstart);
+	__get_user(wb, &gregset->windowbase);
+	__get_user(regs->threadptr, &gregset->threadptr);
+
+	regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+	if (wb >= XCHAL_NUM_AREGS / 4)
+		return -EFAULT;
+
+	if (wb != regs->windowbase || ws != regs->windowstart) {
+		unsigned long rotws, wmask;
+
+		rotws = (((ws | (ws << WSBITS)) >> wb) &
+			 ((1 << WSBITS) - 1)) & ~1;
+		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+			(rotws & 0xF) | 1;
+		regs->windowbase = wb;
+		regs->windowstart = ws;
+		regs->wmask = wmask;
+	}
+
+	if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+					gregset->a, wb * 16))
+		return -EFAULT;
+
+	if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+			     (WSBITS - wb) * 16))
+		return -EFAULT;
+
+	return 0;
+}
+
+
+#if XTENSA_HAVE_COPROCESSORS
+#define CP_OFFSETS(cp) \
+	{ \
+		.elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
+		.ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
+		.sz = sizeof(xtregs_ ## cp ## _t), \
+	}
+
+static const struct {
+	size_t elf_xtregs_offset;
+	size_t ti_offset;
+	size_t sz;
+} cp_offsets[] = {
+	CP_OFFSETS(cp0),
+	CP_OFFSETS(cp1),
+	CP_OFFSETS(cp2),
+	CP_OFFSETS(cp3),
+	CP_OFFSETS(cp4),
+	CP_OFFSETS(cp5),
+	CP_OFFSETS(cp6),
+	CP_OFFSETS(cp7),
+};
+#endif
+
+static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	struct thread_info *ti = task_thread_info(child);
+	elf_xtregs_t __user *xtregs = uregs;
+	int ret = 0;
+	int i __maybe_unused;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+		return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessor registers to memory. */
+	coprocessor_flush_all(ti);
+
+	for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+		ret |= __copy_to_user((char __user *)xtregs +
+				      cp_offsets[i].elf_xtregs_offset,
+				      (const char *)ti +
+				      cp_offsets[i].ti_offset,
+				      cp_offsets[i].sz);
+#endif
+	ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+			      sizeof(xtregs->opt));
+	ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
+			      sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+	struct thread_info *ti = task_thread_info(child);
+	struct pt_regs *regs = task_pt_regs(child);
+	elf_xtregs_t *xtregs = uregs;
+	int ret = 0;
+	int i __maybe_unused;
+
+	if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+		return -EFAULT;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessors before we overwrite them. */
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+
+	for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+		ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
+					(const char __user *)xtregs +
+					cp_offsets[i].elf_xtregs_offset,
+					cp_offsets[i].sz);
+#endif
+	ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+				sizeof(xtregs->opt));
+	ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+				sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+static int ptrace_peekusr(struct task_struct *child, long regno,
+			  long __user *ret)
+{
+	struct pt_regs *regs;
+	unsigned long tmp;
+
+	regs = task_pt_regs(child);
+	tmp = 0;  /* Default return value. */
+
+	switch (regno) {
+	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+		tmp = regs->areg[regno - REG_AR_BASE];
+		break;
+
+	case REG_A_BASE ... REG_A_BASE + 15:
+		tmp = regs->areg[regno - REG_A_BASE];
+		break;
+
+	case REG_PC:
+		tmp = regs->pc;
+		break;
+
+	case REG_PS:
+		/* Note: PS.EXCM is not set while the user task is running;
+		 * it is set in regs only for exception-handling
+		 * convenience.
+		 */
+		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
+		break;
+
+	case REG_WB:
+		break;		/* tmp = 0 */
+
+	case REG_WS:
+		{
+			unsigned long wb = regs->windowbase;
+			unsigned long ws = regs->windowstart;
+			tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
+				((1 << WSBITS) - 1);
+			break;
+		}
+	case REG_LBEG:
+		tmp = regs->lbeg;
+		break;
+
+	case REG_LEND:
+		tmp = regs->lend;
+		break;
+
+	case REG_LCOUNT:
+		tmp = regs->lcount;
+		break;
+
+	case REG_SAR:
+		tmp = regs->sar;
+		break;
+
+	case SYSCALL_NR:
+		tmp = regs->syscall;
+		break;
+
+	default:
+		return -EIO;
+	}
+	return put_user(tmp, ret);
+}
+
+static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+	struct pt_regs *regs;
+	regs = task_pt_regs(child);
+
+	switch (regno) {
+	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+		regs->areg[regno - REG_AR_BASE] = val;
+		break;
+
+	case REG_A_BASE ... REG_A_BASE + 15:
+		regs->areg[regno - REG_A_BASE] = val;
+		break;
+
+	case REG_PC:
+		regs->pc = val;
+		break;
+
+	case SYSCALL_NR:
+		regs->syscall = val;
+		break;
+
+	default:
+		return -EIO;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void ptrace_hbptriggered(struct perf_event *bp,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	int i;
+	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+
+	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
+		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
+			if (current->thread.ptrace_bp[i] == bp)
+				break;
+		i <<= 1;
+	} else {
+		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
+			if (current->thread.ptrace_wp[i] == bp)
+				break;
+		i = (i << 1) | 1;
+	}
+
+	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
+}
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+	struct perf_event_attr attr;
+
+	ptrace_breakpoint_init(&attr);
+
+	/* Initialise fields to sane defaults. */
+	attr.bp_addr	= 0;
+	attr.bp_len	= 1;
+	attr.bp_type	= type;
+	attr.disabled	= 1;
+
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+					   tsk);
+}
+
+/*
+ * Address bit 0 chooses the instruction (0) or data (1) break register;
+ * bits 31..1 are the register number.
+ * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
+ * address (0) and control (1).
+ * An instruction breakpoint control word is 0 to clear the breakpoint,
+ * 1 to set it.
+ * A data breakpoint control word has bit 31 set for 'trigger on store'
+ * and bit 30 for 'trigger on load'; bits 29..0 are the length. Length 0
+ * clears a breakpoint. To set a breakpoint, the length must be a power
+ * of 2 in the range 1..64 and the address must be length-aligned.
+ */
+
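+/*
+ * Example (illustrative): addr == 5 selects data break register 2
+ * (idx == 5 >> 1, bit 0 set); a control word of 0x80000004 passed to
+ * PTRACE_SETHBPREGS arms it as a 4-byte store watchpoint, and a
+ * control word of 0 clears it again.
+ */
+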
+static long ptrace_gethbpregs(struct task_struct *child, long addr,
+			      long __user *datap)
+{
+	struct perf_event *bp;
+	u32 user_data[2] = {0};
+	bool dbreak = addr & 1;
+	unsigned idx = addr >> 1;
+
+	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+	    (dbreak && idx >= XCHAL_NUM_DBREAK))
+		return -EINVAL;
+
+	if (dbreak)
+		bp = child->thread.ptrace_wp[idx];
+	else
+		bp = child->thread.ptrace_bp[idx];
+
+	if (bp) {
+		user_data[0] = bp->attr.bp_addr;
+		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
+		if (dbreak) {
+			if (bp->attr.bp_type & HW_BREAKPOINT_R)
+				user_data[1] |= DBREAKC_LOAD_MASK;
+			if (bp->attr.bp_type & HW_BREAKPOINT_W)
+				user_data[1] |= DBREAKC_STOR_MASK;
+		}
+	}
+
+	if (copy_to_user(datap, user_data, sizeof(user_data)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long ptrace_sethbpregs(struct task_struct *child, long addr,
+			      long __user *datap)
+{
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+	u32 user_data[2];
+	bool dbreak = addr & 1;
+	unsigned idx = addr >> 1;
+	int bp_type = 0;
+
+	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+	    (dbreak && idx >= XCHAL_NUM_DBREAK))
+		return -EINVAL;
+
+	if (copy_from_user(user_data, datap, sizeof(user_data)))
+		return -EFAULT;
+
+	if (dbreak) {
+		bp = child->thread.ptrace_wp[idx];
+		if (user_data[1] & DBREAKC_LOAD_MASK)
+			bp_type |= HW_BREAKPOINT_R;
+		if (user_data[1] & DBREAKC_STOR_MASK)
+			bp_type |= HW_BREAKPOINT_W;
+	} else {
+		bp = child->thread.ptrace_bp[idx];
+		bp_type = HW_BREAKPOINT_X;
+	}
+
+	if (!bp) {
+		bp = ptrace_hbp_create(child,
+				       bp_type ? bp_type : HW_BREAKPOINT_RW);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+		if (dbreak)
+			child->thread.ptrace_wp[idx] = bp;
+		else
+			child->thread.ptrace_bp[idx] = bp;
+	}
+
+	attr = bp->attr;
+	attr.bp_addr = user_data[0];
+	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
+	attr.bp_type = bp_type;
+	attr.disabled = !attr.bp_len;
+
+	return modify_user_hw_breakpoint(bp, &attr);
+}
+#endif
+
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	int ret = -EPERM;
+	void __user *datap = (void __user *) data;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:	/* read word at location addr. */
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
+		break;
+
+	case PTRACE_PEEKUSR:	/* read register specified by addr. */
+		ret = ptrace_peekusr(child, addr, datap);
+		break;
+
+	case PTRACE_POKETEXT:	/* write the word at location addr. */
+	case PTRACE_POKEDATA:
+		ret = generic_ptrace_pokedata(child, addr, data);
+		break;
+
+	case PTRACE_POKEUSR:	/* write register specified by addr. */
+		ret = ptrace_pokeusr(child, addr, data);
+		break;
+
+	case PTRACE_GETREGS:
+		ret = ptrace_getregs(child, datap);
+		break;
+
+	case PTRACE_SETREGS:
+		ret = ptrace_setregs(child, datap);
+		break;
+
+	case PTRACE_GETXTREGS:
+		ret = ptrace_getxregs(child, datap);
+		break;
+
+	case PTRACE_SETXTREGS:
+		ret = ptrace_setxregs(child, datap);
+		break;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	case PTRACE_GETHBPREGS:
+		ret = ptrace_gethbpregs(child, addr, datap);
+		break;
+
+	case PTRACE_SETHBPREGS:
+		ret = ptrace_sethbpregs(child, addr, datap);
+		break;
+#endif
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+unsigned long do_syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+	    tracehook_report_syscall_entry(regs))
+		return -1;
+
+	return regs->areg[2];
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+	int step;
+
+	step = test_thread_flag(TIF_SINGLESTEP);
+
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644
index 0000000..07e56e3
--- /dev/null
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -0,0 +1,128 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set.  Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+	int tmp;
+
+	__asm__ __volatile__(
+			"	movi	%1, 1f\n"
+			"	s32i	%1, %4, 0\n"
+			"	wsr	%2, scompare1\n"
+			"1:	s32c1i	%0, %3, 0\n"
+			: "=a" (set), "=&a" (tmp)
+			: "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+			: "memory"
+			);
+	return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+				       unsigned long exccause)
+{
+	if (regs->pc == rcw_probe_pc) {	/* exception on s32c1i ? */
+		regs->pc += 3;		/* skip the s32c1i instruction */
+		rcw_exc = exccause;
+	} else {
+		do_unhandled(regs, exccause);
+	}
+}
+
+/* Simple test of S32C1I (SoC bring-up assist) */
+
+static int __init check_s32c1i(void)
+{
+	int n, cause1, cause2;
+	void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+	rcw_probe_pc = 0;
+	handbus  = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+			do_probed_exception);
+	handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+			do_probed_exception);
+	handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+			do_probed_exception);
+
+	/* First try an S32C1I that does not store: */
+	rcw_exc = 0;
+	rcw_word = 1;
+	n = probed_compare_swap(&rcw_word, 0, 2);
+	cause1 = rcw_exc;
+
+	/* took exception? */
+	if (cause1 != 0) {
+		/* unclean exception? */
+		if (n != 2 || rcw_word != 1)
+			panic("S32C1I exception error");
+	} else if (rcw_word != 1 || n != 1) {
+		panic("S32C1I compare error");
+	}
+
+	/* Then an S32C1I that stores: */
+	rcw_exc = 0;
+	rcw_word = 0x1234567;
+	n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+	cause2 = rcw_exc;
+
+	if (cause2 != 0) {
+		/* unclean exception? */
+		if (n != 0xabcde || rcw_word != 0x1234567)
+			panic("S32C1I exception error (b)");
+	} else if (rcw_word != 0xabcde || n != 0x1234567) {
+		panic("S32C1I store error");
+	}
+
+	/* Verify consistency of exceptions: */
+	if (cause1 || cause2) {
+		pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+		/* If emulation of S32C1I upon bus error gets implemented,
+		 * we can get rid of this panic for single core (not SMP)
+		 */
+		panic("S32C1I exceptions not currently supported");
+	}
+	if (cause1 != cause2)
+		panic("inconsistent S32C1I exceptions");
+
+	trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+	trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+	trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+	return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display a reminder for early engineering test or demo chips and FPGA
+ * bitstreams.
+ */
+static int __init check_s32c1i(void)
+{
+	pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+	return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 0000000..351283b
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,737 @@
+/*
+ * arch/xtensa/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995  Linus Torvalds
+ * Copyright (C) 2001 - 2005  Tensilica Inc.
+ * Copyright (C) 2014 - 2016  Cadence Design Systems Inc.
+ *
+ * Chris Zankel	<chris@zankel.net>
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/screen_info.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+# include <linux/console.h>
+#endif
+
+#ifdef CONFIG_PROC_FS
+# include <linux/seq_file.h>
+#endif
+
+#include <asm/bootparam.h>
+#include <asm/kasan.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/timex.h>
+#include <asm/platform.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/param.h>
+#include <asm/smp.h>
+#include <asm/sysmem.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = {
+	.orig_x = 0,
+	.orig_y = 24,
+	.orig_video_cols = 80,
+	.orig_video_lines = 24,
+	.orig_video_isVGA = 1,
+	.orig_video_points = 16,
+};
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
+int initrd_is_mapped = 0;
+extern int initrd_below_start_ok;
+#endif
+
+#ifdef CONFIG_OF
+void *dtb_start = __dtb_start;
+#endif
+
+extern unsigned long loops_per_jiffy;
+
+/* Command line specified as configuration option. */
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+#ifdef CONFIG_PARSE_BOOTPARAM
+/*
+ * Boot parameter parsing.
+ *
+ * The Xtensa port uses a list of variable-sized tags to pass data to
+ * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
+ * to be recognised. The list is terminated with a zero-sized
+ * BP_TAG_LAST tag.
+ */
+
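+/*
+ * Illustrative example of such a list (tag ids and payloads only):
+ *
+ *	BP_TAG_FIRST                                  - marks a valid list
+ *	BP_TAG_MEMORY        { type, start, end }
+ *	BP_TAG_COMMAND_LINE  "console=ttyS0,115200"
+ *	BP_TAG_LAST                                   - zero-sized terminator
+ */
+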
+typedef struct tagtable {
+	u32 tag;
+	int (*parse)(const bp_tag_t*);
+} tagtable_t;
+
+#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn 		\
+	__attribute__((used, section(".taglist"))) = { tag, fn }
+
+/* parse current tag */
+
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+	if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+		return -1;
+
+	return memblock_add(mi->start, mi->end - mi->start);
+}
+
+__tagtable(BP_TAG_MEMORY, parse_tag_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init parse_tag_initrd(const bp_tag_t *tag)
+{
+	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+	initrd_start = (unsigned long)__va(mi->start);
+	initrd_end = (unsigned long)__va(mi->end);
+
+	return 0;
+}
+
+__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+#ifdef CONFIG_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+	dtb_start = __va(tag->data[0]);
+	return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+#endif /* CONFIG_OF */
+
+static int __init parse_tag_cmdline(const bp_tag_t *tag)
+{
+	strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+	return 0;
+}
+
+__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
+
+static int __init parse_bootparam(const bp_tag_t *tag)
+{
+	extern tagtable_t __tagtable_begin, __tagtable_end;
+	tagtable_t *t;
+
+	/* Boot parameters must start with a BP_TAG_FIRST tag. */
+
+	if (tag->id != BP_TAG_FIRST) {
+		pr_warn("Invalid boot parameters!\n");
+		return 0;
+	}
+
+	tag = (bp_tag_t *)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
+
+	/* Parse all tags. */
+
+	while (tag != NULL && tag->id != BP_TAG_LAST) {
+		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+			if (tag->id == t->tag) {
+				t->parse(tag);
+				break;
+			}
+		}
+		if (t == &__tagtable_end)
+			pr_warn("Ignoring tag 0x%08x\n", tag->id);
+		tag = (bp_tag_t *)((unsigned long)(tag + 1) + tag->size);
+	}
+
+	return 0;
+}
+#else
+static int __init parse_bootparam(const bp_tag_t *tag)
+{
+	pr_info("Ignoring boot parameters at %p\n", tag);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_OF
+
+#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+		int depth, void *data)
+{
+	const __be32 *ranges;
+	int len;
+
+	if (depth > 1)
+		return 0;
+
+	if (!of_flat_dt_is_compatible(node, "simple-bus"))
+		return 0;
+
+	ranges = of_get_flat_dt_prop(node, "ranges", &len);
+	if (!ranges)
+		return 1;
+	if (len == 0)
+		return 1;
+
+	xtensa_kio_paddr = of_read_ulong(ranges + 1, 1);
+	/* round down to nearest 256MB boundary */
+	xtensa_kio_paddr &= 0xf0000000;
+
+	init_kio();
+
+	return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+		int depth, void *data)
+{
+	return 1;
+}
+#endif
+
+void __init early_init_devtree(void *params)
+{
+	early_init_dt_scan(params);
+	of_scan_flat_dt(xtensa_dt_io_area, NULL);
+
+	if (!command_line[0])
+		strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+#endif /* CONFIG_OF */
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
+	/* Initialize MMU. */
+
+	init_mmu();
+
+	/* Initialize initial KASAN shadow map */
+
+	kasan_early_init();
+
+	/* Parse boot parameters */
+
+	if (bp_start)
+		parse_bootparam(bp_start);
+
+#ifdef CONFIG_OF
+	early_init_devtree(dtb_start);
+#endif
+
+#ifdef CONFIG_CMDLINE_BOOL
+	if (!command_line[0])
+		strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
+	/* Early hook for platforms */
+
+	platform_init(bp_start);
+}
+
+/*
+ * Initialize system. Setup memory and reserve regions.
+ */
+
+extern char _end[];
+extern char _stext[];
+extern char _WindowVectors_text_start;
+extern char _WindowVectors_text_end;
+extern char _DebugInterruptVector_text_start;
+extern char _DebugInterruptVector_text_end;
+extern char _KernelExceptionVector_text_start;
+extern char _KernelExceptionVector_text_end;
+extern char _UserExceptionVector_text_start;
+extern char _UserExceptionVector_text_end;
+extern char _DoubleExceptionVector_text_start;
+extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
+#ifdef CONFIG_SMP
+extern char _SecondaryResetVector_text_start;
+extern char _SecondaryResetVector_text_end;
+#endif
+
+static inline int mem_reserve(unsigned long start, unsigned long end)
+{
+	return memblock_reserve(start, end - start);
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	pr_info("config ID: %08x:%08x\n",
+		get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
+	if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
+	    get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
+		pr_info("built for config ID: %08x:%08x\n",
+			XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
+
+	*cmdline_p = command_line;
+	platform_setup(cmdline_p);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+
+	/* Reserve some memory regions */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start < initrd_end) {
+		initrd_is_mapped = mem_reserve(__pa(initrd_start),
+					       __pa(initrd_end)) == 0;
+		initrd_below_start_ok = 1;
+	} else {
+		initrd_start = 0;
+	}
+#endif
+
+	mem_reserve(__pa(_stext), __pa(_end));
+
+#ifdef CONFIG_VECTORS_OFFSET
+	mem_reserve(__pa(&_WindowVectors_text_start),
+		    __pa(&_WindowVectors_text_end));
+
+	mem_reserve(__pa(&_DebugInterruptVector_text_start),
+		    __pa(&_DebugInterruptVector_text_end));
+
+	mem_reserve(__pa(&_KernelExceptionVector_text_start),
+		    __pa(&_KernelExceptionVector_text_end));
+
+	mem_reserve(__pa(&_UserExceptionVector_text_start),
+		    __pa(&_UserExceptionVector_text_end));
+
+	mem_reserve(__pa(&_DoubleExceptionVector_text_start),
+		    __pa(&_DoubleExceptionVector_text_end));
+
+#if XCHAL_EXCM_LEVEL >= 2
+	mem_reserve(__pa(&_Level2InterruptVector_text_start),
+		    __pa(&_Level2InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+	mem_reserve(__pa(&_Level3InterruptVector_text_start),
+		    __pa(&_Level3InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+	mem_reserve(__pa(&_Level4InterruptVector_text_start),
+		    __pa(&_Level4InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+	mem_reserve(__pa(&_Level5InterruptVector_text_start),
+		    __pa(&_Level5InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+	mem_reserve(__pa(&_Level6InterruptVector_text_start),
+		    __pa(&_Level6InterruptVector_text_end));
+#endif
+
+#endif /* CONFIG_VECTORS_OFFSET */
+
+#ifdef CONFIG_SMP
+	mem_reserve(__pa(&_SecondaryResetVector_text_start),
+		    __pa(&_SecondaryResetVector_text_end));
+#endif
+	parse_early_param();
+	bootmem_init();
+	kasan_init();
+	unflatten_and_copy_device_tree();
+
+#ifdef CONFIG_SMP
+	smp_init_cpus();
+#endif
+
+	paging_init();
+	zones_init();
+
+#ifdef CONFIG_VT
+# if defined(CONFIG_VGA_CONSOLE)
+	conswitchp = &vga_con;
+# elif defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+# endif
+#endif
+
+#ifdef CONFIG_PCI
+	platform_pcibios_init();
+#endif
+}
+
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct cpu *cpu = &per_cpu(cpu_data, i);
+		cpu->hotpluggable = !!i;
+		register_cpu(cpu, i);
+	}
+
+	return 0;
+}
+subsys_initcall(topology_init);
+
+void cpu_reset(void)
+{
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+	local_irq_disable();
+	/*
+	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+	 * be flushed.
+	 * Way 4 is not currently used by Linux.
+	 * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
+	 * Way 5 shall be flushed and way 6 shall be set to identity mapping
+	 * on MMUv3.
+	 */
+	local_flush_tlb_all();
+	invalidate_page_directory();
+#if XCHAL_HAVE_SPANNING_WAY
+	/* MMU v3 */
+	{
+		unsigned long vaddr = (unsigned long)cpu_reset;
+		unsigned long paddr = __pa(vaddr);
+		unsigned long tmpaddr = vaddr + SZ_512M;
+		unsigned long tmp0, tmp1, tmp2, tmp3;
+
+		/*
+		 * Find a place for the temporary mapping. It must not be
+		 * in the same 512MB region with vaddr or paddr, otherwise
+		 * there may be multihit exception either on entry to the
+		 * temporary mapping, or on entry to the identity mapping.
+		 * (512MB is the biggest page size supported by TLB.)
+		 */
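+		/*
+		 * Worked example (hypothetical addresses): with vaddr at
+		 * 0xd0000000, tmpaddr starts at 0xf0000000, already outside
+		 * vaddr's 512MB region; the loop below only advances tmpaddr
+		 * while it still collides with paddr's region.
+		 */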
+		while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
+			tmpaddr += SZ_512M;
+
+		/* Invalidate mapping in the selected temporary area */
+		if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT))
+			invalidate_itlb_entry(itlb_probe(tmpaddr));
+		if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
+			invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
+
+		/*
+		 * Map two consecutive pages starting at the physical address
+		 * of this function to the temporary mapping area.
+		 */
+		write_itlb_entry(__pte((paddr & PAGE_MASK) |
+				       _PAGE_HW_VALID |
+				       _PAGE_HW_EXEC |
+				       _PAGE_CA_BYPASS),
+				 tmpaddr & PAGE_MASK);
+		write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
+				       _PAGE_HW_VALID |
+				       _PAGE_HW_EXEC |
+				       _PAGE_CA_BYPASS),
+				 (tmpaddr & PAGE_MASK) + PAGE_SIZE);
+
+		/* Reinitialize TLB */
+		__asm__ __volatile__ ("movi	%0, 1f\n\t"
+				      "movi	%3, 2f\n\t"
+				      "add	%0, %0, %4\n\t"
+				      "add	%3, %3, %5\n\t"
+				      "jx	%0\n"
+				      /*
+				       * No literal, data or stack access
+				       * below this point
+				       */
+				      "1:\n\t"
+				      /* Initialize *tlbcfg */
+				      "movi	%0, 0\n\t"
+				      "wsr	%0, itlbcfg\n\t"
+				      "wsr	%0, dtlbcfg\n\t"
+				      /* Invalidate TLB way 5 */
+				      "movi	%0, 4\n\t"
+				      "movi	%1, 5\n"
+				      "1:\n\t"
+				      "iitlb	%1\n\t"
+				      "idtlb	%1\n\t"
+				      "add	%1, %1, %6\n\t"
+				      "addi	%0, %0, -1\n\t"
+				      "bnez	%0, 1b\n\t"
+				      /* Initialize TLB way 6 */
+				      "movi	%0, 7\n\t"
+				      "addi	%1, %9, 3\n\t"
+				      "addi	%2, %9, 6\n"
+				      "1:\n\t"
+				      "witlb	%1, %2\n\t"
+				      "wdtlb	%1, %2\n\t"
+				      "add	%1, %1, %7\n\t"
+				      "add	%2, %2, %7\n\t"
+				      "addi	%0, %0, -1\n\t"
+				      "bnez	%0, 1b\n\t"
+				      /* Jump to identity mapping */
+				      "jx	%3\n"
+				      "2:\n\t"
+				      /* Complete way 6 initialization */
+				      "witlb	%1, %2\n\t"
+				      "wdtlb	%1, %2\n\t"
+				      /* Invalidate temporary mapping */
+				      "sub	%0, %9, %7\n\t"
+				      "iitlb	%0\n\t"
+				      "add	%0, %0, %8\n\t"
+				      "iitlb	%0"
+				      : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
+					"=&a"(tmp3)
+				      : "a"(tmpaddr - vaddr),
+					"a"(paddr - vaddr),
+					"a"(SZ_128M), "a"(SZ_512M),
+					"a"(PAGE_SIZE),
+					"a"((tmpaddr + SZ_512M) & PAGE_MASK)
+				      : "memory");
+	}
+#endif
+#endif
+	__asm__ __volatile__ ("movi	a2, 0\n\t"
+			      "wsr	a2, icountlevel\n\t"
+			      "movi	a2, 0\n\t"
+			      "wsr	a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+			      "wsr	a2, ibreakenable\n\t"
+#endif
+#if XCHAL_HAVE_LOOPS
+			      "wsr	a2, lcount\n\t"
+#endif
+			      "movi	a2, 0x1f\n\t"
+			      "wsr	a2, ps\n\t"
+			      "isync\n\t"
+			      "jx	%0\n\t"
+			      :
+			      : "a" (XCHAL_RESET_VECTOR_VADDR)
+			      : "a2");
+	for (;;)
+		;
+}
+
+void machine_restart(char * cmd)
+{
+	platform_restart();
+}
+
+void machine_halt(void)
+{
+	platform_halt();
+	while (1);
+}
+
+void machine_power_off(void)
+{
+	platform_power_off();
+	while (1);
+}
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Display some core information through /proc/cpuinfo.
+ */
+
+static int
+c_show(struct seq_file *f, void *slot)
+{
+	/* high-level stuff */
+	seq_printf(f, "CPU count\t: %u\n"
+		      "CPU list\t: %*pbl\n"
+		      "vendor_id\t: Tensilica\n"
+		      "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+		      "core ID\t\t: " XCHAL_CORE_ID "\n"
+		      "build ID\t: 0x%x\n"
+		      "config ID\t: %08x:%08x\n"
+		      "byte order\t: %s\n"
+		      "cpu MHz\t\t: %lu.%02lu\n"
+		      "bogomips\t: %lu.%02lu\n",
+		      num_online_cpus(),
+		      cpumask_pr_args(cpu_online_mask),
+		      XCHAL_BUILD_UNIQUE_ID,
+		      get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
+		      XCHAL_HAVE_BE ?  "big" : "little",
+		      ccount_freq/1000000,
+		      (ccount_freq/10000) % 100,
+		      loops_per_jiffy/(500000/HZ),
+		      (loops_per_jiffy/(5000/HZ)) % 100);
+	seq_puts(f, "flags\t\t: "
+#if XCHAL_HAVE_NMI
+		     "nmi "
+#endif
+#if XCHAL_HAVE_DEBUG
+		     "debug "
+# if XCHAL_HAVE_OCD
+		     "ocd "
+# endif
+#endif
+#if XCHAL_HAVE_DENSITY
+		     "density "
+#endif
+#if XCHAL_HAVE_BOOLEANS
+		     "boolean "
+#endif
+#if XCHAL_HAVE_LOOPS
+		     "loop "
+#endif
+#if XCHAL_HAVE_NSA
+		     "nsa "
+#endif
+#if XCHAL_HAVE_MINMAX
+		     "minmax "
+#endif
+#if XCHAL_HAVE_SEXT
+		     "sext "
+#endif
+#if XCHAL_HAVE_CLAMPS
+		     "clamps "
+#endif
+#if XCHAL_HAVE_MAC16
+		     "mac16 "
+#endif
+#if XCHAL_HAVE_MUL16
+		     "mul16 "
+#endif
+#if XCHAL_HAVE_MUL32
+		     "mul32 "
+#endif
+#if XCHAL_HAVE_MUL32_HIGH
+		     "mul32h "
+#endif
+#if XCHAL_HAVE_FP
+		     "fpu "
+#endif
+#if XCHAL_HAVE_S32C1I
+		     "s32c1i "
+#endif
+		     "\n");
+
+	/* Registers. */
+	seq_printf(f,"physical aregs\t: %d\n"
+		     "misc regs\t: %d\n"
+		     "ibreak\t\t: %d\n"
+		     "dbreak\t\t: %d\n",
+		     XCHAL_NUM_AREGS,
+		     XCHAL_NUM_MISC_REGS,
+		     XCHAL_NUM_IBREAK,
+		     XCHAL_NUM_DBREAK);
+
+
+	/* Interrupt. */
+	seq_printf(f,"num ints\t: %d\n"
+		     "ext ints\t: %d\n"
+		     "int levels\t: %d\n"
+		     "timers\t\t: %d\n"
+		     "debug level\t: %d\n",
+		     XCHAL_NUM_INTERRUPTS,
+		     XCHAL_NUM_EXTINTERRUPTS,
+		     XCHAL_NUM_INTLEVELS,
+		     XCHAL_NUM_TIMERS,
+		     XCHAL_DEBUGLEVEL);
+
+	/* Cache */
+	seq_printf(f,"icache line size: %d\n"
+		     "icache ways\t: %d\n"
+		     "icache size\t: %d\n"
+		     "icache flags\t: "
+#if XCHAL_ICACHE_LINE_LOCKABLE
+		     "lock "
+#endif
+		     "\n"
+		     "dcache line size: %d\n"
+		     "dcache ways\t: %d\n"
+		     "dcache size\t: %d\n"
+		     "dcache flags\t: "
+#if XCHAL_DCACHE_IS_WRITEBACK
+		     "writeback "
+#endif
+#if XCHAL_DCACHE_LINE_LOCKABLE
+		     "lock "
+#endif
+		     "\n",
+		     XCHAL_ICACHE_LINESIZE,
+		     XCHAL_ICACHE_WAYS,
+		     XCHAL_ICACHE_SIZE,
+		     XCHAL_DCACHE_LINESIZE,
+		     XCHAL_DCACHE_WAYS,
+		     XCHAL_DCACHE_SIZE);
+
+	return 0;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+	return (*pos == 0) ? (void *)1 : NULL;
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+	return NULL;
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op =
+{
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= c_show,
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 0000000..f88e7a0
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,496 @@
+/*
+ * arch/xtensa/kernel/signal.c
+ *
+ * Signal handling.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005, 2006 Tensilica Inc.
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/tracehook.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/ucontext.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/unistd.h>
+
+extern struct task_struct *coproc_owners[];
+
+struct rt_sigframe
+{
+	struct siginfo info;
+	struct ucontext uc;
+	struct {
+		xtregs_opt_t opt;
+		xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+		xtregs_coprocessor_t cp;
+#endif
+	} xtregs;
+	unsigned char retcode[6];
+	unsigned int window[4];
+};
+
+/* 
+ * Flush register windows stored in pt_regs to stack.
+ * Returns 1 for errors.
+ */
+
+int
+flush_window_regs_user(struct pt_regs *regs)
+{
+	const unsigned long ws = regs->windowstart;
+	const unsigned long wb = regs->windowbase;
+	unsigned long sp = 0;
+	unsigned long wm;
+	int err = 1;
+	int base;
+
+	/* Return if no other frames. */
+
+	if (regs->wmask == 1)
+		return 0;
+
+	/* Rotate windowmask and skip empty frames. */
+
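+	/*
+	 * The rotation moves the bit for the current window (WINDOWBASE) to
+	 * bit 0 of wm, and regs->wmask >> 4 is the number of 4-register
+	 * frames saved in pt_regs, so base indexes the oldest saved frame.
+	 */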
+	wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
+	base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
+
+	/* For call8 or call12 frames, we need the previous stack pointer. */
+
+	if ((regs->wmask & 2) == 0)
+		if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
+			goto errout;
+
+	/* Spill frames to stack. */
+
+	while (base < XCHAL_NUM_AREGS / 4) {
+
+		int m = (wm >> base);
+		int inc = 0;
+
+		/* Save registers a4..a7 (call8) or a4..a11 (call12) */
+
+		if (m & 2) {			/* call4 */
+			inc = 1;
+
+		} else if (m & 4) {		/* call8 */
+			if (copy_to_user(&SPILL_SLOT_CALL8(sp, 4),
+					 &regs->areg[(base + 1) * 4], 16))
+				goto errout;
+			inc = 2;
+
+		} else if (m & 8) {	/* call12 */
+			if (copy_to_user(&SPILL_SLOT_CALL12(sp, 4),
+					 &regs->areg[(base + 1) * 4], 32))
+				goto errout;
+			inc = 3;
+		}
+
+		/* Save current frame a0..a3 under next SP */
+
+		sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
+		if (copy_to_user(&SPILL_SLOT(sp, 0), &regs->areg[base * 4], 16))
+			goto errout;
+
+		/* Get current stack pointer for next loop iteration. */
+
+		sp = regs->areg[base * 4 + 1];
+		base += inc;
+	}
+
+	regs->wmask = 1;
+	regs->windowstart = 1 << wb;
+
+	return 0;
+
+errout:
+	return err;
+}
+
+/*
+ * Note: we don't copy the double exception 'regs'; we have to finish the
+ * double exception first before we return to the signal handler. The double
+ * exception handler might cause another double exception, but we should be
+ * fine, as the situation is the same as if we had returned to the signal
+ * handler and taken an interrupt immediately.
+ */
+
+static int
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
+{
+	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+	struct thread_info *ti = current_thread_info();
+	int err = 0;
+
+#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
+	COPY(pc);
+	COPY(ps);
+	COPY(lbeg);
+	COPY(lend);
+	COPY(lcount);
+	COPY(sar);
+#undef COPY
+
+	err |= flush_window_regs_user(regs);
+	err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
+	err |= __put_user(0, &sc->sc_xtregs);
+
+	if (err)
+		return err;
+
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+	err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+			      sizeof (frame->xtregs.cp));
+#endif
+	err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+			      sizeof (xtregs_opt_t));
+	err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+			      sizeof (xtregs_user_t));
+
+	err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
+
+	return err;
+}
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
+{
+	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+	struct thread_info *ti = current_thread_info();
+	unsigned int err = 0;
+	unsigned long ps;
+
+#define COPY(x)	err |= __get_user(regs->x, &sc->sc_##x)
+	COPY(pc);
+	COPY(lbeg);
+	COPY(lend);
+	COPY(lcount);
+	COPY(sar);
+#undef COPY
+
+	/* All registers were flushed to stack. Start with a pristine frame. */
+
+	regs->wmask = 1;
+	regs->windowbase = 0;
+	regs->windowstart = 1;
+
+	regs->syscall = -1;		/* disable syscall checks */
+
+	/* For PS, restore only PS.CALLINC.
+	 * Assume that all other bits are either the same as for the signal
+	 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+	 */
+	err |= __get_user(ps, &sc->sc_ps);
+	regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
+
+	/* Additional corruption checks */
+
+	if ((regs->lcount > 0)
+	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
+		err = 1;
+
+	err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
+
+	if (err)
+		return err;
+
+	/* The signal handler may have used coprocessors in which
+	 * case they are still enabled.  We disable them to force a
+	 * reloading of the original task's CP state by the lazy
+	 * context-switching mechanisms of CP exception handling.
+	 * Also, we essentially discard any coprocessor state that the
+	 * signal handler created. */
+
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_release_all(ti);
+	err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+				sizeof (frame->xtregs.cp));
+#endif
+	err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+				sizeof (xtregs_user_t));
+	err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+				sizeof (xtregs_opt_t));
+
+	return err;
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
+				    long a4, long a5, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	sigset_t set;
+	int ret;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	if (regs->depc > 64)
+		panic("rt_sigreturn in double exception!\n");
+
+	frame = (struct rt_sigframe __user *) regs->areg[1];
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, frame))
+		goto badframe;
+
+	ret = regs->areg[2];
+
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto badframe;
+
+	return ret;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+gen_return_code(unsigned char *codemem)
+{
+	int err = 0;
+
+	/*
+	 * The 12-bit immediate is really split up within the 24-bit MOVI
+	 * instruction.  As long as the above system call numbers fit within
+	 * 8-bits, the following code works fine. See the Xtensa ISA for
+	 * details.
+	 */
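+	/*
+	 * Byte-level sketch for the little-endian case below: the three
+	 * bytes 0x22, 0xa0, NN encode MOVI a2, NN. 0x22 selects the MOVI
+	 * opcode class with a2 as the target, the 0xa0 byte carries
+	 * imm[11:8] (zero here), and NN is imm[7:0], hence the 8-bit limit
+	 * checked right below.
+	 */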
+
+#if __NR_rt_sigreturn > 255
+# error Generating the MOVI instruction below breaks!
+#endif
+
+#ifdef __XTENSA_EB__   /* Big Endian version */
+	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
+	err |= __put_user(0x22, &codemem[0]);
+	err |= __put_user(0x0a, &codemem[1]);
+	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+	/* Generate instruction:  SYSCALL */
+	err |= __put_user(0x00, &codemem[3]);
+	err |= __put_user(0x05, &codemem[4]);
+	err |= __put_user(0x00, &codemem[5]);
+
+#elif defined __XTENSA_EL__   /* Little Endian version */
+	/* Generate instruction:  MOVI a2, __NR_rt_sigreturn */
+	err |= __put_user(0x22, &codemem[0]);
+	err |= __put_user(0xa0, &codemem[1]);
+	err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+	/* Generate instruction:  SYSCALL */
+	err |= __put_user(0x00, &codemem[3]);
+	err |= __put_user(0x50, &codemem[4]);
+	err |= __put_user(0x00, &codemem[5]);
+#else
+# error Must use compiler for Xtensa processors.
+#endif
+
+	/* Flush generated code out of the data cache */
+
+	if (err == 0) {
+		__invalidate_icache_range((unsigned long)codemem, 6UL);
+		__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
+	}
+
+	return err;
+}
+
+
+static int setup_frame(struct ksignal *ksig, sigset_t *set,
+		       struct pt_regs *regs)
+{
+	struct rt_sigframe *frame;
+	int err = 0, sig = ksig->sig;
+	unsigned long sp, ra, tp;
+
+	sp = regs->areg[1];
+
+	if ((ksig->ka.sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+		sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	frame = (void *)((sp - sizeof(*frame)) & -16ul);
+
+	if (regs->depc > 64)
+		panic ("Double exception sys_sigreturn\n");
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+		return -EFAULT;
+	}
+
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+	}
+
+	/* Create the user context.  */
+
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]);
+	err |= setup_sigcontext(frame, regs);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
+		ra = (unsigned long)ksig->ka.sa.sa_restorer;
+	} else {
+
+		/* Create sys_rt_sigreturn syscall in stack frame */
+
+		err |= gen_return_code(frame->retcode);
+
+		if (err) {
+			return -EFAULT;
+		}
+		ra = (unsigned long) frame->retcode;
+	}
+
+	/* 
+	 * Create signal handler execution context.
+	 * Return context not modified until this point.
+	 */
+
+	/* Set up registers for signal handler; preserve the threadptr */
+	tp = regs->threadptr;
+	start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
+		     (unsigned long) frame);
+
+	/* Set up a stack frame for a call4
+	 * Note: PS.CALLINC is set to one by start_thread
+	 */
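+	/*
+	 * Bits 31:30 of a windowed return address encode the caller's window
+	 * increment; or-ing in 0x40000000 tags ra as a call4 return address.
+	 */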
+	regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
+	regs->areg[6] = (unsigned long) sig;
+	regs->areg[7] = (unsigned long) &frame->info;
+	regs->areg[8] = (unsigned long) &frame->uc;
+	regs->threadptr = tp;
+
+	pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+		 current->comm, current->pid, sig, frame, regs->pc);
+
+	return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+	struct ksignal ksig;
+
+	task_pt_regs(current)->icountlevel = 0;
+
+	if (get_signal(&ksig)) {
+		int ret;
+
+		/* Are we from a system call? */
+
+		if ((signed)regs->syscall >= 0) {
+
+			/* If so, check system call restarting.. */
+
+			switch (regs->areg[2]) {
+			case -ERESTARTNOHAND:
+			case -ERESTART_RESTARTBLOCK:
+				regs->areg[2] = -EINTR;
+				break;
+
+			case -ERESTARTSYS:
+				if (!(ksig.ka.sa.sa_flags & SA_RESTART)) {
+					regs->areg[2] = -EINTR;
+					break;
+				}
+				/* fallthrough */
+			case -ERESTARTNOINTR:
+				regs->areg[2] = regs->syscall;
+				regs->pc -= 3;
+				break;
+
+			default:
+				/* nothing to do */
+				break;
+			}
+		}
+
+		/* Whee!  Actually deliver the signal.  */
+		/* Set up the stack frame */
+		ret = setup_frame(&ksig, sigmask_to_save(), regs);
+		signal_setup_done(ret, &ksig, 0);
+		if (current->ptrace & PT_SINGLESTEP)
+			task_pt_regs(current)->icountlevel = 1;
+
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if ((signed) regs->syscall >= 0) {
+		/* Restart the system call - no handlers present */
+		switch (regs->areg[2]) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->areg[2] = regs->syscall;
+			regs->pc -= 3;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->areg[2] = __NR_restart_syscall;
+			regs->pc -= 3;
+			break;
+		}
+	}
+
+	/* If there's no signal to deliver, we just restore the saved mask.  */
+	restore_saved_sigmask();
+
+	if (current->ptrace & PT_SINGLESTEP)
+		task_pt_regs(current)->icountlevel = 1;
+	return;
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SIGPENDING))
+		do_signal(regs);
+
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+		tracehook_notify_resume(regs);
+}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 0000000..932d646
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,611 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+#  error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+
+/* IPI (Inter-Processor Interrupt) */
+
+#define IPI_IRQ	0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+static struct irqaction ipi_irqaction = {
+	.handler =	ipi_interrupt,
+	.flags =	IRQF_PERCPU,
+	.name =		"ipi",
+};
+
+void ipi_init(void)
+{
+	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+	setup_irq(irq, &ipi_irqaction);
+}
+
+static inline unsigned int get_core_count(void)
+{
+	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+	unsigned int syscfgid = get_er(SYSCFGID);
+	return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+	/* Bits 0..13 of SYSCFGID contain the core id. */
+	unsigned int core_id = get_er(SYSCFGID);
+	return core_id & 0x3fff;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned i;
+
+	for (i = 0; i < max_cpus; ++i)
+		set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+	unsigned i;
+	unsigned int ncpus = get_core_count();
+	unsigned int core_id = get_core_id();
+
+	pr_info("%s: Core Count = %d\n", __func__, ncpus);
+	pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+	for (i = 0; i < ncpus; ++i)
+		set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+	unsigned int cpu = smp_processor_id();
+	BUG_ON(cpu != 0);
+	cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	init_mmu();
+
+#ifdef CONFIG_DEBUG_KERNEL
+	if (boot_secondary_processors == 0) {
+		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+			__func__, boot_secondary_processors, cpu);
+		for (;;)
+			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+	}
+
+	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+		__func__, boot_secondary_processors, cpu);
+#endif
+	/* Init EXCSAVE1 */
+
+	secondary_trap_init();
+
+	/* All kernel threads share the same mm context. */
+
+	mmget(mm);
+	mmgrab(mm);
+	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	enter_lazy_tlb(mm, current);
+
+	preempt_disable();
+	trace_hardirqs_off();
+
+	calibrate_delay();
+
+	notify_cpu_starting(cpu);
+
+	secondary_init_irq();
+	local_timer_setup(cpu);
+
+	set_cpu_online(cpu, true);
+
+	local_irq_enable();
+
+	complete(&cpu_running);
+
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+static void mx_cpu_start(void *p)
+{
+	unsigned cpu = (unsigned)p;
+	unsigned long run_stall_mask = get_er(MPSCORE);
+
+	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+			__func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+	unsigned cpu = (unsigned)p;
+	unsigned long run_stall_mask = get_er(MPSCORE);
+
+	set_er(run_stall_mask | (1u << cpu), MPSCORE);
+	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+			__func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	unsigned long ccount;
+	int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_start_id = cpu;
+	system_flush_invalidate_dcache_range(
+			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
+	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
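+	/*
+	 * Handshake with the secondary's boot code: publish a non-zero
+	 * ccount value in cpu_start_ccount and wait for the secondary to
+	 * clear it, which signals that it is executing. The loop runs twice,
+	 * presumably to guard against a stale clear.
+	 */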
+	for (i = 0; i < 2; ++i) {
+		do
+			ccount = get_ccount();
+		while (!ccount);
+
+		cpu_start_ccount = ccount;
+
+		while (time_before(jiffies, timeout)) {
+			mb();
+			if (!cpu_start_ccount)
+				break;
+		}
+
+		if (cpu_start_ccount) {
+			smp_call_function_single(0, mx_cpu_stop,
+					(void *)cpu, 1);
+			cpu_start_ccount = 0;
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+	int ret = 0;
+
+	if (cpu_asid_cache(cpu) == 0)
+		cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+	start_info.stack = (unsigned long)task_pt_regs(idle);
+	wmb();
+
+	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+			__func__, cpu, idle, start_info.stack);
+
+	ret = boot_secondary(cpu, idle);
+	if (ret == 0) {
+		wait_for_completion_timeout(&cpu_running,
+				msecs_to_jiffies(1000));
+		if (!cpu_online(cpu))
+			ret = -EIO;
+	}
+
+	if (ret)
+		pr_err("CPU %u failed to boot\n", cpu);
+
+	return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	local_flush_cache_all();
+	local_flush_tlb_all();
+	invalidate_page_directory();
+
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	while (time_before(jiffies, timeout)) {
+		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+				sizeof(cpu_start_id));
+		if (cpu_start_id == -cpu) {
+			platform_cpu_kill(cpu);
+			return;
+		}
+	}
+	pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+	idle_task_exit();
+	local_irq_disable();
+	__asm__ __volatile__(
+			"	movi	a2, cpu_restart\n"
+			"	jx	a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+	IPI_RESCHEDULE = 0,
+	IPI_CALL_FUNC,
+	IPI_CPU_STOP,
+	IPI_MAX
+};
+
+static const struct {
+	const char *short_text;
+	const char *long_text;
+} ipi_text[] = {
+	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
+	{ .short_text = "CAL", .long_text = "Function call interrupts" },
+	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+	unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
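+/*
+ * Raise an IPI on every core in callmask except the sender by writing a
+ * bitmask of target cores to the MIPISET register for the given message
+ * type; each target reads and acknowledges it via MIPICAUSE in
+ * ipi_interrupt() below.
+ */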
+static void send_ipi_message(const struct cpumask *callmask,
+		enum ipi_msg_type msg_id)
+{
+	int index;
+	unsigned long mask = 0;
+
+	for_each_cpu(index, callmask)
+		if (index != smp_processor_id())
+			mask |= 1 << index;
+
+	set_er(mask, MIPISET(msg_id));
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+	struct cpumask targets;
+
+	cpumask_copy(&targets, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &targets);
+	send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+	set_cpu_online(cpu, false);
+	machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+	unsigned int cpu = smp_processor_id();
+	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+	unsigned int msg;
+	unsigned i;
+
+	msg = get_er(MIPICAUSE(cpu));
+	for (i = 0; i < IPI_MAX; i++)
+		if (msg & (1 << i)) {
+			set_er(1 << i, MIPICAUSE(cpu));
+			++ipi->ipi_count[i];
+		}
+
+	if (msg & (1 << IPI_RESCHEDULE))
+		scheduler_ipi();
+	if (msg & (1 << IPI_CALL_FUNC))
+		generic_smp_call_function_interrupt();
+	if (msg & (1 << IPI_CPU_STOP))
+		ipi_cpu_stop(cpu);
+
+	return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+	unsigned i;
+
+	for (i = 0; i < IPI_MAX; ++i) {
+		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+		for_each_online_cpu(cpu)
+			seq_printf(p, " %10lu",
+					per_cpu(ipi_data, cpu).ipi_count[i]);
+		seq_printf(p, "   %s\n", ipi_text[i].long_text);
+	}
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+	pr_debug("setup_profiling_timer %d\n", multiplier);
+	return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+	struct vm_area_struct *vma;
+	unsigned long addr1;
+	unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+	local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+	local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct flush_data fd = {
+		.vma = vma,
+		.addr1 = addr,
+	};
+	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.vma = vma,
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+	local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+	on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+		     unsigned long address, unsigned long pfn)
+{
+	struct flush_data fd = {
+		.vma = vma,
+		.addr1 = address,
+		.addr2 = pfn,
+	};
+	on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.vma = vma,
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 0000000..0df4080
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,266 @@
+/*
+ * Kernel and userspace stack tracing.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+			   int (*ufn)(struct stackframe *frame, void *data),
+			   void *data)
+{
+	unsigned long windowstart = regs->windowstart;
+	unsigned long windowbase = regs->windowbase;
+	unsigned long a0 = regs->areg[0];
+	unsigned long a1 = regs->areg[1];
+	unsigned long pc = regs->pc;
+	struct stackframe frame;
+	int index;
+
+	if (!depth--)
+		return;
+
+	frame.pc = pc;
+	frame.sp = a1;
+
+	if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+		return;
+
+	/* Two steps:
+	 *
+	 * 1. Look through the register window for the
+	 * previous PCs in the call trace.
+	 *
+	 * 2. Look on the stack.
+	 */
+
+	/* Step 1.  */
+	/* Rotate WINDOWSTART to move the bit corresponding to
+	 * the current window to the bit #0.
+	 */
+	windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
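+	/* Concatenating two copies of WINDOWSTART and shifting right by
+	 * WINDOWBASE implements a WSBITS-wide rotation that brings the
+	 * current window's bit to position 0.
+	 */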
+
+	/* Look for bits that are set, they correspond to
+	 * valid windows.
+	 */
+	for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+		if (windowstart & (1 << index)) {
+			/* Get the PC from a0 and a1. */
+			pc = MAKE_PC_FROM_RA(a0, pc);
+			/* Read a0 and a1 from the
+			 * corresponding position in AREGs.
+			 */
+			a0 = regs->areg[index * 4];
+			a1 = regs->areg[index * 4 + 1];
+
+			frame.pc = pc;
+			frame.sp = a1;
+
+			if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+				return;
+		}
+
+	/* Step 2. */
+	/* We are done with the register window, we need to
+	 * look through the stack.
+	 */
+	if (!depth)
+		return;
+
+	/* Start from the a1 register. */
+	/* a1 = regs->areg[1]; */
+	while (a0 != 0 && depth--) {
+		pc = MAKE_PC_FROM_RA(a0, pc);
+
+		/* Check if the region is OK to access. */
+		if (!access_ok(VERIFY_READ, &SPILL_SLOT(a1, 0), 8))
+			return;
+		/* Copy a1, a0 from user space stack frame. */
+		if (__get_user(a0, &SPILL_SLOT(a1, 0)) ||
+		    __get_user(a1, &SPILL_SLOT(a1, 1)))
+			return;
+
+		frame.pc = pc;
+		frame.sp = a1;
+
+		if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+			return;
+	}
+}
+EXPORT_SYMBOL(xtensa_backtrace_user);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+			     int (*kfn)(struct stackframe *frame, void *data),
+			     int (*ufn)(struct stackframe *frame, void *data),
+			     void *data)
+{
+	unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ?
+		regs->depc : regs->pc;
+	unsigned long sp_start, sp_end;
+	unsigned long a0 = regs->areg[0];
+	unsigned long a1 = regs->areg[1];
+
+	sp_start = a1 & ~(THREAD_SIZE - 1);
+	sp_end = sp_start + THREAD_SIZE;
+
+	/* Spill the register window to the stack first. */
+	spill_registers();
+
+	/* Read the stack frames one by one and create the PC
+	 * from the a0 and a1 registers saved there.
+	 */
+	while (a1 > sp_start && a1 < sp_end && depth--) {
+		struct stackframe frame;
+
+		frame.pc = pc;
+		frame.sp = a1;
+
+		if (kernel_text_address(pc) && kfn(&frame, data))
+			return;
+
+		if (pc == (unsigned long)&common_exception_return) {
+			regs = (struct pt_regs *)a1;
+			if (user_mode(regs)) {
+				if (ufn == NULL)
+					return;
+				xtensa_backtrace_user(regs, depth, ufn, data);
+				return;
+			}
+			a0 = regs->areg[0];
+			a1 = regs->areg[1];
+			continue;
+		}
+
+		sp_start = a1;
+
+		pc = MAKE_PC_FROM_RA(a0, pc);
+		a0 = SPILL_SLOT(a1, 0);
+		a1 = SPILL_SLOT(a1, 1);
+	}
+}
+EXPORT_SYMBOL(xtensa_backtrace_kernel);
+
+#endif
+
+void walk_stackframe(unsigned long *sp,
+		int (*fn)(struct stackframe *frame, void *data),
+		void *data)
+{
+	unsigned long a0, a1;
+	unsigned long sp_end;
+
+	a1 = (unsigned long)sp;
+	sp_end = ALIGN(a1, THREAD_SIZE);
+
+	spill_registers();
+
+	while (a1 < sp_end) {
+		struct stackframe frame;
+
+		sp = (unsigned long *)a1;
+
+		a0 = SPILL_SLOT(a1, 0);
+		a1 = SPILL_SLOT(a1, 1);
+
+		if (a1 <= (unsigned long)sp)
+			break;
+
+		frame.pc = MAKE_PC_FROM_RA(a0, a1);
+		frame.sp = a1;
+
+		if (fn(&frame, data))
+			return;
+	}
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+	struct stack_trace *trace;
+	unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+	struct stack_trace_data *trace_data = data;
+	struct stack_trace *trace = trace_data->trace;
+
+	if (trace_data->skip) {
+		--trace_data->skip;
+		return 0;
+	}
+	if (!kernel_text_address(frame->pc))
+		return 0;
+
+	trace->entries[trace->nr_entries++] = frame->pc;
+	return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	struct stack_trace_data trace_data = {
+		.trace = trace,
+		.skip = trace->skip,
+	};
+	walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+	unsigned long addr;
+	unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+	struct return_addr_data *r = data;
+
+	if (r->skip) {
+		--r->skip;
+		return 0;
+	}
+	if (!kernel_text_address(frame->pc))
+		return 0;
+	r->addr = frame->pc;
+	return 1;
+}
+
+unsigned long return_address(unsigned level)
+{
+	struct return_addr_data r = {
+		.skip = level + 1,
+	};
+	walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+	return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 0000000..8201748
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,98 @@
+/*
+ * arch/xtensa/kernel/syscall.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+#include <linux/uaccess.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/shm.h>
+
+typedef void (*syscall_t)(void);
+
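+/*
+ * Build the table with a GCC range designator: every slot is first set to
+ * sys_ni_syscall, then the __SYSCALL entries generated from
+ * <uapi/asm/unistd.h> override the slots that are actually implemented.
+ */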
+syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */ = {
+	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
+
+#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
+#include <uapi/asm/unistd.h>
+};
+
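+/*
+ * Round addr up to an SHMLBA boundary, then add the cache colour of pgoff,
+ * so that shared mappings of the same page share an alias in a virtually
+ * indexed cache. E.g. (hypothetical values) with SHMLBA 0x4000 and
+ * PAGE_SHIFT 12, addr 0x10001000 with pgoff 1 yields 0x10005000.
+ */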
+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
+{
+	unsigned long ret;
+	long err;
+
+	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
+	if (err)
+		return err;
+	return (long)ret;
+}
+
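+/*
+ * Wrapper for fadvise64_64 with the advice argument moved ahead of the
+ * 64-bit offset/len pair; presumably this keeps the 64-bit arguments
+ * aligned to even register pairs in the syscall ABI.
+ */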
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+		unsigned long long offset, unsigned long long len)
+{
+	return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
+#ifdef CONFIG_MMU
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct vm_area_struct *vmm;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point:  (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+		if (!vmm || addr + len <= vm_start_gap(vmm))
+			return addr;
+		addr = vmm->vm_end;
+		if (flags & MAP_SHARED)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
+#endif
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 0000000..fd524a5
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,218 @@
+/*
+ * arch/xtensa/kernel/time.c
+ *
+ * Timer and clock support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/sched_clock.h>
+
+#include <asm/timex.h>
+#include <asm/platform.h>
+
+unsigned long ccount_freq;		/* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
+
+static u64 ccount_read(struct clocksource *cs)
+{
+	return (u64)get_ccount();
+}
+
+static u64 notrace ccount_sched_clock_read(void)
+{
+	return get_ccount();
+}
+
+static struct clocksource ccount_clocksource = {
+	.name = "ccount",
+	.rating = 200,
+	.read = ccount_read,
+	.mask = CLOCKSOURCE_MASK(32),
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int ccount_timer_set_next_event(unsigned long delta,
+		struct clock_event_device *dev);
+struct ccount_timer {
+	struct clock_event_device evt;
+	int irq_enabled;
+	char name[24];
+};
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
+
+static int ccount_timer_set_next_event(unsigned long delta,
+		struct clock_event_device *dev)
+{
+	unsigned long flags, next;
+	int ret = 0;
+
+	local_irq_save(flags);
+	next = get_ccount() + delta;
+	set_linux_timer(next);
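+	/*
+	 * With unsigned wraparound arithmetic, next - get_ccount() can only
+	 * exceed delta if ccount has already passed next, i.e. the event was
+	 * programmed too late; report that as -ETIME.
+	 */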
+	if (next - get_ccount() > delta)
+		ret = -ETIME;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+/*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+static int ccount_timer_shutdown(struct clock_event_device *evt)
+{
+	struct ccount_timer *timer =
+		container_of(evt, struct ccount_timer, evt);
+
+	if (timer->irq_enabled) {
+		disable_irq(evt->irq);
+		timer->irq_enabled = 0;
+	}
+	return 0;
+}
+
+static int ccount_timer_set_oneshot(struct clock_event_device *evt)
+{
+	struct ccount_timer *timer =
+		container_of(evt, struct ccount_timer, evt);
+
+	if (!timer->irq_enabled) {
+		enable_irq(evt->irq);
+		timer->irq_enabled = 1;
+	}
+	return 0;
+}
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id);
+static struct irqaction timer_irqaction = {
+	.handler =	timer_interrupt,
+	.flags =	IRQF_TIMER,
+	.name =		"timer",
+};
+
+void local_timer_setup(unsigned cpu)
+{
+	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+	struct clock_event_device *clockevent = &timer->evt;
+
+	timer->irq_enabled = 1;
+	clockevent->name = timer->name;
+	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+	clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
+	clockevent->rating = 300;
+	clockevent->set_next_event = ccount_timer_set_next_event;
+	clockevent->set_state_shutdown = ccount_timer_shutdown;
+	clockevent->set_state_oneshot = ccount_timer_set_oneshot;
+	clockevent->tick_resume = ccount_timer_set_oneshot;
+	clockevent->cpumask = cpumask_of(cpu);
+	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+	if (WARN(!clockevent->irq, "error: can't map timer irq"))
+		return;
+	clockevents_config_and_register(clockevent, ccount_freq,
+					0xf, 0xffffffff);
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+#ifdef CONFIG_OF
+static void __init calibrate_ccount(void)
+{
+	struct device_node *cpu;
+	struct clk *clk;
+
+	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+	if (cpu) {
+		clk = of_clk_get(cpu, 0);
+		if (!IS_ERR(clk)) {
+			ccount_freq = clk_get_rate(clk);
+			return;
+		}
+		pr_warn("%s: CPU input clock not found\n", __func__);
+	} else {
+		pr_warn("%s: CPU node not found in the device tree\n",
+			__func__);
+	}
+
+	platform_calibrate_ccount();
+}
+#else
+static inline void calibrate_ccount(void)
+{
+	platform_calibrate_ccount();
+}
+#endif
+#endif
+
+void __init time_init(void)
+{
+	of_clk_init(NULL);
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+	pr_info("Calibrating CPU frequency ");
+	calibrate_ccount();
+	pr_cont("%d.%02d MHz\n",
+		(int)ccount_freq / 1000000,
+		(int)(ccount_freq / 10000) % 100);
+#else
+	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
+#endif
+	WARN(!ccount_freq,
+	     "%s: CPU clock frequency is not set up correctly\n",
+	     __func__);
+	clocksource_register_hz(&ccount_clocksource, ccount_freq);
+	local_timer_setup(0);
+	setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+	timer_probe();
+}
+
+/*
+ * The timer interrupt is called HZ times per second.
+ */
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
+
+	set_linux_timer(get_linux_timer());
+	evt->event_handler(evt);
+
+	/* Allow platform to do something useful (Wdog). */
+	platform_heartbeat();
+
+	return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void calibrate_delay(void)
+{
+	loops_per_jiffy = ccount_freq / HZ;
+	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+		loops_per_jiffy / (1000000 / HZ),
+		(loops_per_jiffy / (10000 / HZ)) % 100);
+}
+#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 0000000..86507fa
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,546 @@
+/*
+ * arch/xtensa/kernel/traps.c
+ *
+ * Exception handling.
+ *
+ * Derived from code with the following copyrights:
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Modified for R3000 by Paul M. Antoine, 1995, 1996
+ * Complete output from die() by Ulf Carlsson, 1998
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Essentially rewritten for the Xtensa architecture port.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel	<chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
+
+#include <asm/stacktrace.h>
+#include <asm/ptrace.h>
+#include <asm/timex.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/hw_breakpoint.h>
+
+/*
+ * Machine specific interrupt handlers
+ */
+
+extern void kernel_exception(void);
+extern void user_exception(void);
+
+extern void fast_syscall_kernel(void);
+extern void fast_syscall_user(void);
+extern void fast_alloca(void);
+extern void fast_unaligned(void);
+extern void fast_second_level_miss(void);
+extern void fast_store_prohibited(void);
+extern void fast_coprocessor(void);
+
+extern void do_illegal_instruction (struct pt_regs*);
+extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
+extern void do_unaligned_user (struct pt_regs*);
+extern void do_multihit (struct pt_regs*, unsigned long);
+extern void do_page_fault (struct pt_regs*, unsigned long);
+extern void do_debug (struct pt_regs*);
+extern void system_call (struct pt_regs*);
+
+/*
+ * The vector table must be preceded by a save area (which
+ * implies it must be in RAM, unless one places RAM immediately
+ * before a ROM and puts the vector at the start of the ROM (!))
+ */
+
+#define KRNL		0x01
+#define USER		0x02
+
+#define COPROCESSOR(x)							\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
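+/*
+ * E.g. COPROCESSOR(0) expands to
+ * { EXCCAUSE_COPROCESSOR0_DISABLED, USER, fast_coprocessor }
+ * routing a disabled-coprocessor fault taken in user mode to the fast
+ * (lazy context switch) coprocessor handler.
+ */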
+
+typedef struct {
+	int cause;
+	int fast;
+	void* handler;
+} dispatch_init_table_t;
+
+static dispatch_init_table_t __initdata dispatch_init_table[] = {
+
+{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
+{ EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
+{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
+{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
+/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
+{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
+{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
+/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+/* EXCCAUSE_PRIVILEGED unhandled */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef CONFIG_XTENSA_UNALIGNED_USER
+{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
+#endif
+{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
+{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
+#endif
+#ifdef CONFIG_MMU
+{ EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
+{ EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
+{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
+/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
+{ EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
+{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
+/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
+#endif /* CONFIG_MMU */
+/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
+#if XTENSA_HAVE_COPROCESSOR(0)
+COPROCESSOR(0),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(1)
+COPROCESSOR(1),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(2)
+COPROCESSOR(2),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(3)
+COPROCESSOR(3),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(4)
+COPROCESSOR(4),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(5)
+COPROCESSOR(5),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(6)
+COPROCESSOR(6),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(7)
+COPROCESSOR(7),
+#endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI,			0,		do_nmi },
+#endif
+{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
+{ -1, -1, 0 }
+
+};
+
+/* The exception table <exc_table> serves two functions:
+ * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
+ * 2. it is a temporary memory buffer for the exception handlers.
+ */
+
+DEFINE_PER_CPU(struct exc_table, exc_table);
+DEFINE_PER_CPU(struct debug_table, debug_table);
+
+void die(const char*, struct pt_regs*, long);
+
+static inline void
+__die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+	if (!user_mode(regs))
+		die(str, regs, err);
+}
+
+/*
+ * Unhandled Exceptions. Kill user task or panic if in kernel space.
+ */
+
+void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+{
+	__die_if_kernel("Caught unhandled exception - should not happen",
+			regs, SIGKILL);
+
+	/* If in user mode, send SIGILL signal to current process */
+	pr_info_ratelimited("Caught unhandled exception in '%s' "
+			    "(pid = %d, pc = %#010lx) - should not happen\n"
+			    "\tEXCCAUSE is %ld\n",
+			    current->comm, task_pid_nr(current), regs->pc,
+			    exccause);
+	force_sig(SIGILL, current);
+}
+
+/*
+ * Multi-hit exception. This is fatal!
+ */
+
+void do_multihit(struct pt_regs *regs, unsigned long exccause)
+{
+	die("Caught multihit exception", regs, SIGKILL);
+}
+
+/*
+ * IRQ handler.
+ */
+
+extern void do_IRQ(int, struct pt_regs *);
+
+#if XTENSA_FAKE_NMI
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fires."
+
+static inline void check_valid_nmi(void)
+{
+	unsigned intread = get_sr(interrupt);
+	unsigned intenable = get_sr(intenable);
+
+	BUG_ON(intread & intenable &
+	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+		 BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+
+	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+		trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
+	nmi_enter();
+	++*this_cpu_ptr(&nmi_count);
+	check_valid_nmi();
+	xtensa_pmu_irq_handler(0, NULL);
+	nmi_exit();
+	set_irq_regs(old_regs);
+}
+#endif
+
+void do_interrupt(struct pt_regs *regs)
+{
+	static const unsigned int_level_mask[] = {
+		0,
+		XCHAL_INTLEVEL1_MASK,
+		XCHAL_INTLEVEL2_MASK,
+		XCHAL_INTLEVEL3_MASK,
+		XCHAL_INTLEVEL4_MASK,
+		XCHAL_INTLEVEL5_MASK,
+		XCHAL_INTLEVEL6_MASK,
+		XCHAL_INTLEVEL7_MASK,
+	};
+	struct pt_regs *old_regs;
+
+	trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+
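+	/*
+	 * Service pending, enabled interrupts highest level first,
+	 * rescanning until nothing at or below LOCKLEVEL remains pending.
+	 */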
+	for (;;) {
+		unsigned intread = get_sr(interrupt);
+		unsigned intenable = get_sr(intenable);
+		unsigned int_at_level = intread & intenable;
+		unsigned level;
+
+		for (level = LOCKLEVEL; level > 0; --level) {
+			if (int_at_level & int_level_mask[level]) {
+				int_at_level &= int_level_mask[level];
+				break;
+			}
+		}
+
+		if (level == 0)
+			break;
+
+		do_IRQ(__ffs(int_at_level), regs);
+	}
+
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+/*
+ * Illegal instruction. Fatal if in kernel space.
+ */
+
+void
+do_illegal_instruction(struct pt_regs *regs)
+{
+	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
+
+	/* If in user mode, send SIGILL signal to current process. */
+
+	pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+			    current->comm, task_pid_nr(current), regs->pc);
+	force_sig(SIGILL, current);
+}
+
+
+/*
+ * Handle unaligned memory accesses from user space. Kill task.
+ *
+ * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
+ * accesses from user space.
+ */
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+void
+do_unaligned_user (struct pt_regs *regs)
+{
+	__die_if_kernel("Unhandled unaligned exception in kernel",
+			regs, SIGKILL);
+
+	current->thread.bad_vaddr = regs->excvaddr;
+	current->thread.error_code = -3;
+	pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
+			    "(pid = %d, pc = %#010lx)\n",
+			    regs->excvaddr, current->comm,
+			    task_pid_nr(current), regs->pc);
+	force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr, current);
+}
+#endif
+
+/* Handle debug events.
+ * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
+ * preemption disabled to avoid rescheduling and keep mapping of hardware
+ * breakpoint structures to debug registers intact, so that
+ * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
+ */
+void
+do_debug(struct pt_regs *regs)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	int ret = check_hw_breakpoint(regs);
+
+	preempt_enable();
+	if (ret == 0)
+		return;
+#endif
+	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
+
+	/* If in user mode, send SIGTRAP signal to current process */
+
+	force_sig(SIGTRAP, current);
+}
+
+
+#define set_handler(type, cause, handler)				\
+	do {								\
+		unsigned int cpu;					\
+									\
+		for_each_possible_cpu(cpu)				\
+			per_cpu(exc_table, cpu).type[cause] = (handler);\
+	} while (0)
+
+/* Set exception C handler - for temporary use when probing exceptions */
+
+void * __init trap_set_handler(int cause, void *handler)
+{
+	void *previous = per_cpu(exc_table, 0).default_handler[cause];
+
+	set_handler(default_handler, cause, handler);
+	return previous;
+}
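+
+/* A minimal usage sketch (hypothetical, not part of this file): probe code,
+ * e.g. the S32C1I selftest, installs a temporary handler, runs the probed
+ * instruction, and restores the previous handler:
+ *
+ *	static void probe_handler(struct pt_regs *regs, unsigned long cause)
+ *	{
+ *		regs->pc += 3;		(skip the faulting instruction)
+ *	}
+ *
+ *	void *old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION,
+ *				     probe_handler);
+ *	(... execute the instruction being probed ...)
+ *	trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, old);
+ */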
+
+
+static void trap_init_excsave(void)
+{
+	unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
+	__asm__ __volatile__("wsr  %0, excsave1\n" : : "a" (excsave1));
+}
+
+static void trap_init_debug(void)
+{
+	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);
+
+	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
+	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
+			     :: "a"(debugsave));
+}
+
+/*
+ * Initialize dispatch tables.
+ *
+ * The exception handlers are stored compactly in the __init section, in
+ * dispatch_init_table. This function initializes the following three tables
+ * from that compressed table:
+ * - fast user		first dispatch table for user exceptions
+ * - fast kernel	first dispatch table for kernel exceptions
+ * - default C-handler	C-handler called by the default fast handler.
+ *
+ * See vectors.S for more details.
+ */
+
+void __init trap_init(void)
+{
+	int i;
+
+	/* Setup default vectors. */
+
+	for (i = 0; i < EXCCAUSE_N; i++) {
+		set_handler(fast_user_handler, i, user_exception);
+		set_handler(fast_kernel_handler, i, kernel_exception);
+		set_handler(default_handler, i, do_unhandled);
+	}
+
+	/* Setup specific handlers. */
+
+	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
+
+		int fast = dispatch_init_table[i].fast;
+		int cause = dispatch_init_table[i].cause;
+		void *handler = dispatch_init_table[i].handler;
+
+		if (fast == 0)
+			set_handler(default_handler, cause, handler);
+		if (fast && fast & USER)
+			set_handler(fast_user_handler, cause, handler);
+		if (fast && fast & KRNL)
+			set_handler(fast_kernel_handler, cause, handler);
+	}
+
+	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
+	trap_init_excsave();
+	trap_init_debug();
+}
+
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+	trap_init_excsave();
+	trap_init_debug();
+}
+#endif
+
+/*
+ * This function dumps the current valid window frame and other base registers.
+ */
+
+void show_regs(struct pt_regs * regs)
+{
+	int i, wmask;
+
+	show_regs_print_info(KERN_DEFAULT);
+
+	wmask = regs->wmask & ~1;
+
+	for (i = 0; i < 16; i++) {
+		if ((i % 8) == 0)
+			pr_info("a%02d:", i);
+		pr_cont(" %08lx", regs->areg[i]);
+	}
+	pr_cont("\n");
+	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+		regs->pc, regs->ps, regs->depc, regs->excvaddr);
+	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+		regs->lbeg, regs->lend, regs->lcount, regs->sar);
+	if (user_mode(regs))
+		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+			regs->windowbase, regs->windowstart, regs->wmask,
+			regs->syscall);
+}
+
+static int show_trace_cb(struct stackframe *frame, void *data)
+{
+	if (kernel_text_address(frame->pc))
+		pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+	return 0;
+}
+
+void show_trace(struct task_struct *task, unsigned long *sp)
+{
+	if (!sp)
+		sp = stack_pointer(task);
+
+	pr_info("Call Trace:\n");
+	walk_stackframe(sp, show_trace_cb, NULL);
+#ifndef CONFIG_KALLSYMS
+	pr_cont("\n");
+#endif
+}
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	int i = 0;
+	unsigned long *stack;
+
+	if (!sp)
+		sp = stack_pointer(task);
+	stack = sp;
+
+	pr_info("Stack:\n");
+
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (kstack_end(sp))
+			break;
+		pr_cont(" %08lx", *sp++);
+		if (i % 8 == 7)
+			pr_cont("\n");
+	}
+	show_trace(task, stack);
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+	static int die_counter;
+
+	console_verbose();
+	spin_lock_irq(&die_lock);
+
+	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+		IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
+	show_regs(regs);
+	if (!user_mode(regs))
+		show_stack(NULL, (unsigned long*)regs->areg[1]);
+
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+	spin_unlock_irq(&die_lock);
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops)
+		panic("Fatal exception");
+
+	do_exit(err);
+}
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 0000000..841503d
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,791 @@
+/*
+ * arch/xtensa/kernel/vectors.S
+ *
+ * This file contains all exception vectors (user, kernel, and double),
+ * as well as the window vectors (overflow and underflow), and the debug
+ * vector. These are the primary vectors executed by the processor if an
+ * exception occurs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+/*
+ * We use a two-level table approach. The user and kernel exception vectors
+ * use a first-level dispatch table to dispatch the exception to a registered
+ * fast handler or the default handler, if no fast handler was registered.
+ * The default handler sets up a C-stack and dispatches the exception to a
+ * registered C handler in the second-level dispatch table.
+ *
+ * Fast handler entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original value in depc
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ * The value for PT_DEPC saved to stack also functions as a boolean to
+ * indicate that the exception is either a double or a regular exception:
+ *
+ *   PT_DEPC	>= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
+ *		<  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note:  Neither the kernel nor the user exception handler generates literals.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/vectors.h>
+
+#define WINDOW_VECTORS_SIZE   0x180
+
+
+/*
+ * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
+ *
+ * We get here when an exception occurred while we were in userland.
+ * We switch to the kernel stack and jump to the first level handler
+ * associated to the exception cause.
+ *
+ * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
+ *       decremented by PT_USER_SIZE.
+ */
+
+	.section .UserExceptionVector.text, "ax"
+
+ENTRY(_UserExceptionVector)
+
+	xsr	a3, excsave1		# save a3 and get dispatch table
+	wsr	a2, depc		# save a2
+	l32i	a2, a3, EXC_TABLE_KSTK	# load kernel stack to a2
+	s32i	a0, a2, PT_AREG0	# save a0 to ESF
+	rsr	a0, exccause		# retrieve exception cause
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3		# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
+	xsr	a3, excsave1		# restore a3 and dispatch table
+	jx	a0
+
+ENDPROC(_UserExceptionVector)
+
+/*
+ * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
+ *
+ * We get this exception when we were already in kernel space.
+ * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * jump to the first-level handler associated with the exception cause.
+ *
+ * Note: we need to preserve space for the spill region.
+ */
+
+	.section .KernelExceptionVector.text, "ax"
+
+ENTRY(_KernelExceptionVector)
+
+	xsr	a3, excsave1		# save a3, and get dispatch table
+	wsr	a2, depc		# save a2
+	addi	a2, a1, -16-PT_SIZE	# adjust stack pointer
+	s32i	a0, a2, PT_AREG0	# save a0 to ESF
+	rsr	a0, exccause		# retrieve exception cause
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3		# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
+	xsr	a3, excsave1		# restore a3 and dispatch table
+	jx	a0
+
+ENDPROC(_KernelExceptionVector)
+
+/*
+ * Double exception vector (Exceptions with PS.EXCM == 1)
+ * We get this exception when another exception occurs while we are
+ * already in an exception, such as window overflow/underflow exception,
+ * or 'expected' exceptions, for example memory exception when we were trying
+ * to read data from an invalid address in user space.
+ *
+ * Note that this vector is never invoked for level-1 interrupts, because such
+ * interrupts are disabled (masked) when PS.EXCM is set.
+ *
+ * We decode the exception and take the appropriate action.  However, the
+ * double exception vector is much more careful, because a lot more error
+ * cases go through the double exception vector than through the user and
+ * kernel exception vectors.
+ *
+ * Occasionally, the kernel expects a double exception to occur.  This usually
+ * happens when accessing user-space memory with the user's permissions
+ * (l32e/s32e instructions).  The kernel state, though, is not always suitable
+ * for immediate transfer of control to handle_double, where "normal" exception
+ * processing occurs. Also in kernel mode, TLB misses can occur if accessing
+ * vmalloc memory, possibly requiring repair in a double exception handler.
+ *
+ * The variable at the EXC_TABLE_FIXUP offset from the pointer in EXCSAVE_1
+ * doubles as a boolean variable and a pointer to a fixup routine. If the
+ * variable EXC_TABLE_FIXUP is non-zero, this handler jumps to that address.
+ * A value of zero indicates to use the default kernel/user exception handler.
+ * There is one exception: when the value is identical to the exc_table
+ * label, the kernel is in trouble. This mechanism is used to protect critical
+ * sections, mainly when the handler writes to the stack to assert that the
+ * stack pointer is valid. Once the fixup/default handler leaves that area,
+ * the EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
+ *
+ * A procedure wishing to use this mechanism should set EXC_TABLE_FIXUP to
+ * the nonzero address of a fixup routine before it can cause a double
+ * exception, and reset it before it returns.
+ *
+ * Some other things to take care of when a fast exception handler doesn't
+ * specify a particular fixup handler but wants to use the default handlers:
+ *
+ *  - The original stack pointer (in a1) must not be modified. The fast
+ *    exception handler should only use a2 as the stack pointer.
+ *
+ *  - If the fast handler manipulates the stack pointer (in a2), it has to
+ *    register a valid fixup handler and cannot use the default handlers.
+ *
+ *  - The handler can use any other generic register from a3 to a15, but it
+ *    must save the content of these registers to stack (PT_AREG3...PT_AREGx)
+ *
+ *  - These registers must be saved before a double exception can occur.
+ *
+ *  - (*) If we ever implement handling signals while in double exceptions,
+ *    the number of registers a fast handler has saved (excluding a0 and a1)
+ *    must be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4,
+ *    etc.)
+ *
+ * The fixup handlers are special handlers:
+ *
+ *  - Fixup entry conditions differ from regular exceptions:
+ *
+ *	a0:	   DEPC
+ *	a1: 	   a1
+ *	a2:	   trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ *	a3:	   exctable
+ *	depc:	   a0
+ *	excsave_1: a3
+ *
+ *  - When the kernel enters the fixup handler, it still assumes it is in a
+ *    critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
+ *    The fixup handler, therefore, has to re-register itself as the fixup
+ *    handler before it returns from the double exception.
+ *
+ *  - Fixup handler can share the same exception frame with the fast handler.
+ *    The kernel stack pointer is not changed when entering the fixup handler.
+ *
+ *  - Fixup handlers can jump to the default kernel and user exception
+ *    handlers. Before jumping, though, a fixup handler has to set up an
+ *    exception frame on the stack. Because the default handler resets the
+ *    registered fixup handler, the fixup handler must make sure that the
+ *    default handler returns to it instead of to the exception address, so
+ *    it can re-register itself as the fixup handler.
+ *
+ * In case of a critical condition where the kernel cannot recover, we jump
+ * to unrecoverable_exception with the following entry conditions.
+ * All registers a0...a15 are unchanged from the last exception, except:
+ *
+ *	a0:	   last address before we jumped to unrecoverable_exception.
+ *	excsave_1: a0
+ *
+ *
+ * See the handle_alloca_user and spill_registers routines for example clients.
+ *
+ * FIXME: Note: we currently don't allow signal handling coming from a double
+ *        exception, so the item marked with (*) is not required.
+ */
+
+	.section .DoubleExceptionVector.text, "ax"
+
+ENTRY(_DoubleExceptionVector)
+
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+
+	/* Check for kernel double exception (usually fatal). */
+
+	rsr	a2, ps
+	_bbsi.l	a2, PS_UM_BIT, 1f
+	j	.Lksp
+
+	.align	4
+	.literal_position
+1:
+	/* Check if we are currently handling a window exception. */
+	/* Note: We don't need to indicate that we enter a critical section. */
+
+	xsr	a0, depc		# get DEPC, save a0
+
+	movi	a2, WINDOW_VECTORS_VADDR
+	_bltu	a0, a2, .Lfixup
+	addi	a2, a2, WINDOW_VECTORS_SIZE
+	_bgeu	a0, a2, .Lfixup
+
+	/* Window overflow/underflow exception. Get stack pointer. */
+
+	l32i	a2, a3, EXC_TABLE_KSTK
+
+	/* Check for overflow/underflow exception, jump if overflow. */
+
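+	/* Overflow handlers sit at offsets 0x00/0x80/0x100 of the window
+	 * vectors and underflow handlers at 0x40/0xc0/0x140, so bit 6 of
+	 * the faulting PC tells them apart.
+	 */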
+	bbci.l	a0, 6, _DoubleExceptionVector_WindowOverflow
+
+	/*
+	 * Restart window underflow exception.
+	 * Currently:
+	 *	depc = orig a0,
+	 *	a0 = orig DEPC,
+	 *	a2 = new sp based on KSTK from exc_table
+	 *	a3 = excsave_1
+	 *	excsave_1 = orig a3
+	 *
+	 * We return to the instruction in user space that caused the window
+	 * underflow exception. Therefore, we change window base to the value
+	 * before we entered the window underflow exception and prepare the
+	 * registers to return as if we were coming from a regular exception
+	 * by changing depc (in a0).
+	 * Note: We can trash the current window frame (a0...a3) and depc!
+	 */
+_DoubleExceptionVector_WindowUnderflow:
+	xsr	a3, excsave1
+	wsr	a2, depc		# save stack pointer temporarily
+	rsr	a0, ps
+	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+	wsr	a0, windowbase
+	rsync
+
+	/* We are now in the previous window frame. Save registers again. */
+
+	xsr	a2, depc		# save a2 and get stack pointer
+	s32i	a0, a2, PT_AREG0
+	xsr	a3, excsave1
+	rsr	a0, exccause
+	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
+	addx4	a0, a0, a3
+	xsr	a3, excsave1
+	l32i	a0, a0, EXC_TABLE_FAST_USER
+	jx	a0
+
+	/*
+	 * We only allow ITLB and DTLB miss exceptions if we are in kernel space.
+	 * All other exceptions are unexpected and thus unrecoverable!
+	 */
+
+#ifdef CONFIG_MMU
+	.extern fast_second_level_miss_double_kernel
+
+.Lksp:	/* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+	rsr	a3, exccause
+	beqi	a3, EXCCAUSE_ITLB_MISS, 1f
+	addi	a3, a3, -EXCCAUSE_DTLB_MISS
+	bnez	a3, .Lunrecoverable
+1:	movi	a3, fast_second_level_miss_double_kernel
+	jx	a3
+#else
+.equ	.Lksp,	.Lunrecoverable
+#endif
+
+	/* Critical! We can't handle this situation. PANIC! */
+
+	.extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a0, depc
+
+.Lunrecoverable:
+	rsr	a3, excsave1
+	wsr	a0, excsave1
+	call0	unrecoverable_exception
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+	/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
+
+	/* Enter critical section. */
+
+	l32i	a2, a3, EXC_TABLE_FIXUP
+	s32i	a3, a3, EXC_TABLE_FIXUP
+	beq	a2, a3, .Lunrecoverable_fixup	# critical section
+	beqz	a2, .Ldflt			# no handler was registered
+
+	/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
+
+	jx	a2
+
+.Ldflt:	/* Get stack pointer. */
+
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	addi	a2, a2, -PT_USER_SIZE
+
+	/* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
+
+	s32i	a0, a2, PT_DEPC
+	l32i	a0, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a0, depc
+	s32i	a0, a2, PT_AREG0
+
+	/* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
+
+	rsr	a0, exccause
+	addx4	a0, a0, a3
+	xsr	a3, excsave1
+	l32i	a0, a0, EXC_TABLE_FAST_USER
+	jx	a0
+
+	/*
+	 * Restart window OVERFLOW exception.
+	 * Currently:
+	 *	depc = orig a0,
+	 *	a0 = orig DEPC,
+	 *	a2 = new sp based on KSTK from exc_table
+	 *	a3 = EXCSAVE_1
+	 *	excsave_1 = orig a3
+	 *
+	 * We return to the instruction in user space that caused the window
+	 * overflow exception. Therefore, we change window base to the value
+	 * before we entered the window overflow exception and prepare the
+	 * registers to return as if we were coming from a regular exception
+	 * by changing DEPC (in a0).
+	 *
+	 * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+	 * can clobber depc.
+	 *
+	 * The tricky part here is that overflow8 and overflow12 handlers
+	 * save a0, then clobber a0.  To restart the handler, we have to restore
+	 * a0 if the double exception was past the point where a0 was clobbered.
+	 *
+	 * To keep things simple, we take advantage of the fact that all
+	 * overflow handlers save a0 in their very first instruction.  If
+	 * DEPC was past that instruction, we can safely restore a0 from
+	 * where it was saved on the stack.
+	 *
+	 * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
+	 */
+_DoubleExceptionVector_WindowOverflow:
+	extui	a2, a0, 0, 6	# get offset into 64-byte vector handler
+	beqz	a2, 1f		# if at start of vector, don't restore
+
+	addi	a0, a0, -128
+	bbsi.l	a0, 8, 1f	# don't restore except for overflow 8 and 12
+
+	/*
+	 * This fixup handler is for the extremely unlikely case where the
+	 * overflow handler's reference through a0 gets a hardware TLB refill
+	 * that bumps out the (distinct, aliasing) TLB entry that mapped its
+	 * prior references through a9/a13, and where our reference now
+	 * through a9/a13 gets a second-level miss exception (not a hardware
+	 * TLB refill).
+	 */
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	bbsi.l	a0, 7, 2f
+
+	/*
+	 * Restore a0 as saved by _WindowOverflow8().
+	 */
+
+	l32e	a0, a9, -16
+	wsr	a0, depc	# replace the saved a0
+	j	3f
+
+2:
+	/*
+	 * Restore a0 as saved by _WindowOverflow12().
+	 */
+
+	l32e	a0, a13, -16
+	wsr	a0, depc	# replace the saved a0
+3:
+	xsr	a3, excsave1
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+1:
+	/*
+	 * Restore WindowBase while leaving all address registers restored.
+	 * We have to use ROTW for this, because WSR.WINDOWBASE requires
+	 * an address register (which would prevent restore).
+	 *
+	 * Window Base goes from 0 ... 7 (modulo 8)
+	 * Window Start is 8 bits; e.g. 0b01010101 = 0x55 from a series of call8s
+	 */
+
+	rsr	a0, ps
+	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+	rsr	a2, windowbase
+	sub	a0, a2, a0
+	extui	a0, a0, 0, 3
+
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	beqi	a0, 1, .L1pane
+	beqi	a0, 3, .L3pane
+
+	rsr	a0, depc
+	rotw	-2
+
+	/*
+	 * We are now in the user code's original window frame.
+	 * Process the exception as a user exception as if it was
+	 * taken by the user code.
+	 *
+	 * This is similar to the user exception vector,
+	 * except that PT_DEPC isn't set to EXCCAUSE.
+	 */
+1:
+	xsr	a3, excsave1
+	wsr	a2, depc
+	l32i	a2, a3, EXC_TABLE_KSTK
+	s32i	a0, a2, PT_AREG0
+	rsr	a0, exccause
+
+	s32i	a0, a2, PT_DEPC
+
+_DoubleExceptionVector_handle_exception:
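+	/* a0 holds EXCCAUSE.  Unaligned accesses take the generic
+	 * user_exception path; every other cause is dispatched through the
+	 * fast_user table (the subtraction below biases the table index,
+	 * which the load offset compensates for).
+	 */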
+	addi	a0, a0, -EXCCAUSE_UNALIGNED
+	beqz	a0, 2f
+	addx4	a0, a0, a3
+	l32i	a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED
+	xsr	a3, excsave1
+	jx	a0
+2:
+	movi	a0, user_exception
+	xsr	a3, excsave1
+	jx	a0
+
+.L1pane:
+	rsr	a0, depc
+	rotw	-1
+	j	1b
+
+.L3pane:
+	rsr	a0, depc
+	rotw	-3
+	j	1b
+
+ENDPROC(_DoubleExceptionVector)
+
+	.text
+/*
+ * Fixup handler for TLB miss in the double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up the exception stack to return to the appropriate a0 restore code
+ *   (we'll need to rotate the window back and there's no place to save this
+ *    information, so we use a different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry the overflow from the start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+	.literal_position
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+	rsr	a0, ps
+	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+	rsr	a2, windowbase
+	sub	a0, a2, a0
+	extui	a0, a0, 0, 3
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	_beqi	a0, 1, .Lhandle_1
+	_beqi	a0, 3, .Lhandle_3
+
+	.macro	overflow_fixup_handle_exception_pane n
+
+	rsr	a0, depc
+	rotw	-\n
+
+	xsr	a3, excsave1
+	wsr	a2, depc
+	l32i	a2, a3, EXC_TABLE_KSTK
+	s32i	a0, a2, PT_AREG0
+
+	movi	a0, .Lrestore_\n
+	s32i	a0, a2, PT_DEPC
+	rsr	a0, exccause
+	j	_DoubleExceptionVector_handle_exception
+
+	.endm
+
+	overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+	overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+	overflow_fixup_handle_exception_pane 3
+
+	.macro	overflow_fixup_restore_a0_pane n
+
+	rotw	\n
+	/* We need to preserve the a0 value here to be able to handle an
+	 * exception that may occur on the a0 reload from the stack. It may
+	 * occur because the TLB miss handler may not be atomic and the
+	 * pointer to the page table may be lost before we get here. There
+	 * are no free registers, so we have to use the EXC_TABLE_DOUBLE_SAVE
+	 * area.
+	 */
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	bbsi.l	a0, 7, 1f
+	l32e	a0, a9, -16
+	j	2f
+1:
+	l32e	a0, a13, -16
+2:
+	rotw	-\n
+
+	.endm
+
+.Lrestore_2:
+	overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, 0
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	rfe
+
+.Lrestore_1:
+	overflow_fixup_restore_a0_pane 1
+	j	.Lset_default_fixup
+.Lrestore_3:
+	overflow_fixup_restore_a0_pane 3
+	j	.Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+/*
+ * Debug interrupt vector
+ *
+ * There is not much space here, so simply jump to another handler.
+ * EXCSAVE[DEBUGLEVEL] has been set to that handler.
+ */
+
+	.section .DebugInterruptVector.text, "ax"
+
+ENTRY(_DebugInterruptVector)
+
+	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+	s32i	a0, a3, DT_DEBUG_SAVE
+	l32i	a0, a3, DT_DEBUG_EXCEPTION
+	jx	a0
+
+ENDPROC(_DebugInterruptVector)
+
+
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes and uses no literals; the extra
+ * 8 bytes that would otherwise be required are placed in the window
+ * vectors area, where there is space.  With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the IRQ handling time.
+ */
+
+	.macro	irq_entry_level level
+
+	.if	XCHAL_EXCM_LEVEL >= \level
+	.section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+	wsr	a0, excsave2
+	rsr	a0, epc\level
+	wsr	a0, epc1
+	.if	\level <= LOCKLEVEL
+	movi	a0, EXCCAUSE_LEVEL1_INTERRUPT
+	.else
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	.endif
+	wsr	a0, exccause
+	rsr	a0, eps\level
+					# branch to user or kernel vector
+	j	_SimulateUserKernelVectorException
+	.endif
+
+	.endm
+
+	irq_entry_level 2
+	irq_entry_level 3
+	irq_entry_level 4
+	irq_entry_level 5
+	irq_entry_level 6
+
+
+/* Window overflow and underflow handlers.
+ * The handlers must be 64 bytes apart: overflow-4, underflow-4, overflow-8,
+ * underflow-8, overflow-12, and underflow-12, in that order (see the
+ * ENTRY_ALIGN64 definitions below).
+ *
+ * Note: We rerun the underflow handlers if we hit an exception, so
+ *	 we try to access any page that would cause a page fault early.
+ */
+
+#define ENTRY_ALIGN64(name)	\
+	.globl name;		\
+	.align 64;		\
+	name:
+
+	.section		.WindowVectors.text, "ax"
+
+
+/* 4-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow4)
+
+	s32e	a0, a5, -16
+	s32e	a1, a5, -12
+	s32e	a2, a5,  -8
+	s32e	a3, a5,  -4
+	rfwo
+
+ENDPROC(_WindowOverflow4)
+
+
+#if XCHAL_EXCM_LEVEL >= 2
+	/*  Not a window vector - but a convenient location
+	 *  (where we know there's space) for continuation of
+	 *  medium priority interrupt dispatch code.
+	 *  On entry here, a0 contains PS, and EXCSAVE2 contains the saved a0:
+	 */
+	.align 4
+_SimulateUserKernelVectorException:
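+	/* PS.EXCM is known to be clear here (the interrupt would have been
+	 * masked otherwise), so ADDI suffices to set it in the saved PS.
+	 */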
+	addi	a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
+	wsr	a0, ps
+#endif
+	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
+	xsr	a0, excsave2		# restore a0
+	j	_KernelExceptionVector	# simulate kernel vector exception
+1:	xsr	a0, excsave2		# restore a0
+	j	_UserExceptionVector	# simulate user vector exception
+#endif
+
+
+/* 4-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow4)
+
+	l32e	a0, a5, -16
+	l32e	a1, a5, -12
+	l32e	a2, a5,  -8
+	l32e	a3, a5,  -4
+	rfwu
+
+ENDPROC(_WindowUnderflow4)
+
+/* 8-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow8)
+
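+	/* a0..a3 of the spilled frame go to the base save area just below
+	 * the callee's stack pointer (a9); a4..a7 go to the extra save area
+	 * below the spilled frame's caller's stack pointer, which is fetched
+	 * from the base save area at a1-12.
+	 */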
+	s32e	a0, a9, -16
+	l32e	a0, a1, -12
+	s32e	a2, a9,  -8
+	s32e	a1, a9, -12
+	s32e	a3, a9,  -4
+	s32e	a4, a0, -32
+	s32e	a5, a0, -28
+	s32e	a6, a0, -24
+	s32e	a7, a0, -20
+	rfwo
+
+ENDPROC(_WindowOverflow8)
+
+/* 8-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow8)
+
+	l32e	a1, a9, -12
+	l32e	a0, a9, -16
+	l32e	a7, a1, -12
+	l32e	a2, a9,  -8
+	l32e	a4, a7, -32
+	l32e	a3, a9,  -4
+	l32e	a5, a7, -28
+	l32e	a6, a7, -24
+	l32e	a7, a7, -20
+	rfwu
+
+ENDPROC(_WindowUnderflow8)
+
+/* 12-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow12)
+
+	s32e	a0,  a13, -16
+	l32e	a0,  a1,  -12
+	s32e	a1,  a13, -12
+	s32e	a2,  a13,  -8
+	s32e	a3,  a13,  -4
+	s32e	a4,  a0,  -48
+	s32e	a5,  a0,  -44
+	s32e	a6,  a0,  -40
+	s32e	a7,  a0,  -36
+	s32e	a8,  a0,  -32
+	s32e	a9,  a0,  -28
+	s32e	a10, a0,  -24
+	s32e	a11, a0,  -20
+	rfwo
+
+ENDPROC(_WindowOverflow12)
+
+/* 12-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow12)
+
+	l32e	a1,  a13, -12
+	l32e	a0,  a13, -16
+	l32e	a11, a1,  -12
+	l32e	a2,  a13,  -8
+	l32e	a4,  a11, -48
+	l32e	a8,  a11, -32
+	l32e	a3,  a13,  -4
+	l32e	a5,  a11, -44
+	l32e	a6,  a11, -40
+	l32e	a7,  a11, -36
+	l32e	a9,  a11, -28
+	l32e	a10, a11, -24
+	l32e	a11, a11, -20
+	rfwu
+
+ENDPROC(_WindowUnderflow12)
+
+	.text
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..fa92699
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,336 @@
+/*
+ * arch/xtensa/kernel/vmlinux.lds.S
+ *
+ * Xtensa linker script
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#include <asm/vectors.h>
+#include <variant/core.h>
+
+OUTPUT_ARCH(xtensa)
+ENTRY(_start)
+
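+/* jiffies aliases the low 32 bits of jiffies_64; on big-endian cores the
+   low word sits at byte offset 4. */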
+#ifdef __XTENSA_EB__
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+/* Note: In the following macros, it would be nice to specify only the
+   vector name and section kind and construct "sym" and "section" using
+   CPP concatenation, but that does not work reliably.  Concatenating a
+   string with "." produces an invalid token.  CPP will not print a
+   warning because it thinks this is an assembly file, but it leaves
+   them as multiple tokens and there may or may not be whitespace
+   between them.  */
+
+/* Macro for a relocation entry */
+
+#define RELOCATE_ENTRY(sym, section)		\
+	LONG(sym ## _start);			\
+	LONG(sym ## _end);			\
+	LONG(LOADADDR(section))
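+
+/* For example, RELOCATE_ENTRY(_DebugInterruptVector_text,
+   .DebugInterruptVector.text) emits three words: the vector's start and end
+   run-time addresses followed by its load address, which head.S walks at
+   boot to copy each vector into place. */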
+
+/*
+ * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
+ * defined, the code for every vector is located with other init data. At
+ * startup time head.S copies the code for every vector to its final position
+ * according to the description recorded in the corresponding RELOCATE_ENTRY.
+ */
+
+#ifdef CONFIG_VECTORS_OFFSET
+#define SECTION_VECTOR(sym, section, addr, prevsec)                         \
+  section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3)      \
+  {									    \
+    . = ALIGN(4);							    \
+    sym ## _start = ABSOLUTE(.);		 			    \
+    *(section)								    \
+    sym ## _end = ABSOLUTE(.);						    \
+  }
+#else
+#define SECTION_VECTOR(section, addr)					    \
+  . = addr;								    \
+  *(section)
+#endif
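+
+/* With CONFIG_VECTORS_OFFSET, for instance,
+   SECTION_VECTOR(_DebugInterruptVector_text, .DebugInterruptVector.text,
+   DEBUG_VECTOR_VADDR, .WindowVectors.text) links the debug vector to run at
+   DEBUG_VECTOR_VADDR while loading it in the image right after the window
+   vectors. */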
+
+/*
+ *  Mapping of input sections to output sections when linking.
+ */
+
+SECTIONS
+{
+  . = KERNELOFFSET;
+  /* .text section */
+
+  _text = .;
+  _stext = .;
+
+  .text :
+  {
+    /* The HEAD_TEXT section must be the first section! */
+    HEAD_TEXT
+
+#ifndef CONFIG_VECTORS_OFFSET
+  . = ALIGN(PAGE_SIZE);
+  _vecbase = .;
+
+  SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR)
+#if XCHAL_EXCM_LEVEL >= 2
+  SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+  SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+  SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+  SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+  SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
+#endif
+  SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
+  SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
+  SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
+  SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+#endif
+
+    IRQENTRY_TEXT
+    SOFTIRQENTRY_TEXT
+    ENTRY_TEXT
+    TEXT_TEXT
+    SCHED_TEXT
+    CPUIDLE_TEXT
+    LOCK_TEXT
+
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+
+  . = ALIGN(16);
+
+  RODATA
+
+  /*  Relocation table */
+
+  .fixup   : { *(.fixup) }
+
+  EXCEPTION_TABLE(16)
+  NOTES
+  /* Data section */
+
+  _sdata = .;
+  RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+  _edata = .;
+
+  /* Initialization code and data: */
+
+  . = ALIGN(PAGE_SIZE);
+  __init_begin = .;
+  INIT_TEXT_SECTION(PAGE_SIZE)
+
+  .init.data :
+  {
+    INIT_DATA
+    . = ALIGN(0x4);
+    __tagtable_begin = .;
+    *(.taglist)
+    __tagtable_end = .;
+
+    . = ALIGN(16);
+    __boot_reloc_table_start = ABSOLUTE(.);
+
+#ifdef CONFIG_VECTORS_OFFSET
+    RELOCATE_ENTRY(_WindowVectors_text,
+		   .WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+    RELOCATE_ENTRY(_Level2InterruptVector_text,
+		   .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+    RELOCATE_ENTRY(_Level3InterruptVector_text,
+		   .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+    RELOCATE_ENTRY(_Level4InterruptVector_text,
+		   .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+    RELOCATE_ENTRY(_Level5InterruptVector_text,
+		   .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+    RELOCATE_ENTRY(_Level6InterruptVector_text,
+		   .Level6InterruptVector.text);
+#endif
+    RELOCATE_ENTRY(_KernelExceptionVector_text,
+		   .KernelExceptionVector.text);
+    RELOCATE_ENTRY(_UserExceptionVector_text,
+		   .UserExceptionVector.text);
+    RELOCATE_ENTRY(_DoubleExceptionVector_text,
+		   .DoubleExceptionVector.text);
+    RELOCATE_ENTRY(_DebugInterruptVector_text,
+		   .DebugInterruptVector.text);
+#endif
+#if defined(CONFIG_SMP)
+    RELOCATE_ENTRY(_SecondaryResetVector_text,
+		   .SecondaryResetVector.text);
+#endif
+
+
+    __boot_reloc_table_end = ABSOLUTE(.);
+
+    INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+    INIT_CALLS
+    CON_INITCALL
+    SECURITY_INITCALL
+    INIT_RAM_FS
+  }
+
+  PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
+
+  /* We need this dummy segment here: it anchors the load address of the
+     first relocated vector section below. */
+
+  . = ALIGN(4);
+  .dummy : { LONG(0) }
+
+#ifdef CONFIG_VECTORS_OFFSET
+  /* The vectors are relocated to the real position at startup time */
+
+  SECTION_VECTOR (_WindowVectors_text,
+		  .WindowVectors.text,
+		  WINDOW_VECTORS_VADDR,
+		  .dummy)
+  SECTION_VECTOR (_DebugInterruptVector_text,
+		  .DebugInterruptVector.text,
+		  DEBUG_VECTOR_VADDR,
+		  .WindowVectors.text)
+#undef LAST
+#define LAST	.DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+  SECTION_VECTOR (_Level2InterruptVector_text,
+		  .Level2InterruptVector.text,
+		  INTLEVEL2_VECTOR_VADDR,
+		  LAST)
+# undef LAST
+# define LAST	.Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+  SECTION_VECTOR (_Level3InterruptVector_text,
+		  .Level3InterruptVector.text,
+		  INTLEVEL3_VECTOR_VADDR,
+		  LAST)
+# undef LAST
+# define LAST	.Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+  SECTION_VECTOR (_Level4InterruptVector_text,
+		  .Level4InterruptVector.text,
+		  INTLEVEL4_VECTOR_VADDR,
+		  LAST)
+# undef LAST
+# define LAST	.Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+  SECTION_VECTOR (_Level5InterruptVector_text,
+		  .Level5InterruptVector.text,
+		  INTLEVEL5_VECTOR_VADDR,
+		  LAST)
+# undef LAST
+# define LAST	.Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+  SECTION_VECTOR (_Level6InterruptVector_text,
+		  .Level6InterruptVector.text,
+		  INTLEVEL6_VECTOR_VADDR,
+		  LAST)
+# undef LAST
+# define LAST	.Level6InterruptVector.text
+#endif
+  SECTION_VECTOR (_KernelExceptionVector_text,
+		  .KernelExceptionVector.text,
+		  KERNEL_VECTOR_VADDR,
+		  LAST)
+#undef LAST
+  SECTION_VECTOR (_UserExceptionVector_text,
+		  .UserExceptionVector.text,
+		  USER_VECTOR_VADDR,
+		  .KernelExceptionVector.text)
+  SECTION_VECTOR (_DoubleExceptionVector_text,
+		  .DoubleExceptionVector.text,
+		  DOUBLEEXC_VECTOR_VADDR,
+		  .UserExceptionVector.text)
+
+  . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+
+#endif
+#if defined(CONFIG_SMP)
+
+  SECTION_VECTOR (_SecondaryResetVector_text,
+		  .SecondaryResetVector.text,
+		  RESET_VECTOR1_VADDR,
+		  .DoubleExceptionVector.text)
+
+  . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+
+#endif
+
+  . = ALIGN(PAGE_SIZE);
+
+  __init_end = .;
+
+  BSS_SECTION(0, 8192, 0)
+
+  _end = .;
+
+  .xt.lit : { *(.xt.lit) }
+  .xt.prop : { *(.xt.prop) }
+
+  .debug  0 :  { *(.debug) }
+  .line  0 :  { *(.line) }
+  .debug_srcinfo  0 :  { *(.debug_srcinfo) }
+  .debug_sfnames  0 :  { *(.debug_sfnames) }
+  .debug_aranges  0 :  { *(.debug_aranges) }
+  .debug_pubnames  0 :  { *(.debug_pubnames) }
+  .debug_info  0 :  { *(.debug_info) }
+  .debug_abbrev  0 :  { *(.debug_abbrev) }
+  .debug_line  0 :  { *(.debug_line) }
+  .debug_frame  0 :  { *(.debug_frame) }
+  .debug_str  0 :  { *(.debug_str) }
+  .debug_loc  0 :  { *(.debug_loc) }
+  .debug_macinfo  0 :  { *(.debug_macinfo) }
+  .debug_weaknames  0 :  { *(.debug_weaknames) }
+  .debug_funcnames  0 :  { *(.debug_funcnames) }
+  .debug_typenames  0 :  { *(.debug_typenames) }
+  .debug_varnames  0 :  { *(.debug_varnames) }
+
+  .xt.insn 0 :
+  {
+    *(.xt.insn)
+    *(.gnu.linkonce.x*)
+  }
+
+  .xt.lit 0 :
+  {
+    *(.xt.lit)
+    *(.gnu.linkonce.p*)
+  }
+
+  /* Sections to be discarded */
+  DISCARDS
+}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 0000000..04f19de
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,139 @@
+/*
+ * arch/xtensa/kernel/xtensa_ksyms.c
+ *
+ * Export Xtensa-specific functions for loadable modules.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005  Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/in6.h>
+
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/ftrace.h>
+#ifdef CONFIG_BLK_DEV_FD
+#include <asm/floppy.h>
+#endif
+#ifdef CONFIG_NET
+#include <net/checksum.h>
+#endif /* CONFIG_NET */
+
+
+/*
+ * String functions
+ */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+EXPORT_SYMBOL(__strncpy_user);
+#endif
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * gcc internal math functions
+ */
+extern long long __ashrdi3(long long, int);
+extern long long __ashldi3(long long, int);
+extern long long __lshrdi3(long long, int);
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern long long __muldi3(long long, long long);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
+extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
+extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+extern int __ucmpdi2(unsigned long long, unsigned long long);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__ucmpdi2);
+
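+/* Link-time stubs for calls the compiler may emit but that the kernel must
+ * never actually reach; each one traps via BUG() if called.
+ */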
+void __xtensa_libgcc_window_spill(void)
+{
+	BUG();
+}
+EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
+
+unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+{
+	BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_and_4);
+
+unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+{
+	BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_or_4);
+
+/*
+ * Networking support
+ */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+
+/*
+ * Architecture-specific symbols
+ */
+EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
+
+/*
+ * Kernel hacking ...
+ */
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+// FIXME EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+
+extern long common_exception_return;
+EXPORT_SYMBOL(common_exception_return);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif