v4.19.13 snapshot.
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
new file mode 100644
index 0000000..fbe7417
--- /dev/null
+++ b/arch/c6x/kernel/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for arch/c6x/kernel/
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := process.o traps.o irq.o signal.o ptrace.o
+obj-y += setup.o sys_c6x.o time.o devicetree.o
+obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
+obj-y += soc.o
+
+obj-$(CONFIG_MODULES)           += module.o
diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c
new file mode 100644
index 0000000..0f8fde4
--- /dev/null
+++ b/arch/c6x/kernel/asm-offsets.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
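+/*
+ * For example, OFFSET(REGS_SP, pt_regs, sp) below is turned by the
+ * kbuild asm-offsets machinery into a "#define REGS_SP <offset>" entry
+ * in the generated asm-offsets.h, so entry.S can refer to pt_regs
+ * fields symbolically.
+ */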
+
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <asm/procinfo.h>
+#include <linux/kbuild.h>
+#include <linux/unistd.h>
+
+void foo(void)
+{
+	OFFSET(REGS_A16,	pt_regs, a16);
+	OFFSET(REGS_A17,	pt_regs, a17);
+	OFFSET(REGS_A18,	pt_regs, a18);
+	OFFSET(REGS_A19,	pt_regs, a19);
+	OFFSET(REGS_A20,	pt_regs, a20);
+	OFFSET(REGS_A21,	pt_regs, a21);
+	OFFSET(REGS_A22,	pt_regs, a22);
+	OFFSET(REGS_A23,	pt_regs, a23);
+	OFFSET(REGS_A24,	pt_regs, a24);
+	OFFSET(REGS_A25,	pt_regs, a25);
+	OFFSET(REGS_A26,	pt_regs, a26);
+	OFFSET(REGS_A27,	pt_regs, a27);
+	OFFSET(REGS_A28,	pt_regs, a28);
+	OFFSET(REGS_A29,	pt_regs, a29);
+	OFFSET(REGS_A30,	pt_regs, a30);
+	OFFSET(REGS_A31,	pt_regs, a31);
+
+	OFFSET(REGS_B16,	pt_regs, b16);
+	OFFSET(REGS_B17,	pt_regs, b17);
+	OFFSET(REGS_B18,	pt_regs, b18);
+	OFFSET(REGS_B19,	pt_regs, b19);
+	OFFSET(REGS_B20,	pt_regs, b20);
+	OFFSET(REGS_B21,	pt_regs, b21);
+	OFFSET(REGS_B22,	pt_regs, b22);
+	OFFSET(REGS_B23,	pt_regs, b23);
+	OFFSET(REGS_B24,	pt_regs, b24);
+	OFFSET(REGS_B25,	pt_regs, b25);
+	OFFSET(REGS_B26,	pt_regs, b26);
+	OFFSET(REGS_B27,	pt_regs, b27);
+	OFFSET(REGS_B28,	pt_regs, b28);
+	OFFSET(REGS_B29,	pt_regs, b29);
+	OFFSET(REGS_B30,	pt_regs, b30);
+	OFFSET(REGS_B31,	pt_regs, b31);
+
+	OFFSET(REGS_A0,		pt_regs, a0);
+	OFFSET(REGS_A1,		pt_regs, a1);
+	OFFSET(REGS_A2,		pt_regs, a2);
+	OFFSET(REGS_A3,		pt_regs, a3);
+	OFFSET(REGS_A4,		pt_regs, a4);
+	OFFSET(REGS_A5,		pt_regs, a5);
+	OFFSET(REGS_A6,		pt_regs, a6);
+	OFFSET(REGS_A7,		pt_regs, a7);
+	OFFSET(REGS_A8,		pt_regs, a8);
+	OFFSET(REGS_A9,		pt_regs, a9);
+	OFFSET(REGS_A10,	pt_regs, a10);
+	OFFSET(REGS_A11,	pt_regs, a11);
+	OFFSET(REGS_A12,	pt_regs, a12);
+	OFFSET(REGS_A13,	pt_regs, a13);
+	OFFSET(REGS_A14,	pt_regs, a14);
+	OFFSET(REGS_A15,	pt_regs, a15);
+
+	OFFSET(REGS_B0,		pt_regs, b0);
+	OFFSET(REGS_B1,		pt_regs, b1);
+	OFFSET(REGS_B2,		pt_regs, b2);
+	OFFSET(REGS_B3,		pt_regs, b3);
+	OFFSET(REGS_B4,		pt_regs, b4);
+	OFFSET(REGS_B5,		pt_regs, b5);
+	OFFSET(REGS_B6,		pt_regs, b6);
+	OFFSET(REGS_B7,		pt_regs, b7);
+	OFFSET(REGS_B8,		pt_regs, b8);
+	OFFSET(REGS_B9,		pt_regs, b9);
+	OFFSET(REGS_B10,	pt_regs, b10);
+	OFFSET(REGS_B11,	pt_regs, b11);
+	OFFSET(REGS_B12,	pt_regs, b12);
+	OFFSET(REGS_B13,	pt_regs, b13);
+	OFFSET(REGS_DP,		pt_regs, dp);
+	OFFSET(REGS_SP,		pt_regs, sp);
+
+	OFFSET(REGS_TSR,	pt_regs, tsr);
+	OFFSET(REGS_ORIG_A4,	pt_regs, orig_a4);
+
+	DEFINE(REGS__END,	sizeof(struct pt_regs));
+	BLANK();
+
+	OFFSET(THREAD_PC,	thread_struct, pc);
+	OFFSET(THREAD_B15_14,	thread_struct, b15_14);
+	OFFSET(THREAD_A15_14,	thread_struct, a15_14);
+	OFFSET(THREAD_B13_12,	thread_struct, b13_12);
+	OFFSET(THREAD_A13_12,	thread_struct, a13_12);
+	OFFSET(THREAD_B11_10,	thread_struct, b11_10);
+	OFFSET(THREAD_A11_10,	thread_struct, a11_10);
+	OFFSET(THREAD_RICL_ICL,	thread_struct, ricl_icl);
+	BLANK();
+
+	OFFSET(TASK_STATE,	task_struct, state);
+	BLANK();
+
+	OFFSET(THREAD_INFO_FLAGS,	thread_info, flags);
+	OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
+	BLANK();
+
+	/* These would be unnecessary if we ran asm files
+	 * through the preprocessor.
+	 */
+	DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
+	DEFINE(KTHREAD_START_SP, THREAD_START_SP);
+	DEFINE(ENOSYS_, ENOSYS);
+	DEFINE(NR_SYSCALLS_, __NR_syscalls);
+
+	DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
+	DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
+	DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
+	DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
+
+	DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
+	DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
+}
diff --git a/arch/c6x/kernel/c6x_ksyms.c b/arch/c6x/kernel/c6x_ksyms.c
new file mode 100644
index 0000000..0ba3e0b
--- /dev/null
+++ b/arch/c6x/kernel/c6x_ksyms.c
@@ -0,0 +1,66 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <asm/checksum.h>
+#include <linux/io.h>
+
+/*
+ * libgcc functions - used internally by the compiler...
+ */
+extern int __c6xabi_divi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divi);
+
+extern unsigned __c6xabi_divu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divu);
+
+extern int __c6xabi_remi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_remi);
+
+extern unsigned __c6xabi_remu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_remu);
+
+extern int __c6xabi_divremi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divremi);
+
+extern unsigned __c6xabi_divremu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divremu);
+
+extern unsigned long long __c6xabi_mpyll(unsigned long long src1,
+					 unsigned long long src2);
+EXPORT_SYMBOL(__c6xabi_mpyll);
+
+extern long long __c6xabi_negll(long long src);
+EXPORT_SYMBOL(__c6xabi_negll);
+
+extern unsigned long long __c6xabi_llshl(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshl);
+
+extern long long __c6xabi_llshr(long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshr);
+
+extern unsigned long long __c6xabi_llshru(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshru);
+
+extern void __c6xabi_strasgi(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi);
+
+extern void __c6xabi_push_rts(void);
+EXPORT_SYMBOL(__c6xabi_push_rts);
+
+extern void __c6xabi_pop_rts(void);
+EXPORT_SYMBOL(__c6xabi_pop_rts);
+
+extern void __c6xabi_strasgi_64plus(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi_64plus);
+
+/* lib functions */
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
new file mode 100644
index 0000000..fa3e574
--- /dev/null
+++ b/arch/c6x/kernel/devicetree.c
@@ -0,0 +1,18 @@
+/*
+ *  Architecture specific OF callbacks.
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+	c6x_add_memory(base, size);
+}
diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S
new file mode 100644
index 0000000..2721c90
--- /dev/null
+++ b/arch/c6x/kernel/entry.S
@@ -0,0 +1,739 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004-2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
+;  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+; Registers naming
+#define DP	B14
+#define SP	B15
+
+#ifndef CONFIG_PREEMPT
+#define resume_kernel restore_all
+#endif
+
+	.altmacro
+
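+	;; MASK_INT/UNMASK_INT clear/set the GIE bit (bit 0 of CSR) to
+	;; globally disable/enable interrupts.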
+	.macro MASK_INT reg
+	MVC	.S2	CSR,reg
+	CLR	.S2	reg,0,0,reg
+	MVC	.S2	reg,CSR
+	.endm
+
+	.macro UNMASK_INT reg
+	MVC	.S2	CSR,reg
+	SET	.S2	reg,0,0,reg
+	MVC	.S2	reg,CSR
+	.endm
+
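+	;; thread_info sits at the bottom of the kernel stack, so rounding
+	;; SP down to a THREAD_SIZE boundary yields the thread_info pointer.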
+	.macro GET_THREAD_INFO reg
+	SHR	.S1X	SP,THREAD_SHIFT,reg
+	SHL	.S1	reg,THREAD_SHIFT,reg
+	.endm
+
+	;;
+	;;  This defines the normal kernel pt_regs layout.
+	;;
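+	;;  __rp/__tsr name the return-pointer and saved-TSR control
+	;;  registers to record: IRP/ITSR for interrupts, NRP/NTSR for
+	;;  exceptions, NMI and syscalls (see SAVE_ALL_INT and _nmi_handler).
+	;;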
+	.macro SAVE_ALL __rp __tsr
+	STW	.D2T2	B0,*SP--[2]		; save original B0
+	MVKL	.S2	current_ksp,B0
+	MVKH	.S2	current_ksp,B0
+	LDW	.D2T2	*B0,B1			; KSP
+
+	NOP	3
+	STW	.D2T2	B1,*+SP[1]		; save original B1
+	XOR	.D2	SP,B1,B0		; (SP ^ KSP)
+	LDW	.D2T2	*+SP[1],B1		; restore B0/B1
+	LDW	.D2T2	*++SP[2],B0
+	SHR	.S2	B0,THREAD_SHIFT,B0	; 0 if already using kstack
+  [B0]	STDW	.D2T2	SP:DP,*--B1[1]		; user: save user sp/dp kstack
+  [B0]	MV	.S2	B1,SP			;    and switch to kstack
+||[!B0] STDW	.D2T2	SP:DP,*--SP[1]		; kernel: save on current stack
+
+	SUBAW	.D2	SP,2,SP
+
+	ADD	.D1X	SP,-8,A15
+ ||	STDW	.D2T1	A15:A14,*SP--[16]	; save A15:A14
+
+	STDW	.D2T2	B13:B12,*SP--[1]
+ ||	STDW	.D1T1	A13:A12,*A15--[1]
+ ||	MVC	.S2	__rp,B13
+
+	STDW	.D2T2	B11:B10,*SP--[1]
+ ||	STDW	.D1T1	A11:A10,*A15--[1]
+ ||	MVC	.S2	CSR,B12
+
+	STDW	.D2T2	B9:B8,*SP--[1]
+ ||	STDW	.D1T1	A9:A8,*A15--[1]
+ ||	MVC	.S2	RILC,B11
+	STDW	.D2T2	B7:B6,*SP--[1]
+ ||	STDW	.D1T1	A7:A6,*A15--[1]
+ ||	MVC	.S2	ILC,B10
+
+	STDW	.D2T2	B5:B4,*SP--[1]
+ ||	STDW	.D1T1	A5:A4,*A15--[1]
+
+	STDW	.D2T2	B3:B2,*SP--[1]
+ ||	STDW	.D1T1	A3:A2,*A15--[1]
+ ||	MVC	.S2	__tsr,B5
+
+	STDW	.D2T2	B1:B0,*SP--[1]
+ ||	STDW	.D1T1	A1:A0,*A15--[1]
+ ||	MV	.S1X	B5,A5
+
+	STDW	.D2T2	B31:B30,*SP--[1]
+ ||	STDW	.D1T1	A31:A30,*A15--[1]
+	STDW	.D2T2	B29:B28,*SP--[1]
+ ||	STDW	.D1T1	A29:A28,*A15--[1]
+	STDW	.D2T2	B27:B26,*SP--[1]
+ ||	STDW	.D1T1	A27:A26,*A15--[1]
+	STDW	.D2T2	B25:B24,*SP--[1]
+ ||	STDW	.D1T1	A25:A24,*A15--[1]
+	STDW	.D2T2	B23:B22,*SP--[1]
+ ||	STDW	.D1T1	A23:A22,*A15--[1]
+	STDW	.D2T2	B21:B20,*SP--[1]
+ ||	STDW	.D1T1	A21:A20,*A15--[1]
+	STDW	.D2T2	B19:B18,*SP--[1]
+ ||	STDW	.D1T1	A19:A18,*A15--[1]
+	STDW	.D2T2	B17:B16,*SP--[1]
+ ||	STDW	.D1T1	A17:A16,*A15--[1]
+
+	STDW	.D2T2	B13:B12,*SP--[1]	; save PC and CSR
+
+	STDW	.D2T2	B11:B10,*SP--[1]	; save RILC and ILC
+	STDW	.D2T1	A5:A4,*SP--[1]		; save TSR and orig A4
+
+	;; We left an unused word on the stack just above pt_regs.
+	;; It is used to save whether or not this frame is due to
+	;; a syscall. It is cleared here, but the syscall handler
+	;; sets it to a non-zero value.
+	MVK	.L2	0,B1
+	STW	.D2T2	B1,*+SP(REGS__END+8)	; clear syscall flag
+	.endm
+
+	.macro RESTORE_ALL __rp __tsr
+	LDDW	.D2T2	*++SP[1],B9:B8		; get TSR (B9)
+	LDDW	.D2T2	*++SP[1],B11:B10	; get RILC (B11) and ILC (B10)
+	LDDW	.D2T2	*++SP[1],B13:B12	; get PC (B13) and CSR (B12)
+
+	ADDAW	.D1X	SP,30,A15
+
+	LDDW	.D1T1	*++A15[1],A17:A16
+ ||	LDDW	.D2T2	*++SP[1],B17:B16
+	LDDW	.D1T1	*++A15[1],A19:A18
+ ||	LDDW	.D2T2	*++SP[1],B19:B18
+	LDDW	.D1T1	*++A15[1],A21:A20
+ ||	LDDW	.D2T2	*++SP[1],B21:B20
+	LDDW	.D1T1	*++A15[1],A23:A22
+ ||	LDDW	.D2T2	*++SP[1],B23:B22
+	LDDW	.D1T1	*++A15[1],A25:A24
+ ||	LDDW	.D2T2	*++SP[1],B25:B24
+	LDDW	.D1T1	*++A15[1],A27:A26
+ ||	LDDW	.D2T2	*++SP[1],B27:B26
+	LDDW	.D1T1	*++A15[1],A29:A28
+ ||	LDDW	.D2T2	*++SP[1],B29:B28
+	LDDW	.D1T1	*++A15[1],A31:A30
+ ||	LDDW	.D2T2	*++SP[1],B31:B30
+
+	LDDW	.D1T1	*++A15[1],A1:A0
+ ||	LDDW	.D2T2	*++SP[1],B1:B0
+
+	LDDW	.D1T1	*++A15[1],A3:A2
+ ||	LDDW	.D2T2	*++SP[1],B3:B2
+ ||	MVC	.S2	B9,__tsr
+	LDDW	.D1T1	*++A15[1],A5:A4
+ ||	LDDW	.D2T2	*++SP[1],B5:B4
+ ||	MVC	.S2	B11,RILC
+	LDDW	.D1T1	*++A15[1],A7:A6
+ ||	LDDW	.D2T2	*++SP[1],B7:B6
+ ||	MVC	.S2	B10,ILC
+
+	LDDW	.D1T1	*++A15[1],A9:A8
+ ||	LDDW	.D2T2	*++SP[1],B9:B8
+ ||	MVC	.S2	B13,__rp
+
+	LDDW	.D1T1	*++A15[1],A11:A10
+ ||	LDDW	.D2T2	*++SP[1],B11:B10
+ ||	MVC	.S2	B12,CSR
+
+	LDDW	.D1T1	*++A15[1],A13:A12
+ ||	LDDW	.D2T2	*++SP[1],B13:B12
+
+	MV	.D2X	A15,SP
+ ||	MVKL	.S1	current_ksp,A15
+	MVKH	.S1	current_ksp,A15
+ ||	ADDAW	.D1X	SP,6,A14
+	STW	.D1T1	A14,*A15	; save kernel stack pointer
+
+	LDDW	.D2T1	*++SP[1],A15:A14
+
+	B	.S2	__rp		; return from interruption
+	LDDW	.D2T2	*+SP[1],SP:DP
+	NOP	4
+	.endm
+
+	.section .text
+
+	;;
+	;; Jump to schedule() then return to ret_from_exception
+	;;
+_reschedule:
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	schedule,A0
+	MVKH	.S1	schedule,A0
+	B	.S2X	A0
+#else
+	B	.S1	schedule
+#endif
+	ADDKPC	.S2	ret_from_exception,B3,4
+
+	;;
+	;; Called before syscall handler when process is being debugged
+	;;
+tracesys_on:
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	syscall_trace_entry,A0
+	MVKH	.S1	syscall_trace_entry,A0
+	B	.S2X	A0
+#else
+	B	.S1	syscall_trace_entry
+#endif
+	ADDKPC	.S2	ret_from_syscall_trace,B3,3
+	ADD	.S1X	8,SP,A4
+
+ret_from_syscall_trace:
+	;; tracing returns (possibly new) syscall number
+	MV	.D2X	A4,B0
+ ||	MVK	.S2	__NR_syscalls,B1
+	CMPLTU	.L2	B0,B1,B1
+
+ [!B1]	BNOP	.S2	ret_from_syscall_function,5
+ ||	MVK	.S1	-ENOSYS,A4
+
+	;; reload syscall args from (possibly modified) stack frame
+	;; and get syscall handler addr from sys_call_table:
+	LDW	.D2T2	*+SP(REGS_B4+8),B4
+ ||	MVKL	.S2	sys_call_table,B1
+	LDW	.D2T1	*+SP(REGS_A6+8),A6
+ ||	MVKH	.S2	sys_call_table,B1
+	LDW	.D2T2	*+B1[B0],B0
+ ||	MVKL	.S2	ret_from_syscall_function,B3
+	LDW	.D2T2	*+SP(REGS_B6+8),B6
+ ||	MVKH	.S2	ret_from_syscall_function,B3
+	LDW	.D2T1	*+SP(REGS_A8+8),A8
+	LDW	.D2T2	*+SP(REGS_B8+8),B8
+	NOP
+	; B0 = sys_call_table[__NR_*]
+	BNOP	.S2	B0,5			; branch to syscall handler
+ ||	LDW	.D2T1	*+SP(REGS_ORIG_A4+8),A4
+
+syscall_exit_work:
+	AND	.D1	_TIF_SYSCALL_TRACE,A2,A0
+ [!A0]	BNOP	.S1	work_pending,5
+ [A0]	B	.S2	syscall_trace_exit
+	ADDKPC	.S2	resume_userspace,B3,1
+	MVC	.S2	CSR,B1
+	SET	.S2	B1,0,0,B1
+	MVC	.S2	B1,CSR		; enable ints
+
+work_pending:
+	AND	.D1	_TIF_NEED_RESCHED,A2,A0
+ [!A0]	BNOP	.S1	work_notifysig,5
+
+work_resched:
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	schedule,A1
+	MVKH	.S1	schedule,A1
+	B	.S2X	A1
+#else
+	B	.S2	schedule
+#endif
+	ADDKPC	.S2	work_rescheduled,B3,4
+work_rescheduled:
+	;; make sure we don't miss an interrupt setting need_resched or
+	;; sigpending between sampling and the rti
+	MASK_INT B2
+	GET_THREAD_INFO A12
+	LDW	.D1T1	*+A12(THREAD_INFO_FLAGS),A2
+	MVK	.S1	_TIF_WORK_MASK,A1
+	MVK	.S1	_TIF_NEED_RESCHED,A3
+	NOP	2
+	AND	.D1	A1,A2,A0
+ ||	AND	.S1	A3,A2,A1
+ [!A0]	BNOP	.S1	restore_all,5
+ [A1]	BNOP	.S1	work_resched,5
+
+work_notifysig:
+	;; enable interrupts for do_notify_resume()
+	UNMASK_INT B2
+	B	.S2	do_notify_resume
+	LDW	.D2T1	*+SP(REGS__END+8),A6 ; syscall flag
+	ADDKPC	.S2	resume_userspace,B3,1
+	ADD	.S1X	8,SP,A4		; pt_regs pointer is first arg
+	MV	.D2X	A2,B4		; thread_info flags is second arg
+
+	;;
+	;; On C64x+, the return path from exceptions and interrupts
+	;; differs slightly
+	;;
+ENTRY(ret_from_exception)
+#ifdef CONFIG_PREEMPT
+	MASK_INT B2
+#endif
+
+ENTRY(ret_from_interrupt)
+	;;
+	;; Check if we are coming from user mode.
+	;;
+	LDW	.D2T2	*+SP(REGS_TSR+8),B0
+	MVK	.S2	0x40,B1
+	NOP	3
+	AND	.D2	B0,B1,B0
+ [!B0]	BNOP	.S2	resume_kernel,5
+
+resume_userspace:
+	;; make sure we don't miss an interrupt setting need_resched or
+	;; sigpending between sampling and the rti
+	MASK_INT B2
+	GET_THREAD_INFO A12
+	LDW	.D1T1	*+A12(THREAD_INFO_FLAGS),A2
+	MVK	.S1	_TIF_WORK_MASK,A1
+	MVK	.S1	_TIF_NEED_RESCHED,A3
+	NOP	2
+	AND	.D1	A1,A2,A0
+ [A0]	BNOP	.S1	work_pending,5
+	BNOP	.S1	restore_all,5
+
+	;;
+	;; System call handling
+	;; B0 = syscall number (in sys_call_table)
+	;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
+	;; A4 is the return value register
+	;;
+system_call_saved:
+	MVK	.L2	1,B2
+	STW	.D2T2	B2,*+SP(REGS__END+8)	; set syscall flag
+	MVC	.S2	B2,ECR			; ack the software exception
+
+	UNMASK_INT B2			; re-enable global IT
+
+system_call_saved_noack:
+	;; Check system call number
+	MVK	.S2	__NR_syscalls,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	sys_ni_syscall,A0
+#endif
+	CMPLTU	.L2	B0,B1,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKH	.S1	sys_ni_syscall,A0
+#endif
+
+	;; Check for ptrace
+	GET_THREAD_INFO A12
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B1]	B	.S2X	A0
+#else
+ [!B1]	B	.S2	sys_ni_syscall
+#endif
+ [!B1]	ADDKPC	.S2	ret_from_syscall_function,B3,4
+
+	;; Get syscall handler addr from sys_call_table
+	;; call tracesys_on or call syscall handler
+	LDW	.D1T1	*+A12(THREAD_INFO_FLAGS),A2
+ ||	MVKL	.S2	sys_call_table,B1
+	MVKH	.S2	sys_call_table,B1
+	LDW	.D2T2	*+B1[B0],B0
+	NOP	2
+	; A2 = thread_info flags
+	AND	.D1	_TIF_SYSCALL_TRACE,A2,A2
+ [A2]	BNOP	.S1	tracesys_on,5
+	;; B0 = _sys_call_table[__NR_*]
+	B	.S2	B0
+	ADDKPC	.S2	ret_from_syscall_function,B3,4
+
+ret_from_syscall_function:
+	STW	.D2T1	A4,*+SP(REGS_A4+8)	; save return value in A4
+						; original A4 is in orig_A4
+syscall_exit:
+	;; make sure we don't miss an interrupt setting need_resched or
+	;; sigpending between sampling and the rti
+	MASK_INT B2
+	LDW	.D1T1	*+A12(THREAD_INFO_FLAGS),A2
+	MVK	.S1	_TIF_ALLWORK_MASK,A1
+	NOP	3
+	AND	.D1	A1,A2,A2 ; check for work to do
+ [A2]	BNOP	.S1	syscall_exit_work,5
+
+restore_all:
+	RESTORE_ALL NRP,NTSR
+
+	;;
+	;; After a fork we jump here directly from resume,
+	;; so that A4 contains the previous task structure.
+	;;
+ENTRY(ret_from_fork)
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	schedule_tail,A0
+	MVKH	.S1	schedule_tail,A0
+	B	.S2X	A0
+#else
+	B	.S2	schedule_tail
+#endif
+	ADDKPC	.S2	ret_from_fork_2,B3,4
+ret_from_fork_2:
+	;; return 0 in A4 for child process
+	GET_THREAD_INFO A12
+	BNOP	.S2	syscall_exit,3
+	MVK	.L2	0,B0
+	STW	.D2T2	B0,*+SP(REGS_A4+8)
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	schedule_tail,A0
+	MVKH	.S1	schedule_tail,A0
+	B	.S2X	A0
+#else
+	B	.S2	schedule_tail
+#endif
+	LDW	.D2T2	*+SP(REGS_A0+8),B10 /* get fn  */
+	ADDKPC	.S2	0f,B3,3
+0:
+	B	.S2	B10		   /* call fn */
+	LDW	.D2T1	*+SP(REGS_A1+8),A4 /* get arg */
+	ADDKPC	.S2	ret_from_fork_2,B3,3
+ENDPROC(ret_from_kernel_thread)
+
+	;;
+	;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
+	;;
+	.macro SAVE_ALL_INT
+	SAVE_ALL IRP,ITSR
+	.endm
+
+	.macro CALL_INT int
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	c6x_do_IRQ,A0
+	MVKH	.S1	c6x_do_IRQ,A0
+	BNOP	.S2X	A0,1
+	MVK	.S1	int,A4
+	ADDAW	.D2	SP,2,B4
+	MVKL	.S2	ret_from_interrupt,B3
+	MVKH	.S2	ret_from_interrupt,B3
+#else
+	CALLP   .S2	c6x_do_IRQ,B3
+ ||	MVK	.S1	int,A4
+ ||	ADDAW	.D2	SP,2,B4
+	B	.S1	ret_from_interrupt
+	NOP	5
+#endif
+	.endm
+
+ENTRY(_int4_handler)
+	SAVE_ALL_INT
+	CALL_INT 4
+ENDPROC(_int4_handler)
+
+ENTRY(_int5_handler)
+	SAVE_ALL_INT
+	CALL_INT 5
+ENDPROC(_int5_handler)
+
+ENTRY(_int6_handler)
+	SAVE_ALL_INT
+	CALL_INT 6
+ENDPROC(_int6_handler)
+
+ENTRY(_int7_handler)
+	SAVE_ALL_INT
+	CALL_INT 7
+ENDPROC(_int7_handler)
+
+ENTRY(_int8_handler)
+	SAVE_ALL_INT
+	CALL_INT 8
+ENDPROC(_int8_handler)
+
+ENTRY(_int9_handler)
+	SAVE_ALL_INT
+	CALL_INT 9
+ENDPROC(_int9_handler)
+
+ENTRY(_int10_handler)
+	SAVE_ALL_INT
+	CALL_INT 10
+ENDPROC(_int10_handler)
+
+ENTRY(_int11_handler)
+	SAVE_ALL_INT
+	CALL_INT 11
+ENDPROC(_int11_handler)
+
+ENTRY(_int12_handler)
+	SAVE_ALL_INT
+	CALL_INT 12
+ENDPROC(_int12_handler)
+
+ENTRY(_int13_handler)
+	SAVE_ALL_INT
+	CALL_INT 13
+ENDPROC(_int13_handler)
+
+ENTRY(_int14_handler)
+	SAVE_ALL_INT
+	CALL_INT 14
+ENDPROC(_int14_handler)
+
+ENTRY(_int15_handler)
+	SAVE_ALL_INT
+	CALL_INT 15
+ENDPROC(_int15_handler)
+
+	;;
+	;; Handler for uninitialized and spurious interrupts
+	;;
+ENTRY(_bad_interrupt)
+	B	.S2	IRP
+	NOP	5
+ENDPROC(_bad_interrupt)
+
+	;;
+	;; Entry for NMI/exceptions/syscall
+	;;
+ENTRY(_nmi_handler)
+	SAVE_ALL NRP,NTSR
+
+	MVC	.S2	EFR,B2
+	CMPEQ	.L2	1,B2,B2
+ ||	MVC	.S2	TSR,B1
+	CLR	.S2	B1,10,10,B1
+	MVC	.S2	B1,TSR
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B2]	MVKL	.S1	process_exception,A0
+ [!B2]	MVKH	.S1	process_exception,A0
+ [!B2]	B	.S2X	A0
+#else
+ [!B2]	B	.S2	process_exception
+#endif
+ [B2]	B	.S2	system_call_saved
+ [!B2]	ADDAW	.D2	SP,2,B1
+ [!B2]	MV	.D1X	B1,A4
+	ADDKPC	.S2	ret_from_trap,B3,2
+
+ret_from_trap:
+	MV	.D2X	A4,B0
+ [!B0]	BNOP	.S2	ret_from_exception,5
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S2	system_call_saved_noack,B3
+	MVKH	.S2	system_call_saved_noack,B3
+#endif
+	LDW	.D2T2	*+SP(REGS_B0+8),B0
+	LDW	.D2T1	*+SP(REGS_A4+8),A4
+	LDW	.D2T2	*+SP(REGS_B4+8),B4
+	LDW	.D2T1	*+SP(REGS_A6+8),A6
+	LDW	.D2T2	*+SP(REGS_B6+8),B6
+	LDW	.D2T1	*+SP(REGS_A8+8),A8
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	B	.S2	B3
+#else
+ ||	B	.S2	system_call_saved_noack
+#endif
+	LDW	.D2T2	*+SP(REGS_B8+8),B8
+	NOP	4
+ENDPROC(_nmi_handler)
+
+	;;
+	;; Kernel preemption: call preempt_schedule_irq() then restore_all
+	;;
+#ifdef	CONFIG_PREEMPT
+resume_kernel:
+	GET_THREAD_INFO A12
+	LDW	.D1T1	*+A12(THREAD_INFO_PREEMPT_COUNT),A1
+	NOP	4
+ [A1]	BNOP	.S2	restore_all,5
+
+preempt_schedule:
+	GET_THREAD_INFO A2
+	LDW	.D1T1	*+A2(THREAD_INFO_FLAGS),A1
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S2	preempt_schedule_irq,B0
+	MVKH	.S2	preempt_schedule_irq,B0
+	NOP	2
+#else
+	NOP	4
+#endif
+	AND	.D1	_TIF_NEED_RESCHED,A1,A1
+ [!A1]	BNOP	.S2	restore_all,5
+#ifdef CONFIG_C6X_BIG_KERNEL
+	B	.S2	B0
+#else
+	B	.S2	preempt_schedule_irq
+#endif
+	ADDKPC	.S2	preempt_schedule,B3,4
+#endif /* CONFIG_PREEMPT */
+
+ENTRY(enable_exception)
+	DINT
+	MVC	.S2	TSR,B0
+	MVC	.S2	B3,NRP
+	MVK	.L2	0xc,B1
+	OR	.D2	B0,B1,B0
+	MVC	.S2	B0,TSR			;  Set GEE and XEN in TSR
+	B	.S2	NRP
+	NOP	5
+ENDPROC(enable_exception)
+
+	;;
+	;; Special system calls
+	;; return address is in B3
+	;;
+ENTRY(sys_rt_sigreturn)
+	ADD	.D1X	SP,8,A4
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	do_rt_sigreturn,A0
+	MVKH	.S1	do_rt_sigreturn,A0
+	BNOP	.S2X	A0,5
+#else
+ ||	B	.S2	do_rt_sigreturn
+	NOP	5
+#endif
+ENDPROC(sys_rt_sigreturn)
+
+ENTRY(sys_pread_c6x)
+	MV	.D2X	A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	sys_pread64,A0
+	MVKH	.S1	sys_pread64,A0
+	BNOP	.S2X	A0,5
+#else
+ ||	B	.S2	sys_pread64
+	NOP	5
+#endif
+ENDPROC(sys_pread_c6x)
+
+ENTRY(sys_pwrite_c6x)
+	MV	.D2X	A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	sys_pwrite64,A0
+	MVKH	.S1	sys_pwrite64,A0
+	BNOP	.S2X	A0,5
+#else
+ ||	B	.S2	sys_pwrite64
+	NOP	5
+#endif
+ENDPROC(sys_pwrite_c6x)
+
+;; On Entry
+;;   A4 - path
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_truncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	MV	.S2	B4,B5
+	MV	.D2X	A6,B4
+#else
+	MV	.D2X	A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	sys_truncate64,A0
+	MVKH	.S1	sys_truncate64,A0
+	BNOP	.S2X	A0,5
+#else
+ ||	B	.S2	sys_truncate64
+	NOP	5
+#endif
+ENDPROC(sys_truncate64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_ftruncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	MV	.S2	B4,B5
+	MV	.D2X	A6,B4
+#else
+	MV	.D2X	A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ ||	MVKL	.S1	sys_ftruncate64,A0
+	MVKH	.S1	sys_ftruncate64,A0
+	BNOP	.S2X	A0,5
+#else
+ ||	B	.S2	sys_ftruncate64
+	NOP	5
+#endif
+ENDPROC(sys_ftruncate64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - offset_lo (LE), offset_hi (BE)
+;;   A6 - offset_lo (BE), offset_hi (LE)
+;;   B6 - len_lo (LE), len_hi (BE)
+;;   A8 - len_lo (BE), len_hi (LE)
+;;   B8 - advice
+ENTRY(sys_fadvise64_64_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	sys_fadvise64_64,A0
+	MVKH	.S1	sys_fadvise64_64,A0
+	BNOP	.S2X	A0,2
+#else
+	B	.S2	sys_fadvise64_64
+	NOP	2
+#endif
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	MV	.L2	B4,B5
+ ||	MV	.D2X	A6,B4
+	MV	.L1	A8,A6
+ ||	MV	.D1X	B6,A7
+#else
+	MV	.D2X	A6,B5
+	MV	.L1	A8,A7
+ ||	MV	.D1X	B6,A6
+#endif
+	MV	.L2	B8,B6
+ENDPROC(sys_fadvise64_64_c6x)
+
+;; On Entry
+;;   A4 - fd
+;;   B4 - mode
+;;   A6 - offset_hi
+;;   B6 - offset_lo
+;;   A8 - len_hi
+;;   B8 - len_lo
+ENTRY(sys_fallocate_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	sys_fallocate,A0
+	MVKH	.S1	sys_fallocate,A0
+	BNOP	.S2X	A0,1
+#else
+	B	.S2	sys_fallocate
+	NOP
+#endif
+	MV	.D1	A6,A7
+	MV	.D1X	B6,A6
+	MV	.D2X	A8,B7
+	MV	.D2	B8,B6
+ENDPROC(sys_fallocate_c6x)
+
+	;; put this in .neardata for faster access when using DSBT mode
+	.section .neardata,"aw",@progbits
+	.global	current_ksp
+	.hidden	current_ksp
+current_ksp:
+	.word	init_thread_union + THREAD_START_SP
diff --git a/arch/c6x/kernel/head.S b/arch/c6x/kernel/head.S
new file mode 100644
index 0000000..133eab6
--- /dev/null
+++ b/arch/c6x/kernel/head.S
@@ -0,0 +1,84 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+#include <linux/linkage.h>
+#include <linux/of_fdt.h>
+#include <asm/asm-offsets.h>
+
+	__HEAD
+ENTRY(_c_int00)
+	;; Save magic and pointer
+	MV	.S1	A4,A10
+	MV	.S2	B4,B10
+	MVKL	.S2	__bss_start,B5
+	MVKH	.S2	__bss_start,B5
+	MVKL	.S2	__bss_stop,B6
+	MVKH	.S2	__bss_stop,B6
+	SUB	.L2	B6,B5,B6 ; bss size
+
+	;; Set the stack pointer
+	MVKL	.S2	current_ksp,B0
+	MVKH	.S2	current_ksp,B0
+	LDW	.D2T2	*B0,B15
+
+	;; clear bss
+	SHR	.S2	B6,3,B0	  ; number of dwords to clear
+	ZERO	.L2	B13
+	ZERO	.L2	B12
+bss_loop:
+	BDEC	.S2	bss_loop,B0
+	NOP	3
+	CMPLT	.L2	B0,0,B1
+ [!B1]	STDW	.D2T2	B13:B12,*B5++[1]
+
+	NOP	4
+	AND	.D2	~7,B15,B15
+
+	;; Clear GIE and PGIE
+	MVC	.S2	CSR,B2
+	CLR	.S2	B2,0,1,B2
+	MVC	.S2	B2,CSR
+	MVC	.S2	TSR,B2
+	CLR	.S2	B2,0,1,B2
+	MVC	.S2	B2,TSR
+	MVC	.S2	ITSR,B2
+	CLR	.S2	B2,0,1,B2
+	MVC	.S2	B2,ITSR
+	MVC	.S2	NTSR,B2
+	CLR	.S2	B2,0,1,B2
+	MVC	.S2	B2,NTSR
+
+	;; pass DTB pointer to machine_init (or zero if none)
+	MVKL	.S1	OF_DT_HEADER,A0
+	MVKH	.S1	OF_DT_HEADER,A0
+	CMPEQ	.L1	A10,A0,A0
+  [A0]	MV	.S1X	B10,A4
+  [!A0] MVK	.S1	0,A4
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	machine_init,A0
+	MVKH	.S1	machine_init,A0
+	B	.S2X	A0
+	ADDKPC	.S2	0f,B3,4
+0:
+#else
+	CALLP	.S2	machine_init,B3
+#endif
+
+	;; Jump to Linux init
+#ifdef CONFIG_C6X_BIG_KERNEL
+	MVKL	.S1	start_kernel,A0
+	MVKH	.S1	start_kernel,A0
+	B	.S2X	A0
+#else
+	B	.S2	start_kernel
+#endif
+	NOP	5
+L1:	BNOP	.S2	L1,5
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
new file mode 100644
index 0000000..247e0eb
--- /dev/null
+++ b/arch/c6x/kernel/irq.c
@@ -0,0 +1,131 @@
+/*
+ *  Copyright (C) 2011-2012 Texas Instruments Incorporated
+ *
+ *  This borrows heavily from powerpc version, which is:
+ *
+ *  Derived from arch/i386/kernel/irq.c
+ *    Copyright (C) 1992 Linus Torvalds
+ *  Adapted from arch/i386 by Gary Thomas
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ *    Copyright (C) 1996-2001 Cort Dougan
+ *  Adapted for Power Macintosh by Paul Mackerras
+ *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/megamod-pic.h>
+#include <asm/special_insns.h>
+
+unsigned long irq_err_count;
+
+static DEFINE_RAW_SPINLOCK(core_irq_lock);
+
+static void mask_core_irq(struct irq_data *data)
+{
+	unsigned int prio = data->hwirq;
+
+	raw_spin_lock(&core_irq_lock);
+	and_creg(IER, ~(1 << prio));
+	raw_spin_unlock(&core_irq_lock);
+}
+
+static void unmask_core_irq(struct irq_data *data)
+{
+	unsigned int prio = data->hwirq;
+
+	raw_spin_lock(&core_irq_lock);
+	or_creg(IER, 1 << prio);
+	raw_spin_unlock(&core_irq_lock);
+}
+
+static struct irq_chip core_chip = {
+	.name		= "core",
+	.irq_mask	= mask_core_irq,
+	.irq_unmask	= unmask_core_irq,
+};
+
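+/* Map core interrupt priority (hwirq) numbers to Linux virq numbers */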
+static int prio_to_virq[NR_PRIORITY_IRQS];
+
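+/*
+ * Called from the low-level interrupt handlers in entry.S with the
+ * hardware priority number of the interrupt that fired.
+ */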
+asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	generic_handle_irq(prio_to_virq[prio]);
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+static struct irq_domain *core_domain;
+
+static int core_domain_map(struct irq_domain *h, unsigned int virq,
+			   irq_hw_number_t hw)
+{
+	if (hw < 4 || hw >= NR_PRIORITY_IRQS)
+		return -EINVAL;
+
+	prio_to_virq[hw] = virq;
+
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
+	return 0;
+}
+
+static const struct irq_domain_ops core_domain_ops = {
+	.map = core_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+void __init init_IRQ(void)
+{
+	struct device_node *np;
+
+	/* Mask all priority IRQs */
+	and_creg(IER, ~0xfff0);
+
+	np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
+	if (np != NULL) {
+		/* create the core host */
+		core_domain = irq_domain_add_linear(np, NR_PRIORITY_IRQS,
+						    &core_domain_ops, NULL);
+		if (core_domain)
+			irq_set_default_host(core_domain);
+		of_node_put(np);
+	}
+
+	printk(KERN_INFO "Core interrupt controller initialized\n");
+
+	/* now we're ready for other SoC controllers */
+	megamod_pic_init();
+
+	/* Clear all general IRQ flags */
+	set_creg(ICR, 0xfff0);
+}
+
+void ack_bad_irq(int irq)
+{
+	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+	irq_err_count++;
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
+}
diff --git a/arch/c6x/kernel/module.c b/arch/c6x/kernel/module.c
new file mode 100644
index 0000000..5fc03f1
--- /dev/null
+++ b/arch/c6x/kernel/module.c
@@ -0,0 +1,123 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2005, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Thomas Charleux (thomas.charleux@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+
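+/*
+ * Patch a PC-relative branch/call displacement in place. The offset is
+ * counted in 32-bit words from the start of the fetch packet containing
+ * the instruction (its address rounded down to 32 bytes) and must fit
+ * in a signed field of 'maskbits' bits starting at bit 'shift'.
+ */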
+static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift)
+{
+	u32 opcode;
+	long ep = (long)ip & ~31;
+	long delta = ((long)dest - ep) >> 2;
+	long mask = (1 << maskbits) - 1;
+
+	if ((delta >> (maskbits - 1)) == 0 ||
+	    (delta >> (maskbits - 1)) == -1) {
+		opcode = *ip;
+		opcode &= ~(mask << shift);
+		opcode |= ((delta & mask) << shift);
+		*ip = opcode;
+
+		pr_debug("REL PCR_S%d[%p] dest[%p] opcode[%08x]\n",
+			 maskbits, ip, (void *)dest, opcode);
+
+		return 0;
+	}
+	pr_err("PCR_S%d reloc %p -> %p out of range!\n",
+	       maskbits, ip, (void *)dest);
+
+	return -1;
+}
+
+/*
+ * apply a RELA relocation
+ */
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+	Elf_Sym *sym;
+	u32 *location, opcode;
+	unsigned int i;
+	Elf32_Addr v;
+	Elf_Addr offset = 0;
+
+	pr_debug("Applying relocate section %u to %u with offset 0x%x\n",
+		 relsec, sechdrs[relsec].sh_info, offset);
+
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset - offset;
+
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+
+		/* this is the adjustment to be made */
+		v = sym->st_value + rel[i].r_addend;
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_C6000_ABS32:
+			pr_debug("RELA ABS32: [%p] = 0x%x\n", location, v);
+			*location = v;
+			break;
+		case R_C6000_ABS16:
+			pr_debug("RELA ABS16: [%p] = 0x%x\n", location, v);
+			*(u16 *)location = v;
+			break;
+		case R_C6000_ABS8:
+			pr_debug("RELA ABS8: [%p] = 0x%x\n", location, v);
+			*(u8 *)location = v;
+			break;
+		case R_C6000_ABS_L16:
+			opcode = *location;
+			opcode &= ~0x7fff80;
+			opcode |= ((v & 0xffff) << 7);
+			pr_debug("RELA ABS_L16[%p] v[0x%x] opcode[0x%x]\n",
+				 location, v, opcode);
+			*location = opcode;
+			break;
+		case R_C6000_ABS_H16:
+			opcode = *location;
+			opcode &= ~0x7fff80;
+			opcode |= ((v >> 9) & 0x7fff80);
+			pr_debug("RELA ABS_H16[%p] v[0x%x] opcode[0x%x]\n",
+				 location, v, opcode);
+			*location = opcode;
+			break;
+		case R_C6000_PCR_S21:
+			if (fixup_pcr(location, v, 21, 7))
+				return -ENOEXEC;
+			break;
+		case R_C6000_PCR_S12:
+			if (fixup_pcr(location, v, 12, 16))
+				return -ENOEXEC;
+			break;
+		case R_C6000_PCR_S10:
+			if (fixup_pcr(location, v, 10, 13))
+				return -ENOEXEC;
+			break;
+		default:
+			pr_err("module %s: Unknown RELA relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+
+	return 0;
+}
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
new file mode 100644
index 0000000..c4ecb24
--- /dev/null
+++ b/arch/c6x/kernel/process.c
@@ -0,0 +1,155 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/init_task.h>
+#include <linux/tick.h>
+#include <linux/mqueue.h>
+#include <linux/syscalls.h>
+#include <linux/reboot.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/syscalls.h>
+
+/* hooks for board specific support */
+void	(*c6x_restart)(void);
+void	(*c6x_halt)(void);
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+/*
+ * power off function, if any
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void arch_cpu_idle(void)
+{
+	unsigned long tmp;
+
+	/*
+	 * Put local_irq_enable and idle in the same execute packet
+	 * so they execute atomically, avoiding a race where an
+	 * interrupt slips in between enabling interrupts and idling.
+	 */
+	asm volatile ("   mvc .s2 CSR,%0\n"
+		      "   or  .d2 1,%0,%0\n"
+		      "   mvc .s2 %0,CSR\n"
+		      "|| idle\n"
+		      : "=b"(tmp));
+}
+
+static void halt_loop(void)
+{
+	printk(KERN_EMERG "System Halted, OK to turn off power\n");
+	local_irq_disable();
+	while (1)
+		asm volatile("idle\n");
+}
+
+void machine_restart(char *__unused)
+{
+	if (c6x_restart)
+		c6x_restart();
+	halt_loop();
+}
+
+void machine_halt(void)
+{
+	if (c6x_halt)
+		c6x_halt();
+	halt_loop();
+}
+
+void machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+	halt_loop();
+}
+
+void flush_thread(void)
+{
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
+{
+	/*
+	 * The binfmt loader will set up a "full" stack, but the C6X
+	 * uses an "empty" stack. So we adjust the usp so that
+	 * argc doesn't get destroyed if an interrupt is taken before
+	 * it is read from the stack.
+	 *
+	 * NB: Library startup code needs to match this.
+	 */
+	usp -= 8;
+
+	regs->pc  = pc;
+	regs->sp  = usp;
+	regs->tsr |= 0x40; /* set user mode */
+	current->thread.usp = usp;
+}
+
+/*
+ * Copy a new thread context into its stack.
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+		unsigned long ustk_size,
+		struct task_struct *p)
+{
+	struct pt_regs *childregs;
+
+	childregs = task_pt_regs(p);
+
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* kernel thread: we return to supervisor space */
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->sp = (unsigned long)(childregs + 1);
+		p->thread.pc = (unsigned long) ret_from_kernel_thread;
+		childregs->a0 = usp;		/* function */
+		childregs->a1 = ustk_size;	/* argument */
+	} else {
+		/* Otherwise use the given stack */
+		*childregs = *current_pt_regs();
+		if (usp)
+			childregs->sp = usp;
+		p->thread.pc = (unsigned long) ret_from_fork;
+	}
+
+	/* Set usp/ksp */
+	p->thread.usp = childregs->sp;
+	thread_saved_ksp(p) = (unsigned long)childregs - 8;
+	p->thread.wchan	= p->thread.pc;
+#ifdef __DSBT__
+	{
+		unsigned long dp;
+
+		asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));
+
+		thread_saved_dp(p) = dp;
+		if (usp == -1)
+			childregs->dp = dp;
+	}
+#endif
+	return 0;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	return p->thread.wchan;
+}
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
new file mode 100644
index 0000000..8801dc9
--- /dev/null
+++ b/arch/c6x/kernel/ptrace.c
@@ -0,0 +1,147 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cacheflush.h>
+
+#define PT_REG_SIZE	  (sizeof(struct pt_regs))
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* nothing to do */
+}
+
+/*
+ * Get the contents of register REGNO of TASK from its live pt_regs.
+ * TSR and CSR are not exposed and read back as zero.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+	long *addr = (long *)task_pt_regs(task);
+
+	if (regno == PT_TSR || regno == PT_CSR)
+		return 0;
+
+	return addr[regno];
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task,
+			  int regno,
+			  unsigned long data)
+{
+	unsigned long *addr = (unsigned long *)task_pt_regs(task);
+
+	if (regno != PT_TSR && regno != PT_CSR)
+		addr[regno] = data;
+
+	return 0;
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   regs,
+				   0, sizeof(*regs));
+}
+
+enum c6x_regset {
+	REGSET_GPR,
+};
+
+static const struct user_regset c6x_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = ELF_NGREG,
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = gpr_get,
+	},
+};
+
+static const struct user_regset_view user_c6x_native_view = {
+	.name		= "tic6x",
+	.e_machine	= EM_TI_C6000,
+	.regsets	= c6x_regsets,
+	.n		= ARRAY_SIZE(c6x_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &user_c6x_native_view;
+}
+
+/*
+ * Perform ptrace request
+ */
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	int ret = 0;
+
+	switch (request) {
+		/*
+		 * write the word at location addr.
+		 */
+	case PTRACE_POKETEXT:
+		ret = generic_ptrace_pokedata(child, addr, data);
+		if (ret == 0 && request == PTRACE_POKETEXT)
+			flush_icache_range(addr, addr + 4);
+		break;
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * handle tracing of system call entry
+ * - return the revised system call number or ULONG_MAX to cause ENOSYS
+ */
+asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
+{
+	if (tracehook_report_syscall_entry(regs))
+		/* tracing decided this syscall should not happen, so
+		 * we'll return a bogus call number to get an ENOSYS
+		 * error, but leave the original number in
+		 * regs->orig_a4.
+		 */
+		return ULONG_MAX;
+
+	return regs->b0;
+}
+
+/*
+ * handle tracing of system call exit
+ */
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+	tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
new file mode 100644
index 0000000..786e36e
--- /dev/null
+++ b/arch/c6x/kernel/setup.c
@@ -0,0 +1,508 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/clkdev.h>
+#include <linux/initrd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+
+#include <asm/sections.h>
+#include <asm/div64.h>
+#include <asm/setup.h>
+#include <asm/dscr.h>
+#include <asm/clock.h>
+#include <asm/soc.h>
+#include <asm/special_insns.h>
+
+static const char *c6x_soc_name;
+
+struct screen_info screen_info;
+
+int c6x_num_cores;
+EXPORT_SYMBOL_GPL(c6x_num_cores);
+
+unsigned int c6x_silicon_rev;
+EXPORT_SYMBOL_GPL(c6x_silicon_rev);
+
+/*
+ * Device status register. This holds information
+ * about device configuration needed by some drivers.
+ */
+unsigned int c6x_devstat;
+EXPORT_SYMBOL_GPL(c6x_devstat);
+
+/*
+ * Some SoCs have fuse registers holding a unique MAC
+ * address. This is parsed out of the device tree with
+ * the resulting MAC being held here.
+ */
+unsigned char c6x_fuse_mac[6];
+
+unsigned long memory_start;
+unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
+
+unsigned long ram_start;
+unsigned long ram_end;
+
+/* Uncached memory for DMA consistent use (memdma=) */
+static unsigned long dma_start __initdata;
+static unsigned long dma_size __initdata;
+
+struct cpuinfo_c6x {
+	const char *cpu_name;
+	const char *cpu_voltage;
+	const char *mmu;
+	const char *fpu;
+	char *cpu_rev;
+	unsigned int core_id;
+	char __cpu_rev[5];
+};
+
+static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);
+
+unsigned int ticks_per_ns_scaled;
+EXPORT_SYMBOL(ticks_per_ns_scaled);
+
+unsigned int c6x_core_freq;
+
+static void __init get_cpuinfo(void)
+{
+	unsigned cpu_id, rev_id, csr;
+	struct clk *coreclk = clk_get_sys(NULL, "core");
+	unsigned long core_khz;
+	u64 tmp;
+	struct cpuinfo_c6x *p;
+	struct device_node *node, *np;
+
+	p = &per_cpu(cpu_data, smp_processor_id());
+
+	if (!IS_ERR(coreclk))
+		c6x_core_freq = clk_get_rate(coreclk);
+	else {
+		printk(KERN_WARNING
+		       "Cannot find core clock frequency. Using 700MHz\n");
+		c6x_core_freq = 700000000;
+	}
+
+	core_khz = c6x_core_freq / 1000;
+
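+	/*
+	 * Precompute core-clock ticks per nanosecond as a fixed-point
+	 * value scaled by 2^C6X_NDELAY_SCALE for later ns-to-ticks
+	 * conversions.
+	 */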
+	tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
+	do_div(tmp, 1000000);
+	ticks_per_ns_scaled = tmp;
+
+	csr = get_creg(CSR);
+	cpu_id = csr >> 24;
+	rev_id = (csr >> 16) & 0xff;
+
+	p->mmu = "none";
+	p->fpu = "none";
+	p->cpu_voltage = "unknown";
+
+	switch (cpu_id) {
+	case 0:
+		p->cpu_name = "C67x";
+		p->fpu = "yes";
+		break;
+	case 2:
+		p->cpu_name = "C62x";
+		break;
+	case 8:
+		p->cpu_name = "C64x";
+		break;
+	case 12:
+		p->cpu_name = "C64x";
+		break;
+	case 16:
+		p->cpu_name = "C64x+";
+		p->cpu_voltage = "1.2";
+		break;
+	case 21:
+		p->cpu_name = "C66X";
+		p->cpu_voltage = "1.2";
+		break;
+	default:
+		p->cpu_name = "unknown";
+		break;
+	}
+
+	if (cpu_id < 16) {
+		switch (rev_id) {
+		case 0x1:
+			if (cpu_id > 8) {
+				p->cpu_rev = "DM640/DM641/DM642/DM643";
+				p->cpu_voltage = "1.2 - 1.4";
+			} else {
+				p->cpu_rev = "C6201";
+				p->cpu_voltage = "2.5";
+			}
+			break;
+		case 0x2:
+			p->cpu_rev = "C6201B/C6202/C6211";
+			p->cpu_voltage = "1.8";
+			break;
+		case 0x3:
+			p->cpu_rev = "C6202B/C6203/C6204/C6205";
+			p->cpu_voltage = "1.5";
+			break;
+		case 0x201:
+			p->cpu_rev = "C6701 revision 0 (early CPU)";
+			p->cpu_voltage = "1.8";
+			break;
+		case 0x202:
+			p->cpu_rev = "C6701/C6711/C6712";
+			p->cpu_voltage = "1.8";
+			break;
+		case 0x801:
+			p->cpu_rev = "C64x";
+			p->cpu_voltage = "1.5";
+			break;
+		default:
+			p->cpu_rev = "unknown";
+		}
+	} else {
+		p->cpu_rev = p->__cpu_rev;
+		snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
+	}
+
+	p->core_id = get_coreid();
+
+	node = of_find_node_by_name(NULL, "cpus");
+	if (node) {
+		for_each_child_of_node(node, np)
+			if (!strcmp("cpu", np->name))
+				++c6x_num_cores;
+		of_node_put(node);
+	}
+
+	node = of_find_node_by_name(NULL, "soc");
+	if (node) {
+		if (of_property_read_string(node, "model", &c6x_soc_name))
+			c6x_soc_name = "unknown";
+		of_node_put(node);
+	} else
+		c6x_soc_name = "unknown";
+
+	printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
+	       p->core_id, p->cpu_name, p->cpu_rev,
+	       p->cpu_voltage, c6x_core_freq / 1000000);
+}
+
+/*
+ * Early parsing of the command line
+ */
+static u32 mem_size __initdata;
+
+/* "mem=" parsing. */
+static int __init early_mem(char *p)
+{
+	if (!p)
+		return -EINVAL;
+
+	mem_size = memparse(p, &p);
+	/* don't remove all of memory when handling "mem={invalid}" */
+	if (mem_size == 0)
+		return -EINVAL;
+
+	return 0;
+}
+early_param("mem", early_mem);
+
+/* "memdma=<size>[@<address>]" parsing. */
+static int __init early_memdma(char *p)
+{
+	if (!p)
+		return -EINVAL;
+
+	dma_size = memparse(p, &p);
+	if (*p == '@')
+		dma_start = memparse(p + 1, &p);	/* skip the '@' */
+
+	return 0;
+}
+early_param("memdma", early_memdma);
+
+int __init c6x_add_memory(phys_addr_t start, unsigned long size)
+{
+	static int ram_found __initdata;
+
+	/* We only handle one bank (the one with PAGE_OFFSET) for now */
+	if (ram_found)
+		return -EINVAL;
+
+	if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
+		return 0;
+
+	ram_start = start;
+	ram_end = start + size;
+
+	ram_found = 1;
+	return 0;
+}
+
+/*
+ * Do early machine setup and device tree parsing. This is called very
+ * early on the boot process.
+ */
+notrace void __init machine_init(unsigned long dt_ptr)
+{
+	void *dtb = __va(dt_ptr);
+	void *fdt = _fdt_start;
+
+	/* interrupts must be masked */
+	set_creg(IER, 2);
+
+	/*
+	 * Set the Interrupt Service Table (IST) to the beginning of the
+	 * vector table.
+	 */
+	set_ist(_vectors_start);
+
+	/*
+	 * dtb is the blob passed in by the bootloader;
+	 * fdt is the blob linked into the kernel image.
+	 */
+	if (dtb && dtb != fdt)
+		fdt = dtb;
+
+	/* Do some early initialization based on the flat device tree */
+	early_init_dt_scan(fdt);
+
+	parse_early_param();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	int bootmap_size;
+	struct memblock_region *reg;
+
+	printk(KERN_INFO "Initializing kernel\n");
+
+	/* Initialize command line */
+	*cmdline_p = boot_command_line;
+
+	memory_end = ram_end;
+	memory_end &= ~(PAGE_SIZE - 1);
+
+	if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
+		memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);
+
+	/* add block that this kernel can use */
+	memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+	/* reserve kernel text/data/bss */
+	memblock_reserve(PAGE_OFFSET,
+			 PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));
+
+	if (dma_size) {
+		/* align to cacheability granularity */
+		dma_size = CACHE_REGION_END(dma_size);
+
+		if (!dma_start)
+			dma_start = memory_end - dma_size;
+
+		/* align to cacheability granularity */
+		dma_start = CACHE_REGION_START(dma_start);
+
+		/* reserve DMA memory taken from kernel memory */
+		if (memblock_is_region_memory(dma_start, dma_size))
+			memblock_reserve(dma_start, dma_size);
+	}
+
+	memory_start = PAGE_ALIGN((unsigned int) &_end);
+
+	printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
+	       memory_start, memory_end);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * Reserve initrd memory if in kernel memory.
+	 */
+	if (initrd_start < initrd_end)
+		if (memblock_is_region_memory(initrd_start,
+					      initrd_end - initrd_start))
+			memblock_reserve(initrd_start,
+					 initrd_end - initrd_start);
+#endif
+
+	init_mm.start_code = (unsigned long) &_stext;
+	init_mm.end_code   = (unsigned long) &_etext;
+	init_mm.end_data   = memory_start;
+	init_mm.brk        = memory_start;
+
+	/*
+	 * Give all the memory to the bootmap allocator and tell it to put
+	 * the boot mem_map at the start of memory.
+	 */
+	bootmap_size = init_bootmem_node(NODE_DATA(0),
+					 memory_start >> PAGE_SHIFT,
+					 PAGE_OFFSET >> PAGE_SHIFT,
+					 memory_end >> PAGE_SHIFT);
+	memblock_reserve(memory_start, bootmap_size);
+
+	unflatten_device_tree();
+
+	c6x_cache_init();
+
+	/* Set the whole external memory as non-cacheable */
+	disable_caching(ram_start, ram_end - 1);
+
+	/* Set caching of external RAM used by Linux */
+	for_each_memblock(memory, reg)
+		enable_caching(CACHE_REGION_START(reg->base),
+			       CACHE_REGION_START(reg->base + reg->size - 1));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * Enable caching for initrd which falls outside kernel memory.
+	 */
+	if (initrd_start < initrd_end) {
+		if (!memblock_is_region_memory(initrd_start,
+					       initrd_end - initrd_start))
+			enable_caching(CACHE_REGION_START(initrd_start),
+				       CACHE_REGION_START(initrd_end - 1));
+	}
+#endif
+
+	/*
+	 * Disable caching for dma coherent memory taken from kernel memory.
+	 */
+	if (dma_size && memblock_is_region_memory(dma_start, dma_size))
+		disable_caching(dma_start,
+				CACHE_REGION_START(dma_start + dma_size - 1));
+
+	/* Initialize the coherent memory allocator */
+	coherent_mem_init(dma_start, dma_size);
+
+	/*
+	 * Free all memory as a starting point.
+	 */
+	free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+	/*
+	 * Then reserve memory which is already being used.
+	 */
+	for_each_memblock(reserved, reg) {
+		pr_debug("reserved - 0x%08x-0x%08x\n",
+			 (u32) reg->base, (u32) reg->size);
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+	}
+
+	max_low_pfn = PFN_DOWN(memory_end);
+	min_low_pfn = PFN_UP(memory_start);
+	max_mapnr = max_low_pfn - min_low_pfn;
+
+	/* Get kmalloc into gear */
+	paging_init();
+
+	/*
+	 * Probe for Device State Configuration Registers.
+	 * We have to do this early in case the timer needs to be enabled
+	 * through DSCR.
+	 */
+	dscr_probe();
+
+	/* We do this early for timer and core clock frequency */
+	c64x_setup_clocks();
+
+	/* Get CPU info */
+	get_cpuinfo();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+#endif
+}
+
+#define cpu_to_ptr(n) ((void *)((long)(n)+1))
+#define ptr_to_cpu(p) ((long)(p) - 1)
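+
+/*
+ * seq_file treats a NULL return from ->start as end of iteration, so
+ * bias the cpu number by one to keep cpu 0 distinct from NULL.
+ */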
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	int n = ptr_to_cpu(v);
+	struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
+
+	if (n == 0) {
+		seq_printf(m,
+			   "soc\t\t: %s\n"
+			   "soc revision\t: 0x%x\n"
+			   "soc cores\t: %d\n",
+			   c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
+	}
+
+	seq_printf(m,
+		   "\n"
+		   "processor\t: %d\n"
+		   "cpu\t\t: %s\n"
+		   "core revision\t: %s\n"
+		   "core voltage\t: %s\n"
+		   "core id\t\t: %d\n"
+		   "mmu\t\t: %s\n"
+		   "fpu\t\t: %s\n"
+		   "cpu MHz\t\t: %u\n"
+		   "bogomips\t: %lu.%02lu\n\n",
+		   n,
+		   p->cpu_name, p->cpu_rev, p->cpu_voltage,
+		   p->core_id, p->mmu, p->fpu,
+		   (c6x_core_freq + 500000) / 1000000,
+		   (loops_per_jiffy/(500000/HZ)),
+		   (loops_per_jiffy/(5000/HZ))%100);
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	c_start,
+	c_stop,
+	c_next,
+	show_cpuinfo
+};
+
+static struct cpu cpu_devices[NR_CPUS];
+
+static int __init topology_init(void)
+{
+	int i;
+
+	for_each_present_cpu(i)
+		register_cpu(&cpu_devices[i], i);
+
+	return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
new file mode 100644
index 0000000..3c4bb5a
--- /dev/null
+++ b/arch/c6x/kernel/signal.c
@@ -0,0 +1,326 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+
+/*
+ * Do a signal return, undo the signal stack.
+ */
+
+#define RETCODE_SIZE (9 << 2)	/* 9 instructions = 36 bytes */
+
+struct rt_sigframe {
+	struct siginfo __user *pinfo;
+	void __user *puc;
+	struct siginfo info;
+	struct ucontext uc;
+	unsigned long retcode[RETCODE_SIZE >> 2];
+};
+
+static int restore_sigcontext(struct pt_regs *regs,
+			      struct sigcontext __user *sc)
+{
+	int err = 0;
+
+	/* The access_ok check was done by caller, so use __get_user here */
+#define COPY(x)  (err |= __get_user(regs->x, &sc->sc_##x))
+
+	COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+	COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+	COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+	COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+	COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+	COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+	COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+	COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+	COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+	COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+	COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+	COPY(csr); COPY(pc);
+
+#undef COPY
+
+	return err;
+}
+
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	sigset_t set;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/*
+	 * Since we stacked the signal on a dword boundary,
+	 * 'sp' should be dword aligned here.  If it's
+	 * not, then the user is trying to mess with us.
+	 */
+	if (regs->sp & 7)
+		goto badframe;
+
+	frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8);
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	return regs->a4;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+			    unsigned long mask)
+{
+	int err = 0;
+
+	err |= __put_user(mask, &sc->sc_mask);
+
+	/* The access_ok check was done by caller, so use __put_user here */
+#define COPY(x) (err |= __put_user(regs->x, &sc->sc_##x))
+
+	COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+	COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+	COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+	COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+	COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+	COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+	COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+	COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+	COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+	COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+	COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+	COPY(csr); COPY(pc);
+
+#undef COPY
+
+	return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+					struct pt_regs *regs,
+					unsigned long framesize)
+{
+	unsigned long sp = sigsp(regs->sp, ksig);
+
+	/*
+	 * No matter what happens, 'sp' must be dword
+	 * aligned. Otherwise, nasty things will happen
+	 */
+	return (void __user *)((sp - framesize) & ~7);
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			  struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	unsigned long __user *retcode;
+	int err = 0;
+
+	frame = get_sigframe(ksig, regs, sizeof(*frame));
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		return -EFAULT;
+
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+	/* Clear all the bits of the ucontext we don't use.  */
+	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+
+	err |= setup_sigcontext(&frame->uc.uc_mcontext,	regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Set up to return from userspace */
+	retcode = (unsigned long __user *) &frame->retcode;
+
+	/* The access_ok check was done above, so use __put_user here */
+#define COPY(x) (err |= __put_user(x, retcode++))
+
+	COPY(0x0000002AUL | (__NR_rt_sigreturn << 7));
+				/* MVK __NR_rt_sigreturn,B0 */
+	COPY(0x10000000UL);	/* SWE */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+	COPY(0x00006000UL);	/* NOP 4 */
+
+#undef COPY
+
+	if (err)
+		return -EFAULT;
+
+	flush_icache_range((unsigned long) &frame->retcode,
+			   (unsigned long) &frame->retcode + RETCODE_SIZE);
+
+	retcode = (unsigned long __user *) &frame->retcode;
+
+	/* Change user context to branch to signal handler */
+	regs->sp = (unsigned long) frame - 8;
+	regs->b3 = (unsigned long) retcode;
+	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+
+	/* Give the signal number to the handler */
+	regs->a4 = ksig->sig;
+
+	/*
+	 * For realtime signals we must also set the second and third
+	 * arguments for the signal handler.
+	 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
+	 */
+	regs->b4 = (unsigned long)&frame->info;
+	regs->a6 = (unsigned long)&frame->uc;
+
+	return 0;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+	switch (regs->a4) {
+	case -ERESTARTNOHAND:
+		if (!has_handler)
+			goto do_restart;
+		regs->a4 = -EINTR;
+		break;
+
+	case -ERESTARTSYS:
+		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+			regs->a4 = -EINTR;
+			break;
+		}
+	/* fallthrough */
+	case -ERESTARTNOINTR:
+do_restart:
+		regs->a4 = regs->orig_a4;
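+		/*
+		 * Back the PC up over the 4-byte SWE instruction so the
+		 * system call is re-issued on return to user space.
+		 */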
+		regs->pc -= 4;
+		break;
+	}
+}
+
+/*
+ * handle the actual delivery of a signal to userspace
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
+			  int syscall)
+{
+	int ret;
+
+	/* Are we from a system call? */
+	if (syscall) {
+		/* If so, check system call restarting.. */
+		switch (regs->a4) {
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			regs->a4 = -EINTR;
+			break;
+
+		case -ERESTARTSYS:
+			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+				regs->a4 = -EINTR;
+				break;
+			}
+
+			/* fallthrough */
+		case -ERESTARTNOINTR:
+			regs->a4 = regs->orig_a4;
+			regs->pc -= 4;
+		}
+	}
+
+	/* Set up the stack frame */
+	ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
+	signal_setup_done(ret, ksig, 0);
+}
+
+/*
+ * handle a potential signal
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+	struct ksignal ksig;
+
+	/*
+	 * We want the common case to go fast, which is why we may in
+	 * certain cases get here from kernel mode; only deliver signals
+	 * when returning to user mode.
+	 */
+	if (!user_mode(regs))
+		return;
+
+	if (get_signal(&ksig)) {
+		handle_signal(&ksig, regs, syscall);
+		return;
+	}
+
+	/* did we come from a system call? */
+	if (syscall) {
+		/* restart the system call - no handlers present */
+		switch (regs->a4) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->a4 = regs->orig_a4;
+			regs->pc -= 4;
+			break;
+
+		case -ERESTART_RESTARTBLOCK:
+			regs->a4 = regs->orig_a4;
+			regs->b0 = __NR_restart_syscall;
+			regs->pc -= 4;
+			break;
+		}
+	}
+
+	/* if there's no signal to deliver, we just put the saved sigmask
+	 * back */
+	restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
+				 int syscall)
+{
+	/* deal with pending signal delivery */
+	if (thread_info_flags & (1 << TIF_SIGPENDING))
+		do_signal(regs, syscall);
+
+	if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
new file mode 100644
index 0000000..3ac7408
--- /dev/null
+++ b/arch/c6x/kernel/soc.c
@@ -0,0 +1,90 @@
+/*
+ *  Miscellaneous SoC-specific hooks.
+ *
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter <msalter@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <asm/setup.h>
+#include <asm/soc.h>
+
+struct soc_ops soc_ops;
+
+int soc_get_exception(void)
+{
+	if (!soc_ops.get_exception)
+		return -1;
+	return soc_ops.get_exception();
+}
+
+void soc_assert_event(unsigned int evt)
+{
+	if (soc_ops.assert_event)
+		soc_ops.assert_event(evt);
+}
+
+static u8 cmdline_mac[6];
+
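+/*
+ * Parse a MAC address given on the kernel command line, e.g.
+ * emac_addr=02:00:00:12:34:56 (illustrative value only).
+ */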
+static int __init get_mac_addr_from_cmdline(char *str)
+{
+	int count, i, val;
+
+	for (count = 0; count < 6 && *str; count++, str += 3) {
+		if (!isxdigit(str[0]) || !isxdigit(str[1]))
+			return 0;
+		if (str[2] != ((count < 5) ? ':' : '\0'))
+			return 0;
+
+		for (i = 0, val = 0; i < 2; i++) {
+			val = val << 4;
+			val |= isdigit(str[i]) ?
+				str[i] - '0' : toupper(str[i]) - 'A' + 10;
+		}
+		cmdline_mac[count] = val;
+	}
+	return 1;
+}
+__setup("emac_addr=", get_mac_addr_from_cmdline);
+
+/*
+ * Setup the MAC address for SoC ethernet devices.
+ *
+ * Before calling this function, the ethernet driver will have
+ * initialized the addr with the local-mac-address from the device
+ * tree (if found). The command line may override that address; the
+ * fused address is used only when neither is present.
+ */
+int soc_mac_addr(unsigned int index, u8 *addr)
+{
+	int i, have_dt_mac = 0, have_cmdline_mac = 0, have_fuse_mac = 0;
+
+	for (i = 0; i < 6; i++) {
+		if (cmdline_mac[i])
+			have_cmdline_mac = 1;
+		if (c6x_fuse_mac[i])
+			have_fuse_mac = 1;
+		if (addr[i])
+			have_dt_mac = 1;
+	}
+
+	/* cmdline overrides all */
+	if (have_cmdline_mac)
+		memcpy(addr, cmdline_mac, 6);
+	else if (!have_dt_mac) {
+		if (have_fuse_mac)
+			memcpy(addr, c6x_fuse_mac, 6);
+		else
+			eth_random_addr(addr);
+	}
+
+	/* adjust for specific EMAC device */
+	addr[5] += index * c6x_num_cores;
+	return 1;
+}
+EXPORT_SYMBOL_GPL(soc_mac_addr);
diff --git a/arch/c6x/kernel/switch_to.S b/arch/c6x/kernel/switch_to.S
new file mode 100644
index 0000000..09177ed
--- /dev/null
+++ b/arch/c6x/kernel/switch_to.S
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (C) 2011 Texas Instruments Incorporated
+ *  Author: Mark Salter (msalter@redhat.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+#define SP	B15
+
+	/*
+	 * void __switch_to(struct thread_info *prev,
+	 *      	    struct thread_info *next,
+	 *		    struct task_struct *tsk) ;
+	 */
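+	/*
+	 * Save prev's callee-saved register pairs (A10-A15, B10-B15),
+	 * loop registers ILC/RILC and return address (B3) at the THREAD_*
+	 * offsets, load the same state for next, switch the stack pointer
+	 * (and DP on non-DSBT builds), then branch to next's saved PC.
+	 * The tsk argument (A6) is passed through in A4 as the return
+	 * value.
+	 */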
+ENTRY(__switch_to)
+	LDDW	.D2T2	*+B4(THREAD_B15_14),B7:B6
+ ||	MV	.L2X	A4,B5	; prev
+ ||	MV	.L1X	B4,A5	; next
+ ||	MVC	.S2	RILC,B1
+
+	STW	.D2T2	B3,*+B5(THREAD_PC)
+ ||	STDW	.D1T1	A13:A12,*+A4(THREAD_A13_12)
+ ||	MVC	.S2	ILC,B0
+
+	LDW	.D2T2	*+B4(THREAD_PC),B3
+ ||	LDDW	.D1T1	*+A5(THREAD_A13_12),A13:A12
+
+	STDW	.D1T1	A11:A10,*+A4(THREAD_A11_10)
+ ||	STDW	.D2T2	B1:B0,*+B5(THREAD_RICL_ICL)
+#ifndef __DSBT__
+ ||	MVKL	.S2	current_ksp,B1
+#endif
+
+	STDW	.D2T2	B15:B14,*+B5(THREAD_B15_14)
+ ||	STDW	.D1T1	A15:A14,*+A4(THREAD_A15_14)
+#ifndef __DSBT__
+ ||	MVKH	.S2	current_ksp,B1
+#endif
+
+	;; Switch to next SP
+	MV	.S2	B7,SP
+#ifdef __DSBT__
+ ||	STW	.D2T2	B7,*+B14(current_ksp)
+#else
+ ||	STW	.D2T2	B7,*B1
+ ||	MV	.L2	B6,B14
+#endif
+ ||	LDDW	.D1T1	*+A5(THREAD_RICL_ICL),A1:A0
+
+	STDW	.D2T2	B11:B10,*+B5(THREAD_B11_10)
+ ||	LDDW	.D1T1	*+A5(THREAD_A15_14),A15:A14
+
+	STDW	.D2T2	B13:B12,*+B5(THREAD_B13_12)
+ ||	LDDW	.D1T1	*+A5(THREAD_A11_10),A11:A10
+
+	B	.S2	B3		; return in next E1
+ ||	LDDW	.D2T2	*+B4(THREAD_B13_12),B13:B12
+
+	LDDW	.D2T2	*+B4(THREAD_B11_10),B11:B10
+	NOP
+
+	MV	.L2X	A0,B0
+ ||	MV	.S1	A6,A4
+
+	MVC	.S2	B0,ILC
+ ||	MV	.L2X	A1,B1
+
+	MVC	.S2	B1,RILC
+ENDPROC(__switch_to)
diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c
new file mode 100644
index 0000000..a742ae2
--- /dev/null
+++ b/arch/c6x/kernel/sys_c6x.c
@@ -0,0 +1,74 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/syscalls.h>
+
+#ifdef CONFIG_ACCESS_CHECK
+int _access_ok(unsigned long addr, unsigned long size)
+{
+	if (!size)
+		return 1;
+
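+	/* Reject NULL and ranges that would wrap past the top of memory. */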
+	if (!addr || addr > (0xffffffffUL - (size - 1)))
+		goto _bad_access;
+
+	if (uaccess_kernel())
+		return 1;
+
+	if (memory_start <= addr && (addr + size - 1) < memory_end)
+		return 1;
+
+_bad_access:
+	pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
+		 current->pid, addr, size);
+	return 0;
+}
+EXPORT_SYMBOL(_access_ok);
+#endif
+
+/* sys_cache_sync -- sync caches over given range */
+asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
+{
+	L1D_cache_block_writeback_invalidate(s, e);
+	L1P_cache_block_invalidate(s, e);
+
+	return 0;
+}
+
+/* Provide the actual syscall-number-to-handler mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
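+/*
+ * With this definition, every __SYSCALL(nr, call) line pulled in from
+ * <asm/unistd.h> below expands to a designated initializer of the form
+ * "[nr] = (call)," filling the matching sys_call_table slot.
+ */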
+
+/*
+ * Use trampolines
+ */
+#define sys_pread64		sys_pread_c6x
+#define sys_pwrite64		sys_pwrite_c6x
+#define sys_truncate64		sys_truncate64_c6x
+#define sys_ftruncate64		sys_ftruncate64_c6x
+#define sys_fadvise64		sys_fadvise64_c6x
+#define sys_fadvise64_64	sys_fadvise64_64_c6x
+#define sys_fallocate		sys_fallocate_c6x
+
+/* Use sys_mmap_pgoff directly */
+#define sys_mmap2 sys_mmap_pgoff
+
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
+void *sys_call_table[__NR_syscalls] = {
+	[0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
new file mode 100644
index 0000000..6a8e00a
--- /dev/null
+++ b/arch/c6x/kernel/time.c
@@ -0,0 +1,66 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+#include <asm/special_insns.h>
+#include <asm/timer64.h>
+
+static u32 sched_clock_multiplier;
+#define SCHED_CLOCK_SHIFT 16
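+/*
+ * sched_clock() converts TSC cycles to nanoseconds with a 16.16
+ * fixed-point multiplier set up in time_init():
+ *   sched_clock_multiplier = (NSEC_PER_SEC << 16) / c6x_core_freq
+ * For example (purely illustrative), a 1 GHz core clock gives a
+ * multiplier of 65536, i.e. exactly 1 ns per cycle.
+ */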
+
+static u64 tsc_read(struct clocksource *cs)
+{
+	return get_cycles();
+}
+
+static struct clocksource clocksource_tsc = {
+	.name		= "timestamp",
+	.rating		= 300,
+	.read		= tsc_read,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * scheduler clock - returns current time in nanoseconds.
+ */
+u64 sched_clock(void)
+{
+	u64 tsc = get_cycles();
+
+	return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+}
+
+void __init time_init(void)
+{
+	u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+	do_div(tmp, c6x_core_freq);
+	sched_clock_multiplier = tmp;
+
+	clocksource_register_hz(&clocksource_tsc, c6x_core_freq);
+
+	/* write anything into TSCL to enable counting */
+	set_creg(TSCL, 0);
+
+	/* probe for timer64 event timer */
+	timer64_init();
+}
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
new file mode 100644
index 0000000..5c60aea
--- /dev/null
+++ b/arch/c6x/kernel/traps.c
@@ -0,0 +1,410 @@
+/*
+ *  Port on Texas Instruments TMS320C6x architecture
+ *
+ *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/sched/debug.h>
+#include <linux/bug.h>
+
+#include <asm/soc.h>
+#include <asm/special_insns.h>
+#include <asm/traps.h>
+
+int (*c6x_nmi_handler)(struct pt_regs *regs);
+
+void __init trap_init(void)
+{
+	ack_exception(EXCEPT_TYPE_NXF);
+	ack_exception(EXCEPT_TYPE_EXC);
+	ack_exception(EXCEPT_TYPE_IXF);
+	ack_exception(EXCEPT_TYPE_SXF);
+	enable_exception();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	pr_err("\n");
+	show_regs_print_info(KERN_ERR);
+	pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
+	pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
+	pr_err("A0: %08lx  B0: %08lx\n", regs->a0, regs->b0);
+	pr_err("A1: %08lx  B1: %08lx\n", regs->a1, regs->b1);
+	pr_err("A2: %08lx  B2: %08lx\n", regs->a2, regs->b2);
+	pr_err("A3: %08lx  B3: %08lx\n", regs->a3, regs->b3);
+	pr_err("A4: %08lx  B4: %08lx\n", regs->a4, regs->b4);
+	pr_err("A5: %08lx  B5: %08lx\n", regs->a5, regs->b5);
+	pr_err("A6: %08lx  B6: %08lx\n", regs->a6, regs->b6);
+	pr_err("A7: %08lx  B7: %08lx\n", regs->a7, regs->b7);
+	pr_err("A8: %08lx  B8: %08lx\n", regs->a8, regs->b8);
+	pr_err("A9: %08lx  B9: %08lx\n", regs->a9, regs->b9);
+	pr_err("A10: %08lx  B10: %08lx\n", regs->a10, regs->b10);
+	pr_err("A11: %08lx  B11: %08lx\n", regs->a11, regs->b11);
+	pr_err("A12: %08lx  B12: %08lx\n", regs->a12, regs->b12);
+	pr_err("A13: %08lx  B13: %08lx\n", regs->a13, regs->b13);
+	pr_err("A14: %08lx  B14: %08lx\n", regs->a14, regs->dp);
+	pr_err("A15: %08lx  B15: %08lx\n", regs->a15, regs->sp);
+	pr_err("A16: %08lx  B16: %08lx\n", regs->a16, regs->b16);
+	pr_err("A17: %08lx  B17: %08lx\n", regs->a17, regs->b17);
+	pr_err("A18: %08lx  B18: %08lx\n", regs->a18, regs->b18);
+	pr_err("A19: %08lx  B19: %08lx\n", regs->a19, regs->b19);
+	pr_err("A20: %08lx  B20: %08lx\n", regs->a20, regs->b20);
+	pr_err("A21: %08lx  B21: %08lx\n", regs->a21, regs->b21);
+	pr_err("A22: %08lx  B22: %08lx\n", regs->a22, regs->b22);
+	pr_err("A23: %08lx  B23: %08lx\n", regs->a23, regs->b23);
+	pr_err("A24: %08lx  B24: %08lx\n", regs->a24, regs->b24);
+	pr_err("A25: %08lx  B25: %08lx\n", regs->a25, regs->b25);
+	pr_err("A26: %08lx  B26: %08lx\n", regs->a26, regs->b26);
+	pr_err("A27: %08lx  B27: %08lx\n", regs->a27, regs->b27);
+	pr_err("A28: %08lx  B28: %08lx\n", regs->a28, regs->b28);
+	pr_err("A29: %08lx  B29: %08lx\n", regs->a29, regs->b29);
+	pr_err("A30: %08lx  B30: %08lx\n", regs->a30, regs->b30);
+	pr_err("A31: %08lx  B31: %08lx\n", regs->a31, regs->b31);
+}
+
+void die(char *str, struct pt_regs *fp, int nr)
+{
+	console_verbose();
+	pr_err("%s: %08x\n", str, nr);
+	show_regs(fp);
+
+	pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
+	       current->comm, current->pid, (PAGE_SIZE +
+					     (unsigned long) current));
+
+	dump_stack();
+	while (1)
+		;
+}
+
+static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
+{
+	if (user_mode(fp))
+		return;
+
+	die(str, fp, nr);
+}
+
+
+/* Internal exceptions */
+static struct exception_info iexcept_table[10] = {
+	{ "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
+	{ "Oops - fetch packet", SIGBUS, BUS_ADRERR },
+	{ "Oops - execute packet", SIGILL, ILL_ILLOPC },
+	{ "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
+	{ "Oops - resource conflict", SIGILL, ILL_ILLOPC },
+	{ "Oops - resource access", SIGILL, ILL_PRVREG },
+	{ "Oops - privilege", SIGILL, ILL_PRVOPC },
+	{ "Oops - loops buffer", SIGILL, ILL_ILLOPC },
+	{ "Oops - software exception", SIGILL, ILL_ILLTRP },
+	{ "Oops - unknown exception", SIGILL, ILL_ILLOPC }
+};
+
+/* External exceptions */
+static struct exception_info eexcept_table[128] = {
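+	/*
+	 * Indexed by the event number returned from soc_get_exception();
+	 * only the last few events (memory protection and EMC faults)
+	 * get specific reports, everything else is a generic external
+	 * exception.
+	 */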
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - external exception", SIGBUS, BUS_ADRERR },
+	{ "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+	{ "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
+};
+
+static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
+{
+	unsigned long addr = instruction_pointer(regs);
+
+	if (except_info->code != TRAP_BRKPT)
+		pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
+		       except_info->kernel_str, regs->pc,
+		       except_info->signo, except_info->code);
+
+	die_if_kernel(except_info->kernel_str, regs, addr);
+
+	force_sig_fault(except_info->signo, except_info->code,
+			(void __user *)addr, current);
+}
+
+/*
+ * Process an internal exception (non-maskable)
+ */
+static int process_iexcept(struct pt_regs *regs)
+{
+	unsigned int iexcept_report = get_iexcept();
+	unsigned int iexcept_num;
+
+	ack_exception(EXCEPT_TYPE_IXF);
+
+	pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);
+
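+	/*
+	 * Each set bit in the internal exception report selects one
+	 * entry in iexcept_table[]; handle the lowest-numbered one
+	 * first and clear it before moving on.
+	 */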
+	while (iexcept_report) {
+		iexcept_num = __ffs(iexcept_report);
+		iexcept_report &= ~(1 << iexcept_num);
+		set_iexcept(iexcept_report);
+		if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
+			/* This is a breakpoint */
+			struct exception_info bkpt_exception = {
+				"Oops - undefined instruction",
+				  SIGTRAP, TRAP_BRKPT
+			};
+			do_trap(&bkpt_exception, regs);
+			iexcept_report &= ~(0xFF);
+			set_iexcept(iexcept_report);
+			continue;
+		}
+
+		do_trap(&iexcept_table[iexcept_num], regs);
+	}
+	return 0;
+}
+
+/*
+ * Process an external exception (maskable)
+ */
+static void process_eexcept(struct pt_regs *regs)
+{
+	int evt;
+
+	pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);
+
+	while ((evt = soc_get_exception()) >= 0)
+		do_trap(&eexcept_table[evt], regs);
+
+	ack_exception(EXCEPT_TYPE_EXC);
+}
+
+/*
+ * Main exception processing
+ */
+asmlinkage int process_exception(struct pt_regs *regs)
+{
+	unsigned int type;
+	unsigned int type_num;
+	unsigned int ie_num = 9; /* default is unknown exception */
+
+	while ((type = get_except_type()) != 0) {
+		type_num = fls(type) - 1;
+
+		switch (type_num) {
+		case EXCEPT_TYPE_NXF:
+			ack_exception(EXCEPT_TYPE_NXF);
+			if (c6x_nmi_handler)
+				(c6x_nmi_handler)(regs);
+			else
+				pr_alert("NMI interrupt!\n");
+			break;
+
+		case EXCEPT_TYPE_IXF:
+			if (process_iexcept(regs))
+				return 1;
+			break;
+
+		case EXCEPT_TYPE_EXC:
+			process_eexcept(regs);
+			break;
+
+		case EXCEPT_TYPE_SXF:
+			ie_num = 8;
+			/* fallthrough */
+		default:
+			ack_exception(type_num);
+			do_trap(&iexcept_table[ie_num], regs);
+			break;
+		}
+	}
+	return 0;
+}
+
+static int kstack_depth_to_print = 48;
+
+static void show_trace(unsigned long *stack, unsigned long *endstack)
+{
+	unsigned long addr;
+	int i;
+
+	pr_debug("Call trace:");
+	i = 0;
+	while (stack + 1 <= endstack) {
+		addr = *stack++;
+		/*
+		 * If the address is either in the text segment of the
+		 * kernel, or in the region which contains vmalloc'ed
+		 * memory, it *may* be the address of a calling
+		 * routine; if so, print it so that someone tracing
+		 * down the cause of the crash will be able to figure
+		 * out the call path that was taken.
+		 */
+		if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+			if (i % 5 == 0)
+				pr_debug("\n	    ");
+#endif
+			pr_debug(" [<%08lx>] %pS\n", addr, (void *)addr);
+			i++;
+		}
+	}
+	pr_debug("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long *p, *endstack;
+	int i;
+
+	if (!stack) {
+		if (task && task != current)
+			/*
+			 * We know this is a kernel stack,
+			 * so this is the start/end.
+			 */
+			stack = (unsigned long *)thread_saved_ksp(task);
+		else
+			stack = (unsigned long *)&stack;
+	}
+	endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
+				     & -THREAD_SIZE);
+
+	pr_debug("Stack from %08lx:", (unsigned long)stack);
+	for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
+		if (p + 1 > endstack)
+			break;
+		if (i % 8 == 0)
+			pr_cont("\n	    ");
+		pr_cont(" %08lx", *p++);
+	}
+	pr_cont("\n");
+	show_trace(stack, endstack);
+}
+
+int is_valid_bugaddr(unsigned long addr)
+{
+	return __kernel_text_address(addr);
+}
diff --git a/arch/c6x/kernel/vectors.S b/arch/c6x/kernel/vectors.S
new file mode 100644
index 0000000..c95c66f
--- /dev/null
+++ b/arch/c6x/kernel/vectors.S
@@ -0,0 +1,81 @@
+;
+;  Port on Texas Instruments TMS320C6x architecture
+;
+;  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+;  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+;  This program is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License version 2 as
+;  published by the Free Software Foundation.
+;
+;  This section handles all the interrupt vector routines.
+;  At RESET the processor sets up the DRAM timing parameters and
+;  branches to the label _c_int00 which handles initialization for the C code.
+;
+
+#define ALIGNMENT 5
+
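+;
+;  Each vector entry below is aligned to 2^ALIGNMENT = 32 bytes, giving
+;  room for eight 32-bit instruction words per entry.
+;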
+	.macro IRQVEC name, handler
+	.align ALIGNMENT
+	.hidden \name
+	.global \name
+\name:
+#ifdef CONFIG_C6X_BIG_KERNEL
+	STW	.D2T1	A0,*B15--[2]
+ ||	MVKL	.S1	\handler,A0
+	MVKH	.S1	\handler,A0
+	B	.S2X	A0
+	LDW	.D2T1	*++B15[2],A0
+	NOP	4
+	NOP
+	NOP
+	.endm
+#else /* CONFIG_C6X_BIG_KERNEL */
+	B	.S2	\handler
+	NOP
+	NOP
+	NOP
+	NOP
+	NOP
+	NOP
+	NOP
+	.endm
+#endif /* CONFIG_C6X_BIG_KERNEL */
+
+	   .sect ".vectors","ax"
+	   .align ALIGNMENT
+	   .global RESET
+	   .hidden RESET
+RESET:
+#ifdef CONFIG_C6X_BIG_KERNEL
+	   MVKL	.S1	_c_int00,A0		; branch to _c_int00
+	   MVKH	.S1	_c_int00,A0
+	   B	.S2X	A0
+#else
+	   B	.S2	_c_int00
+	   NOP
+	   NOP
+#endif
+	   NOP
+	   NOP
+	   NOP
+	   NOP
+	   NOP
+
+	   IRQVEC NMI,_nmi_handler		; NMI interrupt
+	   IRQVEC AINT,_bad_interrupt		; reserved
+	   IRQVEC MSGINT,_bad_interrupt		; reserved
+
+	   IRQVEC INT4,_int4_handler
+	   IRQVEC INT5,_int5_handler
+	   IRQVEC INT6,_int6_handler
+	   IRQVEC INT7,_int7_handler
+	   IRQVEC INT8,_int8_handler
+	   IRQVEC INT9,_int9_handler
+	   IRQVEC INT10,_int10_handler
+	   IRQVEC INT11,_int11_handler
+	   IRQVEC INT12,_int12_handler
+	   IRQVEC INT13,_int13_handler
+	   IRQVEC INT14,_int14_handler
+	   IRQVEC INT15,_int15_handler
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..1fba5b4
--- /dev/null
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ld script for the c6x kernel
+ *
+ *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ *  Mark Salter <msalter@redhat.com>
+ */
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+ENTRY(_c_int00)
+
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+#define	READONLY_SEGMENT_START	\
+	. = PAGE_OFFSET;
+#define	READWRITE_SEGMENT_START	\
+	. = ALIGN(128);		\
+	_data_lma = .;
+
+SECTIONS
+{
+	/*
+	 * Start kernel read only segment
+	 */
+	READONLY_SEGMENT_START
+
+	.vectors :
+	{
+		_vectors_start = .;
+		*(.vectors)
+		. = ALIGN(0x400);
+		_vectors_end = .;
+	}
+
+	/*
+	 * This section contains data which may be shared with other
+	 * cores. It needs to be a fixed offset from PAGE_OFFSET
+	 * regardless of kernel configuration.
+	 */
+	.virtio_ipc_dev :
+	{
+		*(.virtio_ipc_dev)
+	}
+
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+	.init :
+	{
+		_sinittext = .;
+		HEAD_TEXT
+		INIT_TEXT
+		_einittext = .;
+	}
+
+	INIT_DATA_SECTION(16)
+
+	PERCPU_SECTION(128)
+
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
+
+	.text :
+	{
+		_text = .;
+		_stext = .;
+		TEXT_TEXT
+		SCHED_TEXT
+		CPUIDLE_TEXT
+		LOCK_TEXT
+		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
+		KPROBES_TEXT
+		*(.fixup)
+		*(.gnu.warning)
+	}
+
+	EXCEPTION_TABLE(16)
+	NOTES
+
+	RO_DATA_SECTION(PAGE_SIZE)
+	.const :
+	{
+		*(.const .const.* .gnu.linkonce.r.*)
+		*(.switch)
+	}
+
+	. = ALIGN (8) ;
+	__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
+	{
+		_fdt_start = . ;	/* place for fdt blob */
+		*(__fdt_blob) ;		/* Any link-placed DTB */
+		BYTE(0);		/* section always has contents */
+		. = _fdt_start + 0x4000;	/* Pad up to 16 KiB */
+		_fdt_end = . ;
+	}
+
+	_etext = .;
+
+	/*
+	 * Start kernel read-write segment.
+	 */
+	READWRITE_SEGMENT_START
+	_sdata = .;
+
+	.fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
+	{
+		INIT_TASK_DATA(THREAD_SIZE)
+		NOSAVE_DATA
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
+		CACHELINE_ALIGNED_DATA(128)
+		READ_MOSTLY_DATA(128)
+		DATA_DATA
+		CONSTRUCTORS
+		*(.data1)
+		*(.fardata .fardata.*)
+		*(.data.debug_bpt)
+	}
+
+	.neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
+	{
+		*(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
+		*(.neardata .neardata.* .gnu.linkonce.s.*)
+		. = ALIGN(8);
+	}
+
+	BUG_TABLE
+
+	_edata = .;
+
+	__bss_start = .;
+	SBSS(8)
+	BSS(8)
+	.far :
+	{
+		. = ALIGN(8);
+		*(.dynfar)
+		*(.far .far.* .gnu.linkonce.b.*)
+		. = ALIGN(8);
+	}
+	__bss_stop = .;
+
+	_end = .;
+
+	DWARF_DEBUG
+
+	/DISCARD/ :
+	{
+		  EXIT_TEXT
+		  EXIT_DATA
+		  EXIT_CALL
+		  *(.discard)
+		  *(.discard.*)
+		  *(.interp)
+	}
+}