Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/nds32/kernel/.gitignore b/arch/nds32/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/nds32/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
index 27cded3..394df3f 100644
--- a/arch/nds32/kernel/Makefile
+++ b/arch/nds32/kernel/Makefile
@@ -1,10 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for the linux kernel.
 #
 
 CPPFLAGS_vmlinux.lds	:= -DTEXTADDR=$(TEXTADDR)
 AFLAGS_head.o		:= -DTEXTADDR=$(TEXTADDR)
-
 # Object file lists.
 
 obj-y			:= ex-entry.o ex-exit.o ex-scall.o irq.o \
@@ -14,11 +14,15 @@
 
 obj-$(CONFIG_MODULES)		+= nds32_ksyms.o module.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_FPU)		+= fpu.o
 obj-$(CONFIG_OF)		+= devtree.o
 obj-$(CONFIG_CACHE_L2)		+= atl2c.o
-
+obj-$(CONFIG_PERF_EVENTS) += perf_event_cpu.o
+obj-$(CONFIG_PM)		+= pm.o sleep.o
 extra-y := head.o vmlinux.lds
 
+CFLAGS_fpu.o += -mext-fpu-sp -mext-fpu-dp
+
 
 obj-y				+= vdso/
 
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
index 0a7bc69..aab98e4 100644
--- a/arch/nds32/kernel/cacheinfo.c
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -13,7 +13,7 @@
 	this_leaf->level = level;
 	this_leaf->type = type;
 	this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
-	this_leaf->number_of_sets = CACHE_SET(cache_type);;
+	this_leaf->number_of_sets = CACHE_SET(cache_type);
 	this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
 	this_leaf->size = this_leaf->number_of_sets *
 	    this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index d0dbd4f..4206d4b 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -3,327 +3,13 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/string.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/highmem.h>
-#include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/proc-fns.h>
 
-/*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
- */
-static pte_t *consistent_pte;
-static DEFINE_RAW_SPINLOCK(consistent_lock);
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arch_vm_region {
-	struct list_head vm_list;
-	unsigned long vm_start;
-	unsigned long vm_end;
-	struct page *vm_pages;
-};
-
-static struct arch_vm_region consistent_head = {
-	.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start = CONSISTENT_BASE,
-	.vm_end = CONSISTENT_END,
-};
-
-static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head,
-					      size_t size, int gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct arch_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct arch_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	raw_spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
-found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
-nospc:
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
-out:
-	return NULL;
-}
-
-static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
-					     unsigned long addr)
-{
-	struct arch_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
-out:
-	return c;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	struct page *page;
-	struct arch_vm_region *c;
-	unsigned long order;
-	u64 mask = ~0ULL, limit;
-	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
-
-	if (!consistent_pte) {
-		pr_err("%s: not initialized\n", __func__);
-		dump_stack();
-		return NULL;
-	}
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-	}
-
-	/*
-	 * Sanity check the allocation size.
-	 */
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		pr_warn("coherent allocation too big "
-			"(requested %#x mask %#llx)\n", size, mask);
-		goto no_page;
-	}
-
-	order = get_order(size);
-
-	if (mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		cpu_dma_wbinval_range(kaddr, kaddr + size);
-	}
-
-	/*
-	 * Allocate a virtual address in the consistent mapping region.
-	 */
-	c = vm_region_alloc(&consistent_head, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
-	if (c) {
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
-		struct page *end = page + (1 << order);
-
-		c->vm_pages = page;
-
-		/*
-		 * Set the "dma handle"
-		 */
-		*handle = page_to_phys(page);
-
-		do {
-			BUG_ON(!pte_none(*pte));
-
-			/*
-			 * x86 does not mark the pages reserved...
-			 */
-			SetPageReserved(page);
-			set_pte(pte, mk_pte(page, prot));
-			page++;
-			pte++;
-		} while (size -= PAGE_SIZE);
-
-		/*
-		 * Free the otherwise unused pages.
-		 */
-		while (page < end) {
-			__free_page(page);
-			page++;
-		}
-
-		return (void *)c->vm_start;
-	}
-
-	if (page)
-		__free_pages(page, order);
-no_page:
-	*handle = ~0;
-	return NULL;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle, unsigned long attrs)
-{
-	struct arch_vm_region *c;
-	unsigned long flags, addr;
-	pte_t *ptep;
-
-	size = PAGE_ALIGN(size);
-
-	raw_spin_lock_irqsave(&consistent_lock, flags);
-
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-
-				__free_page(page);
-				continue;
-			}
-		}
-
-		pr_crit("%s: bad page in kernel page table\n", __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	list_del(&c->vm_list);
-
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
-	return;
-
-no_area:
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	pr_err("%s: trying to free invalid coherent area: %p\n",
-	       __func__, cpu_addr);
-	dump_stack();
-}
-
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-
-	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
-		if (!pmd) {
-			pr_err("%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-		/* The first level mapping may be created in somewhere.
-		 * It's not necessary to warn here. */
-		/* WARN_ON(!pmd_none(*pmd)); */
-
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-		if (!pte) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte = pte;
-	} while (0);
-
-	return ret;
-}
-
-core_initcall(consistent_init);
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
 		void (*fn)(unsigned long start, unsigned long end))
 {
@@ -389,3 +75,8 @@
 		BUG();
 	}
 }
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
+}
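+
+/*
+ * Usage sketch (assumed, based on the generic dma-noncoherent code):
+ * arch_dma_prep_coherent() is called on freshly allocated pages before
+ * the core DMA code remaps them uncached, roughly
+ *
+ *	page = alloc_pages(gfp, get_order(size));
+ *	arch_dma_prep_coherent(page, size);	(writeback + invalidate)
+ *	vaddr = remap pages with pgprot_noncached(PAGE_KERNEL);
+ *
+ * which is why the hand-rolled consistent_pte mapping is no longer
+ * needed.
+ */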
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index 21a1440..107d98a 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -7,6 +7,7 @@
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/fpu.h>
 
 #ifdef CONFIG_HWZOL
 	.macro push_zol
@@ -15,12 +16,31 @@
 	mfusr	$r16, $LC
 	.endm
 #endif
+	.macro  skip_save_fucop_ctl
+#if defined(CONFIG_FPU)
+skip_fucop_ctl:
+	smw.adm $p0, [$sp], $p0, #0x1
+	j fucop_ctl_done
+#endif
+	.endm
 
 	.macro	save_user_regs
-
+#if defined(CONFIG_FPU)
+	sethi   $p0, hi20(has_fpu)
+	lbsi 	$p0, [$p0+lo12(has_fpu)]
+	beqz	$p0, skip_fucop_ctl
+	mfsr    $p0, $FUCOP_CTL
+	smw.adm $p0, [$sp], $p0, #0x1
+	bclr    $p0, $p0, #FUCOP_CTL_offCP0EN
+	mtsr    $p0, $FUCOP_CTL
+fucop_ctl_done:
+	/* move $SP to the bottom of pt_regs */
+	addi    $sp, $sp, -FUCOP_CTL_OFFSET
+#else
 	smw.adm $sp, [$sp], $sp, #0x1
 	/* move $SP to the bottom of pt_regs */
 	addi    $sp, $sp, -OSP_OFFSET
+#endif
 
 	/* push $r0 ~ $r25 */
 	smw.bim $r0, [$sp], $r25
@@ -79,6 +99,7 @@
 	.long	eh_syscall		!Syscall
 	.long	asm_do_IRQ		!IRQ
 
+	skip_save_fucop_ctl
 common_exception_handler:
 	save_user_regs
 	mfsr	$p0, $ITYPE
@@ -103,7 +124,6 @@
 	mtsr	$r21, $PSW
 	dsb
 	jr	$p1
-
 	/* syscall */
 1:
 	addi	$p1, $p0, #-NDS32_VECTOR_offEXCEPTION
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
index f00af92..1df02a7 100644
--- a/arch/nds32/kernel/ex-exit.S
+++ b/arch/nds32/kernel/ex-exit.S
@@ -8,6 +8,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/current.h>
+#include <asm/fpu.h>
 
 
 
@@ -22,10 +23,18 @@
 	.macro	restore_user_regs_first
 	setgie.d
 	isb
-
+#if defined(CONFIG_FPU)
+	addi    $sp, $sp, OSP_OFFSET
+	lmw.adm $r12, [$sp], $r25, #0x0
+	sethi   $p0, hi20(has_fpu)
+	lbsi 	$p0, [$p0+lo12(has_fpu)]
+	beqz	$p0, 2f
+	mtsr    $r25, $FUCOP_CTL
+2:
+#else
 	addi	$sp, $sp, FUCOP_CTL_OFFSET
-
 	lmw.adm $r12, [$sp], $r24, #0x0
+#endif
 	mtsr	$r12, $SP_USR
 	mtsr	$r13, $IPC
 #ifdef CONFIG_HWZOL
@@ -154,7 +163,7 @@
 	gie_disable
 	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
 	bnez	$t0, no_work_pending
-need_resched:
+
 	lwi	$t0, [tsk+#TSK_TI_FLAGS]
 	andi	$p1, $t0, #_TIF_NEED_RESCHED
 	beqz	$p1, no_work_pending
@@ -164,7 +173,7 @@
 	beqz	$t0, no_work_pending
 
 	jal	preempt_schedule_irq
-	b	need_resched
+	b	no_work_pending
 #endif
 
 /*
diff --git a/arch/nds32/kernel/ex-scall.S b/arch/nds32/kernel/ex-scall.S
index 36aa87e..270050f 100644
--- a/arch/nds32/kernel/ex-scall.S
+++ b/arch/nds32/kernel/ex-scall.S
@@ -19,11 +19,13 @@
 
 	la	$p0, __entry_task
 	sw	$r1, [$p0]
-	move	$p1, $r0
-	addi	$p1, $p1, #THREAD_CPU_CONTEXT
+	addi	$p1, $r0, #THREAD_CPU_CONTEXT
 	smw.bi 	$r6, [$p1], $r14, #0xb		! push r6~r14, fp, lp, sp
 	move	$r25, $r1
-	addi	$r1, $r1, #THREAD_CPU_CONTEXT
+#if defined(CONFIG_FPU)
+	call	_switch_fpu
+#endif
+	addi	$r1, $r25, #THREAD_CPU_CONTEXT
 	lmw.bi 	$r6, [$r1], $r14, #0xb		! pop r6~r14, fp, lp, sp
 	ret
 
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
new file mode 100644
index 0000000..62bdafb
--- /dev/null
+++ b/arch/nds32/kernel/fpu.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2018 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sched/signal.h>
+#include <asm/processor.h>
+#include <asm/user.h>
+#include <asm/io.h>
+#include <asm/bitfield.h>
+#include <asm/fpu.h>
+
+const struct fpu_struct init_fpuregs = {
+	.fd_regs = {[0 ... 31] = sNAN64},
+	.fpcsr = FPCSR_INIT,
+#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
+	.UDF_IEX_trap = 0
+#endif
+};
+
+void save_fpu(struct task_struct *tsk)
+{
+	unsigned int fpcfg, fpcsr;
+
+	enable_fpu();
+	fpcfg = ((__nds32__fmfcfg() & FPCFG_mskFREG) >> FPCFG_offFREG);
+	switch (fpcfg) {
+	case SP32_DP32_reg:
+		asm volatile ("fsdi $fd31, [%0+0xf8]\n\t"
+			      "fsdi $fd30, [%0+0xf0]\n\t"
+			      "fsdi $fd29, [%0+0xe8]\n\t"
+			      "fsdi $fd28, [%0+0xe0]\n\t"
+			      "fsdi $fd27, [%0+0xd8]\n\t"
+			      "fsdi $fd26, [%0+0xd0]\n\t"
+			      "fsdi $fd25, [%0+0xc8]\n\t"
+			      "fsdi $fd24, [%0+0xc0]\n\t"
+			      "fsdi $fd23, [%0+0xb8]\n\t"
+			      "fsdi $fd22, [%0+0xb0]\n\t"
+			      "fsdi $fd21, [%0+0xa8]\n\t"
+			      "fsdi $fd20, [%0+0xa0]\n\t"
+			      "fsdi $fd19, [%0+0x98]\n\t"
+			      "fsdi $fd18, [%0+0x90]\n\t"
+			      "fsdi $fd17, [%0+0x88]\n\t"
+			      "fsdi $fd16, [%0+0x80]\n\t"
+			      :	/* no output */
+			      : "r" (&tsk->thread.fpu)
+			      : "memory");
+		/* fall through */
+	case SP32_DP16_reg:
+		asm volatile ("fsdi $fd15, [%0+0x78]\n\t"
+			      "fsdi $fd14, [%0+0x70]\n\t"
+			      "fsdi $fd13, [%0+0x68]\n\t"
+			      "fsdi $fd12, [%0+0x60]\n\t"
+			      "fsdi $fd11, [%0+0x58]\n\t"
+			      "fsdi $fd10, [%0+0x50]\n\t"
+			      "fsdi $fd9,  [%0+0x48]\n\t"
+			      "fsdi $fd8,  [%0+0x40]\n\t"
+			      :	/* no output */
+			      : "r" (&tsk->thread.fpu)
+			      : "memory");
+		/* fall through */
+	case SP16_DP8_reg:
+		asm volatile ("fsdi $fd7,  [%0+0x38]\n\t"
+			      "fsdi $fd6,  [%0+0x30]\n\t"
+			      "fsdi $fd5,  [%0+0x28]\n\t"
+			      "fsdi $fd4,  [%0+0x20]\n\t"
+			      :	/* no output */
+			      : "r" (&tsk->thread.fpu)
+			      : "memory");
+		/* fall through */
+	case SP8_DP4_reg:
+		asm volatile ("fsdi $fd3,  [%1+0x18]\n\t"
+			      "fsdi $fd2,  [%1+0x10]\n\t"
+			      "fsdi $fd1,  [%1+0x8]\n\t"
+			      "fsdi $fd0,  [%1+0x0]\n\t"
+			      "fmfcsr	%0\n\t"
+			      "swi  %0, [%1+0x100]\n\t"
+			      : "=&r" (fpcsr)
+			      : "r"(&tsk->thread.fpu)
+			      : "memory");
+	}
+	disable_fpu();
+}
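+
+/*
+ * Layout assumed by the offsets above (a sketch of thread.fpu):
+ *
+ *	+0x000 .. +0x018	$fd0  .. $fd3	(always saved)
+ *	+0x020 .. +0x038	$fd4  .. $fd7
+ *	+0x040 .. +0x078	$fd8  .. $fd15
+ *	+0x080 .. +0x0f8	$fd16 .. $fd31
+ *	+0x100			fpcsr
+ *
+ * The switch cases fall through, so SP32_DP32 saves all 32 double
+ * registers while SP8_DP4 saves only $fd0..$fd3 plus fpcsr.
+ */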
+
+void load_fpu(const struct fpu_struct *fpregs)
+{
+	unsigned int fpcfg, fpcsr;
+
+	enable_fpu();
+	fpcfg = ((__nds32__fmfcfg() & FPCFG_mskFREG) >> FPCFG_offFREG);
+	switch (fpcfg) {
+	case SP32_DP32_reg:
+		asm volatile ("fldi $fd31, [%0+0xf8]\n\t"
+			      "fldi $fd30, [%0+0xf0]\n\t"
+			      "fldi $fd29, [%0+0xe8]\n\t"
+			      "fldi $fd28, [%0+0xe0]\n\t"
+			      "fldi $fd27, [%0+0xd8]\n\t"
+			      "fldi $fd26, [%0+0xd0]\n\t"
+			      "fldi $fd25, [%0+0xc8]\n\t"
+			      "fldi $fd24, [%0+0xc0]\n\t"
+			      "fldi $fd23, [%0+0xb8]\n\t"
+			      "fldi $fd22, [%0+0xb0]\n\t"
+			      "fldi $fd21, [%0+0xa8]\n\t"
+			      "fldi $fd20, [%0+0xa0]\n\t"
+			      "fldi $fd19, [%0+0x98]\n\t"
+			      "fldi $fd18, [%0+0x90]\n\t"
+			      "fldi $fd17, [%0+0x88]\n\t"
+			      "fldi $fd16, [%0+0x80]\n\t"
+			      :	/* no output */
+			      : "r" (fpregs));
+		/* fall through */
+	case SP32_DP16_reg:
+		asm volatile ("fldi $fd15, [%0+0x78]\n\t"
+			      "fldi $fd14, [%0+0x70]\n\t"
+			      "fldi $fd13, [%0+0x68]\n\t"
+			      "fldi $fd12, [%0+0x60]\n\t"
+			      "fldi $fd11, [%0+0x58]\n\t"
+			      "fldi $fd10, [%0+0x50]\n\t"
+			      "fldi $fd9,  [%0+0x48]\n\t"
+			      "fldi $fd8,  [%0+0x40]\n\t"
+			      :	/* no output */
+			      : "r" (fpregs));
+		/* fall through */
+	case SP16_DP8_reg:
+		asm volatile ("fldi $fd7,  [%0+0x38]\n\t"
+			      "fldi $fd6,  [%0+0x30]\n\t"
+			      "fldi $fd5,  [%0+0x28]\n\t"
+			      "fldi $fd4,  [%0+0x20]\n\t"
+			      :	/* no output */
+			      : "r" (fpregs));
+		/* fall through */
+	case SP8_DP4_reg:
+		asm volatile ("fldi $fd3,  [%1+0x18]\n\t"
+			      "fldi $fd2,  [%1+0x10]\n\t"
+			      "fldi $fd1,  [%1+0x8]\n\t"
+			      "fldi $fd0,  [%1+0x0]\n\t"
+			      "lwi  %0, [%1+0x100]\n\t"
+			      "fmtcsr	%0\n\t"
+			      : "=&r" (fpcsr)
+			      : "r"(fpregs));
+	}
+	disable_fpu();
+}
+
+void store_fpu_for_suspend(void)
+{
+#ifdef CONFIG_LAZY_FPU
+	if (last_task_used_math != NULL)
+		save_fpu(last_task_used_math);
+	last_task_used_math = NULL;
+#else
+	if (!used_math())
+		return;
+	unlazy_fpu(current);
+#endif
+	clear_fpu(task_pt_regs(current));
+}
+
+inline void do_fpu_context_switch(struct pt_regs *regs)
+{
+	/* Enable use of the FPU. */
+
+	if (!user_mode(regs)) {
+		pr_err("BUG: FPU is used in kernel mode.\n");
+		BUG();
+		return;
+	}
+
+	enable_ptreg_fpu(regs);
+#ifdef CONFIG_LAZY_FPU	/* Lazy FPU is used */
+	if (last_task_used_math == current)
+		return;
+	if (last_task_used_math != NULL)
+		/* Other processes fpu state, save away */
+		save_fpu(last_task_used_math);
+	last_task_used_math = current;
+#endif
+	if (used_math()) {
+		load_fpu(&current->thread.fpu);
+	} else {
+		/* First time FPU user.  */
+		load_fpu(&init_fpuregs);
+#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
+		current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
+#endif
+		set_used_math();
+	}
+
+}
+
+inline void fill_sigfpe_signo(unsigned int fpcsr, int *signo)
+{
+	if (fpcsr & FPCSR_mskOVFT)
+		*signo = FPE_FLTOVF;
+#ifndef CONFIG_SUPPORT_DENORMAL_ARITHMETIC
+	else if (fpcsr & FPCSR_mskUDFT)
+		*signo = FPE_FLTUND;
+#endif
+	else if (fpcsr & FPCSR_mskIVOT)
+		*signo = FPE_FLTINV;
+	else if (fpcsr & FPCSR_mskDBZT)
+		*signo = FPE_FLTDIV;
+	else if (fpcsr & FPCSR_mskIEXT)
+		*signo = FPE_FLTRES;
+}
+
+inline void handle_fpu_exception(struct pt_regs *regs)
+{
+	unsigned int fpcsr;
+	int si_code = 0, si_signo = SIGFPE;
+#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
+	unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT|FPCSR_mskIEXT;
+#else
+	unsigned long redo_except = FPCSR_mskDNIT;
+#endif
+
+	lose_fpu();
+	fpcsr = current->thread.fpu.fpcsr;
+
+	if (fpcsr & redo_except) {
+		si_signo = do_fpuemu(regs, &current->thread.fpu);
+		fpcsr = current->thread.fpu.fpcsr;
+		if (!si_signo) {
+			current->thread.fpu.fpcsr &= ~(redo_except);
+			goto done;
+		}
+	} else if (fpcsr & FPCSR_mskRIT) {
+		if (!user_mode(regs))
+			do_exit(SIGILL);
+		si_signo = SIGILL;
+	}
+
+	switch (si_signo) {
+	case SIGFPE:
+		fill_sigfpe_signo(fpcsr, &si_code);
+		break;
+	case SIGILL:
+		show_regs(regs);
+		si_code = ILL_COPROC;
+		break;
+	case SIGBUS:
+		si_code = BUS_ADRERR;
+		break;
+	default:
+		break;
+	}
+
+	force_sig_fault(si_signo, si_code,
+			(void __user *)instruction_pointer(regs));
+done:
+	own_fpu();
+}
+
+bool do_fpu_exception(unsigned int subtype, struct pt_regs *regs)
+{
+	int done = true;
+	/* Coprocessor disabled exception */
+	if (subtype == FPU_DISABLE_EXCEPTION) {
+		preempt_disable();
+		do_fpu_context_switch(regs);
+		preempt_enable();
+	}
+	/* Coprocessor exception such as underflow and overflow */
+	else if (subtype == FPU_EXCEPTION)
+		handle_fpu_exception(regs);
+	else
+		done = false;
+	return done;
+}
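+
+/*
+ * Dispatch summary: FPU_DISABLE_EXCEPTION means a task touched the FPU
+ * while it was disabled, so the FPU context is lazily switched to that
+ * task; FPU_EXCEPTION is an arithmetic trap (overflow, underflow, ...)
+ * which is either fixed up by the emulator or delivered as a signal.
+ */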
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index 8a41372..fd2a54b 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -7,7 +7,6 @@
 #ifndef CONFIG_DYNAMIC_FTRACE
 extern void (*ftrace_trace_function)(unsigned long, unsigned long,
 				     struct ftrace_ops*, struct pt_regs*);
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 extern void ftrace_graph_caller(void);
 
 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index c5fdae1..fcefb62 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -7,7 +7,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <asm/thread_info.h>
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -123,21 +123,12 @@
         andi    $r0, $r0, MMU_CFG_mskTBS
         srli    $r6, $r6, MMU_CFG_offTBW
         srli    $r0, $r0, MMU_CFG_offTBS
-        /*
-         * we just map the kernel to the maximum way - 1 of tlb
-         * reserver one way for UART VA mapping
-         * it will cause page fault if UART mapping cover the kernel mapping
-         *
-         * direct mapping is not supported now.
-         */
-        li      $r2, 't'
-        beqz    $r6, __error                 ! MMU_CFG.TBW = 0 is direct mappin
+	addi    $r6, $r6, #0x1               ! MMU_CFG.TBW value -> meaning
         addi    $r0, $r0, #0x2               ! MMU_CFG.TBS value -> meaning
         sll     $r0, $r6, $r0                ! entries = k-way * n-set
         mul     $r6, $r0, $r5                ! max size = entries * page size
         /* check kernel image size */
         la      $r3, (_end - PAGE_OFFSET)
-        li      $r2, 's'
         bgt     $r3, $r6, __error
 
 	li      $r2, #(PHYS_OFFSET + TLB_DATA_kernel_text_attr)
@@ -160,7 +151,7 @@
 #endif
 	mtsr    $r3, $TLB_MISC
 
-	mfsr    $r0, $MISC_CTL      ! Enable BTB and RTP and shadow sp
+	mfsr    $r0, $MISC_CTL      ! Enable BTB, RTP, shadow sp, and HW_PRE
 	ori     $r0, $r0, #MISC_init
 	mtsr    $r0, $MISC_CTL
 
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c
index 5ecebd0..20719e4 100644
--- a/arch/nds32/kernel/nds32_ksyms.c
+++ b/arch/nds32/kernel/nds32_ksyms.c
@@ -23,9 +23,3 @@
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__arch_clear_user);
-
-/* cache handling */
-EXPORT_SYMBOL(cpu_icache_inval_all);
-EXPORT_SYMBOL(cpu_dcache_wbinval_all);
-EXPORT_SYMBOL(cpu_dma_inval_range);
-EXPORT_SYMBOL(cpu_dma_wb_range);
diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
new file mode 100644
index 0000000..334c2a6
--- /dev/null
+++ b/arch/nds32/kernel/perf_event_cpu.c
@@ -0,0 +1,1521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2008-2017 Andes Technology Corporation
+ *
+ * Reference ARMv7: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/sched/clock.h>
+#include <linux/percpu-defs.h>
+
+#include <asm/pmu.h>
+#include <asm/irq_regs.h>
+#include <asm/nds32.h>
+#include <asm/stacktrace.h>
+#include <asm/perf_event.h>
+#include <nds32_intrinsic.h>
+
+/* Set at runtime when we know what CPU type we are. */
+static struct nds32_pmu *cpu_pmu;
+
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static void nds32_pmu_start(struct nds32_pmu *cpu_pmu);
+static void nds32_pmu_stop(struct nds32_pmu *cpu_pmu);
+static struct platform_device_id cpu_pmu_plat_device_ids[] = {
+	{.name = "nds32-pfm"},
+	{},
+};
+
+static int nds32_pmu_map_cache_event(const unsigned int (*cache_map)
+				  [PERF_COUNT_HW_CACHE_MAX]
+				  [PERF_COUNT_HW_CACHE_OP_MAX]
+				  [PERF_COUNT_HW_CACHE_RESULT_MAX], u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >> 0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >> 8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
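+
+/*
+ * The config word decoded above follows the generic perf cache-event
+ * encoding, e.g. an L1D read miss would be requested (illustratively)
+ * as:
+ *
+ *	config = (PERF_COUNT_HW_CACHE_L1D << 0) |
+ *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
+ */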
+
+static int
+nds32_pmu_map_hw_event(const unsigned int (*event_map)[PERF_COUNT_HW_MAX],
+		       u64 config)
+{
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int nds32_pmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+	int ev_type = (int)(config & raw_event_mask);
+	int idx = config >> 8;
+
+	switch (idx) {
+	case 0:
+		ev_type = PFM_OFFSET_MAGIC_0 + ev_type;
+		if (ev_type >= SPAV3_0_SEL_LAST || ev_type <= SPAV3_0_SEL_BASE)
+			return -ENOENT;
+		break;
+	case 1:
+		ev_type = PFM_OFFSET_MAGIC_1 + ev_type;
+		if (ev_type >= SPAV3_1_SEL_LAST || ev_type <= SPAV3_1_SEL_BASE)
+			return -ENOENT;
+		break;
+	case 2:
+		ev_type = PFM_OFFSET_MAGIC_2 + ev_type;
+		if (ev_type >= SPAV3_2_SEL_LAST || ev_type <= SPAV3_2_SEL_BASE)
+			return -ENOENT;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	return ev_type;
+}
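+
+/*
+ * Raw events thus carry the counter index in config[15:8] and the
+ * event number in the low bits, so a hypothetical "perf stat -e r105"
+ * would select counter 1 counting event 0x5 of its SPAV3_1 range.
+ */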
+
+int
+nds32_pmu_map_event(struct perf_event *event,
+		    const unsigned int (*event_map)[PERF_COUNT_HW_MAX],
+		    const unsigned int (*cache_map)
+		    [PERF_COUNT_HW_CACHE_MAX]
+		    [PERF_COUNT_HW_CACHE_OP_MAX]
+		    [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return nds32_pmu_map_hw_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return nds32_pmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return nds32_pmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+static int nds32_spav3_map_event(struct perf_event *event)
+{
+	return nds32_pmu_map_event(event, &nds32_pfm_perf_map,
+				&nds32_pfm_perf_cache_map, SOFTWARE_EVENT_MASK);
+}
+
+static inline u32 nds32_pfm_getreset_flags(void)
+{
+	/* Read overflow status */
+	u32 val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 old_val = val;
+
+	/* Write the overflow bits back to clear status; keep all other bits 0 */
+	u32 ov_flag = PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2];
+
+	__nds32__mtsr(val | ov_flag, NDS32_SR_PFM_CTL);
+
+	return old_val;
+}
+
+static inline int nds32_pfm_has_overflowed(u32 pfm)
+{
+	u32 ov_flag = PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2];
+
+	return pfm & ov_flag;
+}
+
+static inline int nds32_pfm_counter_has_overflowed(u32 pfm, int idx)
+{
+	u32 mask = 0;
+
+	switch (idx) {
+	case 0:
+		mask = PFM_CTL_OVF[0];
+		break;
+	case 1:
+		mask = PFM_CTL_OVF[1];
+		break;
+	case 2:
+		mask = PFM_CTL_OVF[2];
+		break;
+	default:
+		pr_err("%s index wrong\n", __func__);
+		break;
+	}
+	return pfm & mask;
+}
+
+/*
+ * Set the next IRQ period, based on the hwc->period_left value.
+ * To be called with the event disabled in hw:
+ */
+int nds32_pmu_event_set_period(struct perf_event *event)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+	if (unlikely(period != hwc->last_period))
+		left = period - (hwc->last_period - left);
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (left > (s64)nds32_pmu->max_period)
+		left = nds32_pmu->max_period;
+
+	/*
+	 * The hw event starts counting from this event offset,
+	 * mark it to be able to extract future "deltas":
+	 */
+	local64_set(&hwc->prev_count, (u64)(-left));
+
+	nds32_pmu->write_counter(event, (u64)(-left) & nds32_pmu->max_period);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
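+
+/*
+ * Worked example of the (u64)(-left) trick above: with a sample_period
+ * of 0x1000 and max_period 0xFFFFFFFF, the counter is programmed to
+ * (u64)(-0x1000) & 0xFFFFFFFF = 0xFFFFF000, so it overflows and raises
+ * the PFM interrupt after exactly 0x1000 further events.
+ */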
+
+static irqreturn_t nds32_pmu_handle_irq(int irq_num, void *dev)
+{
+	u32 pfm;
+	struct perf_sample_data data;
+	struct nds32_pmu *cpu_pmu = (struct nds32_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+	struct pt_regs *regs;
+	int idx;
+	/*
+	 * Get and reset the IRQ flags
+	 */
+	pfm = nds32_pfm_getreset_flags();
+
+	/*
+	 * Did an overflow occur?
+	 */
+	if (!nds32_pfm_has_overflowed(pfm))
+		return IRQ_NONE;
+
+	/*
+	 * Handle the counter(s) overflow(s)
+	 */
+	regs = get_irq_regs();
+
+	nds32_pmu_stop(cpu_pmu);
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!nds32_pfm_counter_has_overflowed(pfm, idx))
+			continue;
+
+		hwc = &event->hw;
+		nds32_pmu_event_update(event);
+		perf_sample_data_init(&data, 0, hwc->last_period);
+		if (!nds32_pmu_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(event);
+	}
+	nds32_pmu_start(cpu_pmu);
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
+	 * will not work.
+	 */
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static inline int nds32_pfm_counter_valid(struct nds32_pmu *cpu_pmu, int idx)
+{
+	return ((idx >= 0) && (idx < cpu_pmu->num_events));
+}
+
+static inline int nds32_pfm_disable_counter(int idx)
+{
+	unsigned int val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 mask = 0;
+
+	mask = PFM_CTL_EN[idx];
+	val &= ~mask;
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+	return idx;
+}
+
+/*
+ * Add an event filter to a given event.
+ */
+static int nds32_pmu_set_event_filter(struct hw_perf_event *event,
+				      struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+	int idx = event->idx;
+	unsigned long no_kernel_tracing = 0;
+	unsigned long no_user_tracing = 0;
+	/* If index is -1, do not do anything */
+	if (idx == -1)
+		return 0;
+
+	no_kernel_tracing = PFM_CTL_KS[idx];
+	no_user_tracing = PFM_CTL_KU[idx];
+	/*
+	 * Default: enable both kernel and user mode tracing.
+	 */
+	if (attr->exclude_user)
+		config_base |= no_user_tracing;
+
+	if (attr->exclude_kernel)
+		config_base |= no_kernel_tracing;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base |= config_base;
+	return 0;
+}
+
+static inline void nds32_pfm_write_evtsel(int idx, u32 evnum)
+{
+	u32 offset = 0;
+	u32 ori_val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 ev_mask = 0;
+	u32 no_kernel_mask = 0;
+	u32 no_user_mask = 0;
+	u32 val;
+
+	offset = PFM_CTL_OFFSEL[idx];
+	/* Clear previous mode selection, and write new one */
+	no_kernel_mask = PFM_CTL_KS[idx];
+	no_user_mask = PFM_CTL_KU[idx];
+	ori_val &= ~no_kernel_mask;
+	ori_val &= ~no_user_mask;
+	if (evnum & no_kernel_mask)
+		ori_val |= no_kernel_mask;
+
+	if (evnum & no_user_mask)
+		ori_val |= no_user_mask;
+
+	/* Clear previous event selection */
+	ev_mask = PFM_CTL_SEL[idx];
+	ori_val &= ~ev_mask;
+	evnum &= SOFTWARE_EVENT_MASK;
+
+	/* undo the linear mapping */
+	evnum = get_converted_evet_hw_num(evnum);
+	val = ori_val | (evnum << offset);
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+}
+
+static inline int nds32_pfm_enable_counter(int idx)
+{
+	unsigned int val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 mask = 0;
+
+	mask = PFM_CTL_EN[idx];
+	val |= mask;
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+	return idx;
+}
+
+static inline int nds32_pfm_enable_intens(int idx)
+{
+	unsigned int val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 mask = 0;
+
+	mask = PFM_CTL_IE[idx];
+	val |= mask;
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+	return idx;
+}
+
+static inline int nds32_pfm_disable_intens(int idx)
+{
+	unsigned int val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	u32 mask = 0;
+
+	mask = PFM_CTL_IE[idx];
+	val &= ~mask;
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+	return idx;
+}
+
+static int event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	/* Other modes NDS32 does not support */
+	return attr->exclude_user || attr->exclude_kernel;
+}
+
+static void nds32_pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	unsigned int evnum = 0;
+	struct hw_perf_event *hwc = &event->hw;
+	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
+
+	if (!nds32_pfm_counter_valid(cpu_pmu, idx)) {
+		pr_err("CPU enabling wrong pfm counter IRQ enable\n");
+		return;
+	}
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	nds32_pfm_disable_counter(idx);
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!cpu_pmu->set_event_filter ||
+	     cpu_pmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_notice
+		("NDS32 performance counters do not support mode exclusion\n");
+		hwc->config_base = 0;
+	}
+	/* Write event */
+	evnum = hwc->config_base;
+	nds32_pfm_write_evtsel(idx, evnum);
+
+	/*
+	 * Enable interrupt for this counter
+	 */
+	nds32_pfm_enable_intens(idx);
+
+	/*
+	 * Enable counter
+	 */
+	nds32_pfm_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void nds32_pmu_disable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
+
+	if (!nds32_pfm_counter_valid(cpu_pmu, idx)) {
+		pr_err("CPU disabling wrong pfm counter IRQ enable %d\n", idx);
+		return;
+	}
+
+	/*
+	 * Disable counter and interrupt
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	nds32_pfm_disable_counter(idx);
+
+	/*
+	 * Disable interrupt for this counter
+	 */
+	nds32_pfm_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static inline u32 nds32_pmu_read_counter(struct perf_event *event)
+{
+	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	u32 count = 0;
+
+	if (!nds32_pfm_counter_valid(cpu_pmu, idx)) {
+		pr_err("CPU reading wrong counter %d\n", idx);
+	} else {
+		switch (idx) {
+		case PFMC0:
+			count = __nds32__mfsr(NDS32_SR_PFMC0);
+			break;
+		case PFMC1:
+			count = __nds32__mfsr(NDS32_SR_PFMC1);
+			break;
+		case PFMC2:
+			count = __nds32__mfsr(NDS32_SR_PFMC2);
+			break;
+		default:
+			pr_err
+			    ("%s: CPU has no performance counters %d\n",
+			     __func__, idx);
+		}
+	}
+	return count;
+}
+
+static inline void nds32_pmu_write_counter(struct perf_event *event, u32 value)
+{
+	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (!nds32_pfm_counter_valid(cpu_pmu, idx)) {
+		pr_err("CPU writing wrong counter %d\n", idx);
+	} else {
+		switch (idx) {
+		case PFMC0:
+			__nds32__mtsr_isb(value, NDS32_SR_PFMC0);
+			break;
+		case PFMC1:
+			__nds32__mtsr_isb(value, NDS32_SR_PFMC1);
+			break;
+		case PFMC2:
+			__nds32__mtsr_isb(value, NDS32_SR_PFMC2);
+			break;
+		default:
+			pr_err
+			    ("%s: CPU has no performance counters %d\n",
+			     __func__, idx);
+		}
+	}
+}
+
+static int nds32_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				   struct perf_event *event)
+{
+	int idx;
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * The current implementation maps cycles, instruction count and
+	 * cache-miss events to specific counters.
+	 * However, more than one of the 3 counters can count these events.
+	 *
+	 * SOFTWARE_EVENT_MASK is the mask for extracting the event number.
+	 * This is defined by Jia-Rung; you can change the policies.
+	 * However, do not exceed 8 bits, as this is hardware specific.
+	 * The last number is SPAV3_2_SEL_LAST.
+	 */
+	unsigned long evtype = hwc->config_base & SOFTWARE_EVENT_MASK;
+
+	idx = get_converted_event_idx(evtype);
+	/*
+	 * Try to get the counter for the corresponding event
+	 */
+	if (evtype == SPAV3_0_SEL_TOTAL_CYCLES) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
+		if (!test_and_set_bit(NDS32_IDX_COUNTER0, cpuc->used_mask))
+			return NDS32_IDX_COUNTER0;
+		if (!test_and_set_bit(NDS32_IDX_COUNTER1, cpuc->used_mask))
+			return NDS32_IDX_COUNTER1;
+	} else if (evtype == SPAV3_1_SEL_COMPLETED_INSTRUCTION) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
+		else if (!test_and_set_bit(NDS32_IDX_COUNTER1, cpuc->used_mask))
+			return NDS32_IDX_COUNTER1;
+		else if (!test_and_set_bit
+			 (NDS32_IDX_CYCLE_COUNTER, cpuc->used_mask))
+			return NDS32_IDX_CYCLE_COUNTER;
+	} else {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
+	}
+	return -EAGAIN;
+}
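+
+/*
+ * In short: total cycles try their preferred counter, then counter 0,
+ * then counter 1; completed instructions try their preferred counter,
+ * then counter 1, then the cycle counter; any other event must take
+ * the one counter that can count it, otherwise -EAGAIN is returned.
+ */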
+
+static void nds32_pmu_start(struct nds32_pmu *cpu_pmu)
+{
+	unsigned long flags;
+	unsigned int val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Enable all counters; the NDS32 PFM has 3 counters */
+	val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	val |= (PFM_CTL_EN[0] | PFM_CTL_EN[1] | PFM_CTL_EN[2]);
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void nds32_pmu_stop(struct nds32_pmu *cpu_pmu)
+{
+	unsigned long flags;
+	unsigned int val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable all counters; the NDS32 PFM has 3 counters */
+	val = __nds32__mfsr(NDS32_SR_PFM_CTL);
+	val &= ~(PFM_CTL_EN[0] | PFM_CTL_EN[1] | PFM_CTL_EN[2]);
+	val &= ~(PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr_isb(val, NDS32_SR_PFM_CTL);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void nds32_pmu_reset(void *info)
+{
+	u32 val = 0;
+
+	val |= (PFM_CTL_OVF[0] | PFM_CTL_OVF[1] | PFM_CTL_OVF[2]);
+	__nds32__mtsr(val, NDS32_SR_PFM_CTL);
+	__nds32__mtsr(0, NDS32_SR_PFM_CTL);
+	__nds32__mtsr(0, NDS32_SR_PFMC0);
+	__nds32__mtsr(0, NDS32_SR_PFMC1);
+	__nds32__mtsr(0, NDS32_SR_PFMC2);
+}
+
+static void nds32_pmu_init(struct nds32_pmu *cpu_pmu)
+{
+	cpu_pmu->handle_irq = nds32_pmu_handle_irq;
+	cpu_pmu->enable = nds32_pmu_enable_event;
+	cpu_pmu->disable = nds32_pmu_disable_event;
+	cpu_pmu->read_counter = nds32_pmu_read_counter;
+	cpu_pmu->write_counter = nds32_pmu_write_counter;
+	cpu_pmu->get_event_idx = nds32_pmu_get_event_idx;
+	cpu_pmu->start = nds32_pmu_start;
+	cpu_pmu->stop = nds32_pmu_stop;
+	cpu_pmu->reset = nds32_pmu_reset;
+	cpu_pmu->max_period = 0xFFFFFFFF;	/* Maximum counts */
+};
+
+static u32 nds32_read_num_pfm_events(void)
+{
+	/* The NDS32 SPAv3 PMU supports 3 counters */
+	return 3;
+}
+
+static int device_pmu_init(struct nds32_pmu *cpu_pmu)
+{
+	nds32_pmu_init(cpu_pmu);
+	/*
+	 * This should be a device-specific name, whatever you like :)
+	 * I think "PMU" will be a good generic name.
+	 */
+	cpu_pmu->name = "nds32v3-pmu";
+	cpu_pmu->map_event = nds32_spav3_map_event;
+	cpu_pmu->num_events = nds32_read_num_pfm_events();
+	cpu_pmu->set_event_filter = nds32_pmu_set_event_filter;
+	return 0;
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct nds32_pmu *pmu)
+{
+	int ret;
+
+	get_cpu();
+	ret = -ENODEV;
+	/*
+	 * If there are various CPU types, each with its own PMU,
+	 * initialize with the corresponding one
+	 */
+	device_pmu_init(pmu);
+	put_cpu();
+	return ret;
+}
+
+static void nds32_pmu_enable(struct pmu *pmu)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(pmu);
+	struct pmu_hw_events *hw_events = nds32_pmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask,
+				    nds32_pmu->num_events);
+
+	if (enabled)
+		nds32_pmu->start(nds32_pmu);
+}
+
+static void nds32_pmu_disable(struct pmu *pmu)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(pmu);
+
+	nds32_pmu->stop(nds32_pmu);
+}
+
+static void nds32_pmu_release_hardware(struct nds32_pmu *nds32_pmu)
+{
+	nds32_pmu->free_irq(nds32_pmu);
+	pm_runtime_put_sync(&nds32_pmu->plat_device->dev);
+}
+
+static irqreturn_t nds32_pmu_dispatch_irq(int irq, void *dev)
+{
+	struct nds32_pmu *nds32_pmu = (struct nds32_pmu *)dev;
+	int ret;
+	u64 start_clock, finish_clock;
+
+	start_clock = local_clock();
+	ret = nds32_pmu->handle_irq(irq, dev);
+	finish_clock = local_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
+}
+
+static int nds32_pmu_reserve_hardware(struct nds32_pmu *nds32_pmu)
+{
+	int err;
+	struct platform_device *pmu_device = nds32_pmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	pm_runtime_get_sync(&pmu_device->dev);
+	err = nds32_pmu->request_irq(nds32_pmu, nds32_pmu_dispatch_irq);
+	if (err) {
+		nds32_pmu_release_hardware(nds32_pmu);
+		return err;
+	}
+
+	return 0;
+}
+
+static int
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+
+	if (is_software_event(event))
+		return 1;
+
+	if (event->pmu != pmu)
+		return 0;
+
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	return nds32_pmu->get_event_idx(hw_events, event) >= 0;
+}
+
+static int validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct pmu_hw_events fake_pmu;
+	DECLARE_BITMAP(fake_used_mask, MAX_COUNTERS);
+	/*
+	 * Initialize the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(fake_used_mask, 0, sizeof(fake_used_mask));
+
+	if (!validate_event(event->pmu, &fake_pmu, leader))
+		return -EINVAL;
+
+	for_each_sibling_event(sibling, leader) {
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(event->pmu, &fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
+
+	mapping = nds32_pmu->map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx = -1;
+	hwc->config_base = 0;
+	hwc->config = 0;
+	hwc->event_base = 0;
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!nds32_pmu->set_event_filter ||
+	     nds32_pmu->set_event_filter(hwc, &event->attr)) &&
+	    event_requires_mode_exclusion(&event->attr)) {
+		pr_debug
+			("NDS performance counters do not support mode exclusion\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base |= (unsigned long)mapping;
+
+	if (!hwc->sample_period) {
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = nds32_pmu->max_period >> 1;
+		hwc->last_period = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nds32_pmu_event_init(struct perf_event *event)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	int err = 0;
+	atomic_t *active_events = &nds32_pmu->active_events;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (nds32_pmu->map_event(event) == -ENOENT)
+		return -ENOENT;
+
+	if (!atomic_inc_not_zero(active_events)) {
+		if (atomic_read(active_events) == 0) {
+			/* Register irq handler */
+			err = nds32_pmu_reserve_hardware(nds32_pmu);
+		}
+
+		if (!err)
+			atomic_inc(active_events);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+
+	return err;
+}
+
+static void nds32_start(struct perf_event *event, int flags)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * NDS pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+	/* Set the period for the event. */
+	nds32_pmu_event_set_period(event);
+
+	nds32_pmu->enable(event);
+}
+
+static int nds32_pmu_add(struct perf_event *event, int flags)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = nds32_pmu->get_hw_events();
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have space for the counter then finish early. */
+	idx = nds32_pmu->get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	nds32_pmu->disable(event);
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		nds32_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+u64 nds32_pmu_event_update(struct perf_event *event)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = nds32_pmu->read_counter(event);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			    new_raw_count) != prev_raw_count) {
+		goto again;
+	}
+	/*
+	 * Whether the counter overflowed or not, "unsigned subtraction"
+	 * will always yield the correct delta
+	 */
+	delta = (new_raw_count - prev_raw_count) & nds32_pmu->max_period;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
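+
+/*
+ * Wraparound example for the delta above: if prev_raw_count is
+ * 0xFFFFFFF0 and the 32-bit counter wraps to 0x00000010, then
+ * (0x10 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20, the true number of events,
+ * so no explicit overflow handling is needed here.
+ */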
+
+static void nds32_stop(struct perf_event *event, int flags)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * NDS pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in nds32_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		nds32_pmu->disable(event);
+		nds32_pmu_event_update(event);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static void nds32_pmu_del(struct perf_event *event, int flags)
+{
+	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = nds32_pmu->get_hw_events();
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	nds32_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static void nds32_pmu_read(struct perf_event *event)
+{
+	nds32_pmu_event_update(event);
+}
+
+/* Please refer to SPAv3 for more hardware specific details */
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *nds32_arch_formats_attr[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group nds32_pmu_format_group = {
+	.name = "format",
+	.attrs = nds32_arch_formats_attr,
+};
+
+static ssize_t nds32_pmu_cpumask_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return 0;
+}
+
+static DEVICE_ATTR(cpus, 0444, nds32_pmu_cpumask_show, NULL);
+
+static struct attribute *nds32_pmu_common_attrs[] = {
+	&dev_attr_cpus.attr,
+	NULL,
+};
+
+static struct attribute_group nds32_pmu_common_group = {
+	.attrs = nds32_pmu_common_attrs,
+};
+
+static const struct attribute_group *nds32_pmu_attr_groups[] = {
+	&nds32_pmu_format_group,
+	&nds32_pmu_common_group,
+	NULL,
+};
+
+static void nds32_init(struct nds32_pmu *nds32_pmu)
+{
+	atomic_set(&nds32_pmu->active_events, 0);
+
+	nds32_pmu->pmu = (struct pmu) {
+		.pmu_enable = nds32_pmu_enable,
+		.pmu_disable = nds32_pmu_disable,
+		.attr_groups = nds32_pmu_attr_groups,
+		.event_init = nds32_pmu_event_init,
+		.add = nds32_pmu_add,
+		.del = nds32_pmu_del,
+		.start = nds32_start,
+		.stop = nds32_stop,
+		.read = nds32_pmu_read,
+	};
+}
+
+int nds32_pmu_register(struct nds32_pmu *nds32_pmu, int type)
+{
+	nds32_init(nds32_pmu);
+	pm_runtime_enable(&nds32_pmu->plat_device->dev);
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+		nds32_pmu->name, nds32_pmu->num_events);
+	return perf_pmu_register(&nds32_pmu->pmu, nds32_pmu->name, type);
+}
+
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
+{
+	return this_cpu_ptr(&cpu_hw_events);
+}
+
+static int cpu_pmu_request_irq(struct nds32_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pmu_device, 0);
+	err = request_irq(irq, handler, IRQF_NOBALANCING, "nds32-pfm",
+			  cpu_pmu);
+	if (err) {
+		pr_err("unable to request IRQ%d for NDS PMU counters\n",
+		       irq);
+		return err;
+	}
+	return 0;
+}
+
+static void cpu_pmu_free_irq(struct nds32_pmu *cpu_pmu)
+{
+	int irq;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0)
+		free_irq(irq, cpu_pmu);
+}
+
+static void cpu_pmu_init(struct nds32_pmu *cpu_pmu)
+{
+	int cpu;
+	struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+
+	raw_spin_lock_init(&events->pmu_lock);
+
+	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+	cpu_pmu->request_irq = cpu_pmu_request_irq;
+	cpu_pmu->free_irq = cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+}
+
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
+	{.compatible = "andestech,nds32v3-pmu",
+	 .data = device_pmu_init},
+	{},
+};
+
+static int cpu_pmu_device_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	int (*init_fn)(struct nds32_pmu *nds32_pmu);
+	struct device_node *node = pdev->dev.of_node;
+	struct nds32_pmu *pmu;
+	int ret = -ENODEV;
+
+	if (cpu_pmu) {
+		pr_notice("[perf] attempt to register multiple PMU devices!\n");
+		return -ENOSPC;
+	}
+
+	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+	if (!pmu)
+		return -ENOMEM;
+
+	of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node);
+	if (node && of_id) {
+		init_fn = of_id->data;
+		ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu);
+	}
+
+	if (ret) {
+		pr_notice("[perf] failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	cpu_pmu = pmu;
+	cpu_pmu->plat_device = pdev;
+	cpu_pmu_init(cpu_pmu);
+	ret = nds32_pmu_register(cpu_pmu, PERF_TYPE_RAW);
+
+	if (!ret)
+		return 0;
+
+out_free:
+	pr_notice("[perf] failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
+
+static struct platform_driver cpu_pmu_driver = {
+	.driver = {
+		   .name = "nds32-pfm",
+		   .of_match_table = cpu_pmu_of_device_ids,
+		   },
+	.probe = cpu_pmu_device_probe,
+	.id_table = cpu_pmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	int err = 0;
+
+	err = platform_driver_register(&cpu_pmu_driver);
+	if (err)
+		pr_notice("[perf] PMU initialization failed\n");
+	else
+		pr_notice("[perf] PMU initialization done\n");
+
+	return err;
+}
+
+device_initcall(register_pmu_driver);
+
+/*
+ * References: arch/nds32/kernel/traps.c:__dump()
+ * You will need to know the NDS ABI first.
+ */
+static int unwind_frame_kernel(struct stackframe *frame)
+{
+	int graph = 0;
+#ifdef CONFIG_FRAME_POINTER
+	/* 0x3 means misalignment */
+	if (!kstack_end((void *)frame->fp) &&
+	    !((unsigned long)frame->fp & 0x3) &&
+	    ((unsigned long)frame->fp >= TASK_SIZE)) {
+		/*
+		 *	The array index is based on the ABI; the graph below
+		 *	illustrates the reasons.
+		 *	Function call procedure: "smw" and "lmw" will always
+		 *	update SP and FP for you automatically.
+		 *
+		 *	Stack                                 Relative Address
+		 *	|  |                                          0
+		 *	----
+		 *	|LP| <-- SP(before smw)  <-- FP(after smw)   -1
+		 *	----
+		 *	|FP|                                         -2
+		 *	----
+		 *	|  | <-- SP(after smw)                       -3
+		 */
+		frame->lp = ((unsigned long *)frame->fp)[-1];
+		frame->fp = ((unsigned long *)frame->fp)[FP_OFFSET];
+		/* make sure CONFIG_FUNCTION_GRAPH_TRACER is turned on */
+		if (__kernel_text_address(frame->lp))
+			frame->lp = ftrace_graph_ret_addr
+						(NULL, &graph, frame->lp, NULL);
+
+		return 0;
+	} else {
+		return -EPERM;
+	}
+#else
+	/*
+	 * You can refer to arch/nds32/kernel/traps.c:__dump()
+	 * Treat "sp" as "fp", but the "sp" is one frame ahead of "fp".
+	 * And, the "sp" is not always correct.
+	 *
+	 *   Stack                                 Relative Address
+	 *   |  |                                          0
+	 *   ----
+	 *   |LP| <-- SP(before smw)                      -1
+	 *   ----
+	 *   |  | <-- SP(after smw)                       -2
+	 *   ----
+	 */
+	if (!kstack_end((void *)frame->sp)) {
+		frame->lp = ((unsigned long *)frame->sp)[1];
+		/* TODO: How do we deal with the case where the value in
+		 * the first "sp" is not correct?
+		 */
+		if (__kernel_text_address(frame->lp))
+			frame->lp = ftrace_graph_ret_addr
+						(tsk, &graph, frame->lp, NULL);
+
+		frame->sp = ((unsigned long *)frame->sp) + 1;
+
+		return 0;
+	} else {
+		return -EPERM;
+	}
+#endif
+}
+
+static void notrace
+walk_stackframe(struct stackframe *frame,
+		int (*fn_record)(struct stackframe *, void *),
+		void *data)
+{
+	while (1) {
+		int ret;
+
+		if (fn_record(frame, data))
+			break;
+
+		ret = unwind_frame_kernel(frame);
+		if (ret < 0)
+			break;
+	}
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return, so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *fr, void *data)
+{
+	struct perf_callchain_entry_ctx *entry = data;
+
+	perf_callchain_store(entry, fr->lp);
+	return 0;
+}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long
+user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
+{
+	struct frame_tail buftail;
+	unsigned long lp = 0;
+	unsigned long *user_frame_tail =
+		(unsigned long *)(fp - (unsigned long)sizeof(buftail));
+
+	/* Check accessibility of one struct frame_tail beyond */
+	if (!access_ok(user_frame_tail, sizeof(buftail)))
+		return 0;
+	if (__copy_from_user_inatomic
+		(&buftail, user_frame_tail, sizeof(buftail)))
+		return 0;
+
+	/*
+	 * Refer to unwind_frame_kernel() for more illustration
+	 */
+	lp = buftail.stack_lp;  /* ((unsigned long *)fp)[-1] */
+	fp = buftail.stack_fp;  /* ((unsigned long *)fp)[FP_OFFSET] */
+	perf_callchain_store(entry, lp);
+	return fp;
+}
+
+static unsigned long
+user_backtrace_opt_size(struct perf_callchain_entry_ctx *entry,
+			unsigned long fp)
+{
+	struct frame_tail_opt_size buftail;
+	unsigned long lp = 0;
+
+	unsigned long *user_frame_tail =
+		(unsigned long *)(fp - (unsigned long)sizeof(buftail));
+
+	/* Check that one struct frame_tail below fp is accessible */
+	if (!access_ok(user_frame_tail, sizeof(buftail)))
+		return 0;
+	if (__copy_from_user_inatomic
+		(&buftail, user_frame_tail, sizeof(buftail)))
+		return 0;
+
+	/*
+	 * Refer to unwind_frame_kernel() for more illustration
+	 */
+	lp = buftail.stack_lp;  /* ((unsigned long *)fp)[-1] */
+	fp = buftail.stack_fp;  /* ((unsigned long *)fp)[FP_OFFSET] */
+
+	perf_callchain_store(entry, lp);
+	return fp;
+}
+
+/*
+ * Called when the sampled target is in user mode. This function is
+ * reached only when "PERF_SAMPLE_CALLCHAIN" is requested; see
+ * kernel/events/core.c:perf_prepare_sample().
+ *
+ * How to trigger perf_callchain_[user/kernel]:
+ * $ perf record -e cpu-clock --call-graph fp ./program
+ * $ perf report --call-graph
+ */
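+/*
+ * Scratch slot for the word fetched from just below the user frame
+ * pointer; comparing it with the saved lp is what tells leaf
+ * functions apart from non-leaf ones below.
+ */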
+unsigned long leaf_fp;
+void
+perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+		    struct pt_regs *regs)
+{
+	unsigned long fp = 0;
+	unsigned long gp = 0;
+	unsigned long lp = 0;
+	unsigned long sp = 0;
+	unsigned long *user_frame_tail;
+
+	leaf_fp = 0;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* Guest OS callchains are not supported yet */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->ipc);
+	fp = regs->fp;
+	gp = regs->gp;
+	lp = regs->lp;
+	sp = regs->sp;
+	if (entry->nr < PERF_MAX_STACK_DEPTH &&
+	    (unsigned long)fp && !((unsigned long)fp & 0x7) && fp > sp) {
+		user_frame_tail =
+			(unsigned long *)(fp - (unsigned long)sizeof(fp));
+
+		if (!access_ok(user_frame_tail, sizeof(fp)))
+			return;
+
+		if (__copy_from_user_inatomic
+			(&leaf_fp, user_frame_tail, sizeof(fp)))
+			return;
+
+		if (leaf_fp == lp) {
+			/*
+			 * This may be a non-leaf function built
+			 * with optimize-for-size, or one built
+			 * without it.
+			 */
+			struct frame_tail buftail;
+
+			user_frame_tail =
+				(unsigned long *)(fp -
+					(unsigned long)sizeof(buftail));
+
+			if (!access_ok(user_frame_tail, sizeof(buftail)))
+				return;
+
+			if (__copy_from_user_inatomic
+				(&buftail, user_frame_tail, sizeof(buftail)))
+				return;
+
+			if (buftail.stack_fp == gp) {
+				/* Non-leaf function built with
+				 * optimize-for-size.
+				 */
+				struct frame_tail_opt_size buftail_opt_size;
+
+				user_frame_tail =
+					(unsigned long *)(fp - (unsigned long)
+						sizeof(buftail_opt_size));
+
+				if (!access_ok(user_frame_tail,
+					       sizeof(buftail_opt_size)))
+					return;
+
+				if (__copy_from_user_inatomic
+				   (&buftail_opt_size, user_frame_tail,
+				   sizeof(buftail_opt_size)))
+					return;
+
+				perf_callchain_store(entry, lp);
+				fp = buftail_opt_size.stack_fp;
+
+				while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+				       (unsigned long)fp &&
+						!((unsigned long)fp & 0x7) &&
+						fp > sp) {
+					sp = fp;
+					fp = user_backtrace_opt_size(entry, fp);
+				}
+
+			} else {
+				/* Non-leaf function built without
+				 * optimize-for-size.
+				 */
+				fp = buftail.stack_fp;
+				perf_callchain_store(entry, lp);
+				while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+				       (unsigned long)fp &&
+						!((unsigned long)fp & 0x7) &&
+						fp > sp) {
+					sp = fp;
+					fp = user_backtrace(entry, fp);
+				}
+			}
+		} else {
+			/* This is a leaf function */
+			fp = leaf_fp;
+			perf_callchain_store(entry, lp);
+
+			/* Unwind the previous functions' call chain */
+			while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+			       (unsigned long)fp &&
+				   !((unsigned long)fp & 0x7) && fp > sp) {
+				sp = fp;
+				fp = user_backtrace(entry, fp);
+			}
+		}
+		return;
+	}
+}
+
+/* This will be called when the target is in kernel mode */
+void
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+		      struct pt_regs *regs)
+{
+	struct stackframe fr;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* Guest OS callchains are not supported yet */
+		return;
+	}
+	fr.fp = regs->fp;
+	fr.lp = regs->lp;
+	fr.sp = regs->sp;
+	walk_stackframe(&fr, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	/* NDS32 does not support virtualization yet */
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		return perf_guest_cbs->get_guest_ip();
+
+	return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+
+	/* NDS32 does not support virtualization yet */
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		if (perf_guest_cbs->is_user_mode())
+			misc |= PERF_RECORD_MISC_GUEST_USER;
+		else
+			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+	} else {
+		if (user_mode(regs))
+			misc |= PERF_RECORD_MISC_USER;
+		else
+			misc |= PERF_RECORD_MISC_KERNEL;
+	}
+
+	return misc;
+}
diff --git a/arch/nds32/kernel/pm.c b/arch/nds32/kernel/pm.c
new file mode 100644
index 0000000..ffa8040
--- /dev/null
+++ b/arch/nds32/kernel/pm.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2008-2017 Andes Technology Corporation
+
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <asm/suspend.h>
+#include <nds32_intrinsic.h>
+
+unsigned int resume_addr;
+unsigned int *phy_addr_sp_tmp;
+
+static void nds32_suspend2ram(void)
+{
+	pgd_t *pgdv;
+	pud_t *pudv;
+	pmd_t *pmdv;
+	pte_t *ptev;
+
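+	/*
+	 * Walk the page tables by hand to translate cpu_resume's
+	 * virtual address into the physical address the wake-up path
+	 * jumps to while the MMU is still off.
+	 */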
+	pgdv = (pgd_t *)__va((__nds32__mfsr(NDS32_SR_L1_PPTB) &
+		L1_PPTB_mskBASE)) + pgd_index((unsigned int)cpu_resume);
+
+	pudv = pud_offset(pgdv, (unsigned int)cpu_resume);
+	pmdv = pmd_offset(pudv, (unsigned int)cpu_resume);
+	ptev = pte_offset_map(pmdv, (unsigned int)cpu_resume);
+
+	resume_addr = ((*ptev) & TLB_DATA_mskPPN)
+			| ((unsigned int)cpu_resume & 0x00000fff);
+
+	suspend2ram();
+}
+
+static void nds32_suspend_cpu(void)
+{
+	while (!(__nds32__mfsr(NDS32_SR_INT_PEND) & wake_mask))
+		__asm__ volatile ("standby no_wake_grant\n\t");
+}
+
+static int nds32_pm_valid(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_STANDBY:
+	case PM_SUSPEND_MEM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int nds32_pm_enter(suspend_state_t state)
+{
+	pr_debug("%s:state:%d\n", __func__, state);
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		nds32_suspend_cpu();
+		return 0;
+	case PM_SUSPEND_MEM:
+		nds32_suspend2ram();
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct platform_suspend_ops nds32_pm_ops = {
+	.valid = nds32_pm_valid,
+	.enter = nds32_pm_enter,
+};
+
+static int __init nds32_pm_init(void)
+{
+	pr_debug("Enter %s\n", __func__);
+	suspend_set_ops(&nds32_pm_ops);
+	return 0;
+}
+late_initcall(nds32_pm_init);
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index 65fda98..9712fd4 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -9,15 +9,16 @@
 #include <linux/uaccess.h>
 #include <asm/elf.h>
 #include <asm/proc-fns.h>
+#include <asm/fpu.h>
 #include <linux/ptrace.h>
 #include <linux/reboot.h>
 
-extern void setup_mm_for_reboot(char mode);
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_dir_cpu;
-EXPORT_SYMBOL(proc_dir_cpu);
+#if IS_ENABLED(CONFIG_LAZY_FPU)
+struct task_struct *last_task_used_math;
 #endif
 
+extern void setup_mm_for_reboot(char mode);
+
 extern inline void arch_reset(char mode)
 {
 	if (mode == 's') {
@@ -120,20 +121,36 @@
 		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
 	pr_info("  IRQs o%s  Segment %s\n",
 		interrupts_enabled(regs) ? "n" : "ff",
-		segment_eq(get_fs(), get_ds())? "kernel" : "user");
+		segment_eq(get_fs(), KERNEL_DS) ? "kernel" : "user");
 }
 
 EXPORT_SYMBOL(show_regs);
 
+void exit_thread(struct task_struct *tsk)
+{
+#if defined(CONFIG_FPU) && defined(CONFIG_LAZY_FPU)
+	if (last_task_used_math == tsk)
+		last_task_used_math = NULL;
+#endif
+}
+
 void flush_thread(void)
 {
+#if defined(CONFIG_FPU)
+	clear_fpu(task_pt_regs(current));
+	clear_used_math();
+# ifdef CONFIG_LAZY_FPU
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+# endif
+#endif
 }
 
 DEFINE_PER_CPU(struct task_struct *, __entry_task);
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-	    unsigned long stk_sz, struct task_struct *p)
+		unsigned long stk_sz, struct task_struct *p)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 
@@ -159,6 +176,22 @@
 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 	p->thread.cpu_context.sp = (unsigned long)childregs;
 
+#if IS_ENABLED(CONFIG_FPU)
+	if (used_math()) {
+# if !IS_ENABLED(CONFIG_LAZY_FPU)
+		unlazy_fpu(current);
+# else
+		preempt_disable();
+		if (last_task_used_math == current)
+			save_fpu(current);
+		preempt_enable();
+# endif
+		p->thread.fpu = current->thread.fpu;
+		clear_fpu(task_pt_regs(p));
+		set_stopped_child_used_math(p);
+	}
+#endif
+
 #ifdef CONFIG_HWZOL
 	childregs->lb = 0;
 	childregs->le = 0;
@@ -168,12 +201,33 @@
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_FPU)
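+/*
+ * FPU hand-over at context switch: without lazy FPU the outgoing
+ * task's registers are saved eagerly; either way a user task resumes
+ * with FPU access cleared, so its state is reloaded on the first FPU
+ * instruction through the coprocessor exception path in traps.c.
+ */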
+struct task_struct *_switch_fpu(struct task_struct *prev, struct task_struct *next)
+{
+#if !IS_ENABLED(CONFIG_LAZY_FPU)
+	unlazy_fpu(prev);
+#endif
+	if (!(next->flags & PF_KTHREAD))
+		clear_fpu(task_pt_regs(next));
+	return prev;
+}
+#endif
+
 /*
  * fill in the fpe structure for a core dump...
  */
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
 {
 	int fpvalid = 0;
+#if IS_ENABLED(CONFIG_FPU)
+	struct task_struct *tsk = current;
+
+	fpvalid = tsk_used_math(tsk);
+	if (fpvalid) {
+		lose_fpu();
+		memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu));
+	}
+#endif
 	return fpvalid;
 }
 
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 63a1a5e..31d29d9 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -2,9 +2,8 @@
 // Copyright (C) 2005-2017 Andes Technology Corporation
 
 #include <linux/cpu.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/seq_file.h>
 #include <linux/console.h>
 #include <linux/screen_info.h>
 #include <linux/delay.h>
@@ -16,6 +15,7 @@
 #include <asm/proc-fns.h>
 #include <asm/cache_info.h>
 #include <asm/elf.h>
+#include <asm/fpu.h>
 #include <nds32_intrinsic.h>
 
 #define HWCAP_MFUSR_PC		0x000001
@@ -39,8 +39,10 @@
 #define HWCAP_FPU_DP		0x040000
 #define HWCAP_V2		0x080000
 #define HWCAP_DX_REGS		0x100000
+#define HWCAP_HWPRE		0x200000
 
 unsigned long cpu_id, cpu_rev, cpu_cfgid;
+bool has_fpu = false;
 char cpu_series;
 char *endianness = NULL;
 
@@ -71,8 +73,10 @@
 	"div",
 	"mac",
 	"l2c",
-	"dx_regs",
+	"fpu_dp",
 	"v2",
+	"dx_regs",
+	"hw_pre",
 	NULL,
 };
 
@@ -137,6 +141,11 @@
 		    (aliasing_num - 1) << PAGE_SHIFT;
 	}
 #endif
+#ifdef CONFIG_FPU
+	/* Disable the FPU here; it is enabled again on first use. */
+	if (has_fpu)
+		disable_fpu();
+#endif
 }
 
 static void __init setup_cpuinfo(void)
@@ -181,9 +190,10 @@
 	if (cpu_cfgid & 0x0004)
 		elf_hwcap |= HWCAP_EXT2;
 
-	if (cpu_cfgid & 0x0008)
+	if (cpu_cfgid & 0x0008) {
 		elf_hwcap |= HWCAP_FPU;
-
+		has_fpu = true;
+	}
 	if (cpu_cfgid & 0x0010)
 		elf_hwcap |= HWCAP_STRING;
 
@@ -213,6 +223,11 @@
 	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
 		elf_hwcap |= HWCAP_L2C;
 
+#ifdef CONFIG_HW_PRE
+	if (__nds32__mfsr(NDS32_SR_MISC_CTL) & MISC_CTL_makHWPRE_EN)
+		elf_hwcap |= HWCAP_HWPRE;
+#endif
+
 	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
 	if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
 		tmp |= CACHE_CTL_mskDC_EN;
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index 5d01f6e..330b19f 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -12,6 +12,7 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/fpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/vdso.h>
@@ -20,6 +21,60 @@
 	struct siginfo info;
 	struct ucontext uc;
 };
+#if IS_ENABLED(CONFIG_FPU)
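+/*
+ * The FPU context travels through the signal frame:
+ * setup_sigcontext_fpu() saves the current FPU state into the user
+ * sigcontext at signal delivery, and restore_sigcontext_fpu() brings
+ * it back at sigreturn, with used_math_flag recording whether the
+ * task ever touched the FPU.
+ */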
+static inline int restore_sigcontext_fpu(struct pt_regs *regs,
+					 struct sigcontext __user *sc)
+{
+	struct task_struct *tsk = current;
+	unsigned long used_math_flag;
+	int ret = 0;
+
+	clear_used_math();
+	__get_user_error(used_math_flag, &sc->used_math_flag, ret);
+
+	if (!used_math_flag)
+		return 0;
+	set_used_math();
+
+#if IS_ENABLED(CONFIG_LAZY_FPU)
+	preempt_disable();
+	if (current == last_task_used_math) {
+		last_task_used_math = NULL;
+		disable_ptreg_fpu(regs);
+	}
+	preempt_enable();
+#else
+	clear_fpu(regs);
+#endif
+
+	return __copy_from_user(&tsk->thread.fpu, &sc->fpu,
+				sizeof(struct fpu_struct));
+}
+
+static inline int setup_sigcontext_fpu(struct pt_regs *regs,
+				       struct sigcontext __user *sc)
+{
+	struct task_struct *tsk = current;
+	int ret = 0;
+
+	__put_user_error(used_math(), &sc->used_math_flag, ret);
+
+	if (!used_math())
+		return ret;
+
+	preempt_disable();
+#if IS_ENABLED(CONFIG_LAZY_FPU)
+	if (last_task_used_math == tsk)
+		save_fpu(last_task_used_math);
+#else
+	unlazy_fpu(tsk);
+#endif
+	ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu,
+			     sizeof(struct fpu_struct));
+	preempt_enable();
+	return ret;
+}
+#endif
 
 static int restore_sigframe(struct pt_regs *regs,
 			    struct rt_sigframe __user * sf)
@@ -69,7 +124,9 @@
 	__get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
 	__get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
 #endif
-
+#if IS_ENABLED(CONFIG_FPU)
+	err |= restore_sigcontext_fpu(regs, &sf->uc.uc_mcontext);
+#endif
 	/*
 	 * Avoid sys_rt_sigreturn() restarting.
 	 */
@@ -94,7 +151,7 @@
 
 	frame = (struct rt_sigframe __user *)regs->sp;
 
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+	if (!access_ok(frame, sizeof(*frame)))
 		goto badframe;
 
 	if (restore_sigframe(regs, frame))
@@ -106,7 +163,7 @@
 	return regs->uregs[0];
 
 badframe:
-	force_sig(SIGSEGV, current);
+	force_sig(SIGSEGV);
 	return 0;
 }
 
@@ -153,6 +210,9 @@
 	__put_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
 	__put_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
 #endif
+#if IS_ENABLED(CONFIG_FPU)
+	err |= setup_sigcontext_fpu(regs, &sf->uc.uc_mcontext);
+#endif
 
 	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no,
 			 err);
@@ -215,7 +275,7 @@
 	    get_sigframe(ksig, regs, sizeof(*frame));
 	int err = 0;
 
-	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+	if (!access_ok(frame, sizeof(*frame)))
 		return -EFAULT;
 
 	__put_user_error(0, &frame->uc.uc_flags, err);
@@ -256,6 +316,7 @@
 				regs->uregs[0] = -EINTR;
 				break;
 			}
+			/* Else, fall through */
 		case -ERESTARTNOINTR:
 			regs->uregs[0] = regs->orig_r0;
 			regs->ipc -= 4;
@@ -300,6 +361,7 @@
 		switch (regs->uregs[0]) {
 		case -ERESTART_RESTARTBLOCK:
 			regs->uregs[15] = __NR_restart_syscall;
+			/* Fall through */
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
diff --git a/arch/nds32/kernel/sleep.S b/arch/nds32/kernel/sleep.S
new file mode 100644
index 0000000..ca4e61f
--- /dev/null
+++ b/arch/nds32/kernel/sleep.S
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <asm/memory.h>
+
+.data
+.global sp_tmp
+sp_tmp:
+.long
+
+.text
+.globl suspend2ram
+.globl cpu_resume
+
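+// Save the GPRs plus the MMU and interrupt-control system registers
+// on the stack, write back the data caches, then spin in standby
+// until an interrupt selected by wake_mask is pending.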
+suspend2ram:
+	pushm   $r0, $r31
+#if defined(CONFIG_HWZOL)
+	mfusr   $r0, $lc
+	mfusr   $r1, $le
+	mfusr   $r2, $lb
+#endif
+	mfsr	$r3, $mr0
+	mfsr    $r4, $mr1
+	mfsr    $r5, $mr4
+	mfsr    $r6, $mr6
+	mfsr    $r7, $mr7
+	mfsr    $r8, $mr8
+	mfsr    $r9, $ir0
+	mfsr    $r10, $ir1
+	mfsr    $r11, $ir2
+	mfsr    $r12, $ir3
+	mfsr    $r13, $ir9
+	mfsr    $r14, $ir10
+	mfsr    $r15, $ir12
+	mfsr    $r16, $ir13
+	mfsr    $r17, $ir14
+	mfsr    $r18, $ir15
+	pushm   $r0, $r19
+#if defined(CONFIG_FPU)
+	jal	store_fpu_for_suspend
+#endif
+	tlbop	FlushAll
+	isb
+
+	// translate $sp from va to pa
+	sethi	$r0, hi20(PAGE_OFFSET)
+	ori	$r0, $r0, lo12(PAGE_OFFSET)
+	movi	$r2, PHYS_OFFSET
+	sub	$r1, $sp, $r0
+	add	$r2, $r1, $r2
+
+	// store pa($sp) to sp_tmp
+	sethi 	$r1, hi20(sp_tmp)
+	swi	$r2, [$r1 + lo12(sp_tmp)]
+
+	pushm	$r16, $r25
+	pushm	$r29, $r30
+#ifdef	CONFIG_CACHE_L2
+	jal	dcache_wb_all_level
+#else
+	jal	cpu_dcache_wb_all
+#endif
+	popm	$r29, $r30
+	popm	$r16, $r25
+
+	// get wake_mask and loop in standby
+	la	$r1, wake_mask
+	lwi	$r1, [$r1]
+self_loop:
+	standby wake_grant
+	mfsr	$r2, $ir15
+	and	$r2, $r1, $r2
+	beqz	$r2, self_loop
+
+	// set ipc to resume address
+	la	$r1, resume_addr
+	lwi	$r1, [$r1]
+	mtsr	$r1, $ipc
+	isb
+
+	// reset psw, turn off the address translation
+	li      $r2, 0x7000a
+	mtsr    $r2, $ipsw
+	isb
+
+	iret
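+// Wake-up entry point. Control arrives here at the physical address
+// of cpu_resume with address translation still off, so sp_tmp must
+// also be read through its physical address.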
+cpu_resume:
+	// translate the address of the sp_tmp variable to pa
+	la	$r1, sp_tmp
+	sethi   $r0, hi20(PAGE_OFFSET)
+	ori     $r0, $r0, lo12(PAGE_OFFSET)
+	movi    $r2, PHYS_OFFSET
+	sub     $r1, $r1, $r0
+	add     $r1, $r1, $r2
+
+	// load the saved stack pointer from sp_tmp
+	lwi	$sp, [$r1]
+
+	popm	$r0, $r19
+#if defined(CONFIG_HWZOL)
+	mtusr   $r0, $lb
+	mtusr   $r1, $lc
+	mtusr   $r2, $le
+#endif
+	mtsr	$r3, $mr0
+	mtsr    $r4, $mr1
+	mtsr    $r5, $mr4
+	mtsr    $r6, $mr6
+	mtsr    $r7, $mr7
+	mtsr    $r8, $mr8
+	// set original psw to ipsw
+	mtsr    $r9, $ir1
+
+	mtsr    $r11, $ir2
+	mtsr    $r12, $ir3
+
+	// set ipc to RR
+	la	$r13, RR
+	mtsr	$r13, $ir9
+
+	mtsr    $r14, $ir10
+	mtsr    $r15, $ir12
+	mtsr    $r16, $ir13
+	mtsr    $r17, $ir14
+	mtsr    $r18, $ir15
+	popm    $r0, $r31
+
+	isb
+	iret
+RR:
+	ret
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
index 9de93ab..cb2d1e2 100644
--- a/arch/nds32/kernel/sys_nds32.c
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -6,6 +6,8 @@
 
 #include <asm/cachectl.h>
 #include <asm/proc-fns.h>
+#include <asm/fpu.h>
+#include <asm/fp_udfiex_crtl.h>
 
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 	       unsigned long, prot, unsigned long, flags,
@@ -48,3 +50,35 @@
 
 	return 0;
 }
+
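+/*
+ * Enable, disable or query trapping of the FP underflow/inexact
+ * exceptions for the calling task. "act" selects the UDFE/IEXE bits;
+ * the previous setting is returned, so GET_UDF_IEX_TRAP is just a
+ * read-only query.
+ */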
+SYSCALL_DEFINE2(fp_udfiex_crtl, unsigned int, cmd, unsigned int, act)
+{
+#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
+	int old_udf_iex;
+
+	if (!used_math()) {
+		load_fpu(&init_fpuregs);
+		current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
+		set_used_math();
+	}
+
+	old_udf_iex = current->thread.fpu.UDF_IEX_trap;
+	act &= (FPCSR_mskUDFE | FPCSR_mskIEXE);
+
+	switch (cmd) {
+	case DISABLE_UDF_IEX_TRAP:
+		current->thread.fpu.UDF_IEX_trap &= ~act;
+		break;
+	case ENABLE_UDF_IEX_TRAP:
+		current->thread.fpu.UDF_IEX_trap |= act;
+		break;
+	case GET_UDF_IEX_TRAP:
+		break;
+	default:
+		return -EINVAL;
+	}
+	return old_udf_iex;
+#else
+	return -ENOTSUPP;
+#endif
+}
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index 1496aab..f4d386b 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -12,6 +12,7 @@
 
 #include <asm/proc-fns.h>
 #include <asm/unistd.h>
+#include <asm/fpu.h>
 
 #include <linux/ptrace.h>
 #include <nds32_intrinsic.h>
@@ -204,7 +205,7 @@
 	}
 
 	force_sig_fault(SIGILL, ILL_ILLTRP,
-			(void __user *)instruction_pointer(regs) - 4, current);
+			(void __user *)instruction_pointer(regs) - 4);
 	die_if_kernel("Oops - bad syscall", regs, n);
 	return regs->uregs[0];
 }
@@ -254,14 +255,15 @@
 	cpu_cache_wbinval_page(base, true);
 }
 
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-		  int error_code, int si_code)
+static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
 {
+	struct task_struct *tsk = current;
+
 	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
 	tsk->thread.error_code = error_code;
 
 	force_sig_fault(SIGTRAP, si_code,
-			(void __user *)instruction_pointer(regs), tsk);
+			(void __user *)instruction_pointer(regs));
 }
 
 void do_debug_trap(unsigned long entry, unsigned long addr,
@@ -273,7 +275,7 @@
 
 	if (user_mode(regs)) {
 		/* trap_signal */
-		send_sigtrap(current, regs, 0, TRAP_BRKPT);
+		send_sigtrap(regs, 0, TRAP_BRKPT);
 	} else {
 		/* kernel_trap */
 		if (!fixup_exception(regs))
@@ -287,7 +289,7 @@
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGKILL);
-	force_sig(SIGKILL, current);
+	force_sig(SIGKILL);
 }
 
 void unhandled_exceptions(unsigned long entry, unsigned long addr,
@@ -298,7 +300,7 @@
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGKILL);
-	force_sig(SIGKILL, current);
+	force_sig(SIGKILL);
 }
 
 extern int do_page_fault(unsigned long entry, unsigned long addr,
@@ -325,7 +327,7 @@
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGILL);
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 }
 
 #ifdef CONFIG_ALIGNMENT_TRAP
@@ -357,6 +359,21 @@
 	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
 		/* Reserved instruction */
 		do_revinsn(regs);
+	} else if (type == ETYPE_COPROCESSOR) {
+		/* Coprocessor exception; CP0 may be the FPU */
+#if IS_ENABLED(CONFIG_FPU)
+		unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
+		unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);
+
+		if ((cpid == FPU_CPID) &&
+		    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
+			unsigned int subtype = (itype & ITYPE_mskSTYPE);
+
+			if (do_fpu_exception(subtype, regs))
+				return;
+		}
+#endif
+		unhandled_exceptions(entry, addr, type, regs);
 	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
 		/* trap, used on v3 EDM target debugging workaround */
 		/*
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 016f158..90bcae6 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -220,6 +220,7 @@
 	vdso_data->xtime_coarse_sec = tk->xtime_sec;
 	vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
 	    tk->tkr_mono.shift;
+	vdso_data->hrtimer_res = hrtimer_resolution;
 	vdso_write_end(vdso_data);
 }
 
diff --git a/arch/nds32/kernel/vdso/.gitignore b/arch/nds32/kernel/vdso/.gitignore
new file mode 100644
index 0000000..f8b69d8
--- /dev/null
+++ b/arch/nds32/kernel/vdso/.gitignore
@@ -0,0 +1 @@
+vdso.lds
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
index e6c50a7..7c3c1cc 100644
--- a/arch/nds32/kernel/vdso/Makefile
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Building a vDSO image for AArch64.
 #
@@ -11,10 +12,8 @@
 targets := $(obj-vdso) vdso.so vdso.so.dbg
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
-		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-ccflags-y += -fPIC -Wl,-shared -g
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib -fPIC -Wl,-shared -g \
+	-Wl,-soname=linux-vdso.so.1 -Wl,--hash-style=sysv
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
@@ -28,7 +27,7 @@
 $(obj)/vdso.o : $(obj)/vdso.so
 
 # Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
 	$(call if_changed,vdsold)
 
 
@@ -40,9 +39,7 @@
 # Generate VDSO offsets using helper script
 gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
 quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
-	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+      cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
 
 include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 	$(call if_changed,vdsosym)
@@ -65,7 +62,7 @@
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL   $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $(real-prereqs) -o $@
 quiet_cmd_vdsoas = VDSOA   $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 quiet_cmd_vdsocc = VDSOA   $@
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
index 038721a..b025818 100644
--- a/arch/nds32/kernel/vdso/gettimeofday.c
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -208,6 +208,8 @@
 
 notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
 {
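+	/*
+	 * The clock resolution is now read from the vDSO data page,
+	 * which the kernel fills with hrtimer_resolution in
+	 * arch/nds32/kernel/vdso.c, instead of the old compile-time
+	 * CLOCK_REALTIME_RES constant.
+	 */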
+	struct vdso_data *vdata = __get_datapage();
+
 	if (res == NULL)
 		return 0;
 	switch (clk_id) {
@@ -215,7 +217,7 @@
 	case CLOCK_MONOTONIC:
 	case CLOCK_MONOTONIC_RAW:
 		res->tv_sec = 0;
-		res->tv_nsec = CLOCK_REALTIME_RES;
+		res->tv_nsec = vdata->hrtimer_res;
 		break;
 	case CLOCK_REALTIME_COARSE:
 	case CLOCK_MONOTONIC_COARSE: