Update Linux to v5.10.109

Sourced from the upstream release tarball [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
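
The csky hunks below drop the old unconditional cache_wbinv_range() flushes in favor of two mechanisms: a lazy D-cache sync in update_mmu_cache() keyed off a PG_dcache_clean page flag, and a deferred SMP I-cache invalidation tracked in a per-mm icache_stale_mask. As a reading aid, here is a stand-alone sketch of the second mechanism only. It is a model, not kernel code: mm_model, the *_model helpers, the printf stubs and the fixed NR_CPUS are made up for illustration, and the assumption that flush_icache_deferred() runs when a CPU switches back to the mm is not shown in this excerpt.

/*
 * Stand-alone model of the deferred I-cache flush scheme introduced in
 * arch/csky/abiv2/cacheflush.c below.  Illustration only: mm_model,
 * the *_model helpers and the printf stubs are not kernel APIs.
 */
#include <stdio.h>

#define NR_CPUS 4			/* arbitrary for the model */

struct mm_model {
	unsigned int icache_stale_mask;	/* one bit per CPU, like mm->context.icache_stale_mask */
};

static void local_icache_inv_all(int cpu)
{
	printf("cpu%d: invalidate local I-cache\n", cpu);
}

/* Writer side, cf. flush_icache_mm_range(): code for this mm changed on 'cpu'. */
static void flush_icache_mm_range_model(struct mm_model *mm, int cpu)
{
	/* Mark every CPU's I-cache as needing a flush for this mm. */
	mm->icache_stale_mask = (1u << NR_CPUS) - 1;

	/* Flush this CPU's I-cache now and mark it as flushed. */
	mm->icache_stale_mask &= ~(1u << cpu);
	local_icache_inv_all(cpu);

	/*
	 * The kernel code also IPIs the other CPUs that are currently
	 * running this mm (on_each_cpu_mask); CPUs that are not running
	 * it get the deferred treatment modelled below.
	 */
}

/* Switch-in side, cf. flush_icache_deferred(): 'cpu' resumes running the mm. */
static void flush_icache_deferred_model(struct mm_model *mm, int cpu)
{
	if (mm->icache_stale_mask & (1u << cpu)) {
		mm->icache_stale_mask &= ~(1u << cpu);
		local_icache_inv_all(cpu);	/* an smp_mb() precedes this in the kernel */
	}
}

int main(void)
{
	struct mm_model mm = { 0 };

	flush_icache_mm_range_model(&mm, 0);	/* cpu0 modifies code */
	flush_icache_deferred_model(&mm, 2);	/* cpu2 switches in later: flush */
	flush_icache_deferred_model(&mm, 2);	/* stale bit already clear: no flush */
	return 0;
}

The design point the mask captures: CPUs not currently running the mm take no IPI when code is modified; they invalidate their I-cache only if and when they switch back to that mm.
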
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 5bb887b..790f1eb 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -6,46 +6,80 @@
 #include <linux/mm.h>
 #include <asm/cache.h>
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long start;
-
-	start = (unsigned long) kmap_atomic(page);
-
-	cache_wbinv_range(start, start + PAGE_SIZE);
-
-	kunmap_atomic((void *)start);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
-
-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
-
-	cache_wbinv_range(kaddr, kaddr + len);
-
-	kunmap_atomic((void *)kaddr);
-}
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *pte)
 {
-	unsigned long addr, pfn;
+	unsigned long addr;
 	struct page *page;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
 		return;
 
 	addr = (unsigned long) kmap_atomic(page);
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
 
 	kunmap_atomic((void *) addr);
 }
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+		unsigned long start, unsigned long end)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
+		return;
+	}
+#endif
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
+
+	preempt_enable();
+}
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index b8db5e0..a565e00 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -13,24 +13,27 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_dup_mm(mm)			do { } while (0)
-
-#define flush_cache_range(vma, start, end) \
-	do { \
-		if (vma->vm_flags & VM_EXEC) \
-			icache_inv_all(); \
-	} while (0)
-
+#define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
+
+#define PG_dcache_clean		PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_page(vma, page)		do { } while (0)
 
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len);
+void flush_icache_mm_range(struct mm_struct *mm,
+			unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
 
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
@@ -38,7 +41,13 @@
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
-	cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+		} \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
index ac8f65a..bedcc6f 100644
--- a/arch/csky/abiv2/inc/abi/entry.h
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -65,7 +65,6 @@
 .endm
 
 .macro	RESTORE_ALL
-	psrclr  ie
 	ldw	tls, (sp, 0)
 	ldw	lr, (sp, 4)
 	ldw	a0, (sp, 8)
@@ -102,6 +101,63 @@
 	rte
 .endm
 
+.macro SAVE_REGS_FTRACE
+	subi    sp, 152
+	stw	tls, (sp, 0)
+	stw	lr, (sp, 4)
+
+	mfcr	lr, psr
+	stw	lr, (sp, 12)
+
+	addi    lr, sp, 152
+	stw	lr, (sp, 16)
+
+	stw     a0, (sp, 20)
+	stw     a0, (sp, 24)
+	stw     a1, (sp, 28)
+	stw     a2, (sp, 32)
+	stw     a3, (sp, 36)
+
+	addi	sp, 40
+	stm	r4-r13, (sp)
+
+	addi    sp, 40
+	stm     r16-r30, (sp)
+#ifdef CONFIG_CPU_HAS_HILO
+	mfhi	lr
+	stw	lr, (sp, 60)
+	mflo	lr
+	stw	lr, (sp, 64)
+	mfcr	lr, cr14
+	stw	lr, (sp, 68)
+#endif
+	subi	sp, 80
+.endm
+
+.macro	RESTORE_REGS_FTRACE
+	ldw	tls, (sp, 0)
+
+#ifdef CONFIG_CPU_HAS_HILO
+	ldw	a0, (sp, 140)
+	mthi	a0
+	ldw	a0, (sp, 144)
+	mtlo	a0
+	ldw	a0, (sp, 148)
+	mtcr	a0, cr14
+#endif
+
+	ldw     a0, (sp, 24)
+	ldw     a1, (sp, 28)
+	ldw     a2, (sp, 32)
+	ldw     a3, (sp, 36)
+
+	addi	sp, 40
+	ldm	r4-r13, (sp)
+	addi    sp, 40
+	ldm     r16-r30, (sp)
+	addi    sp, 72
+.endm
+
 .macro SAVE_SWITCH_STACK
 	subi    sp, 64
 	stm	r4-r11, (sp)
@@ -243,9 +299,4 @@
 	jmpi	3f /* jump to va */
 3:
 .endm
-
-.macro ANDI_R3 rx, imm
-	lsri	\rx, 3
-	andi	\rx, (\imm >> 3)
-.endm
 #endif /* __ASM_CSKY_ENTRY_H */
diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S
index 326402e..d745e10 100644
--- a/arch/csky/abiv2/mcount.S
+++ b/arch/csky/abiv2/mcount.S
@@ -3,6 +3,8 @@
 
 #include <linux/linkage.h>
 #include <asm/ftrace.h>
+#include <abi/entry.h>
+#include <asm/asm-offsets.h>
 
 /*
  * csky-gcc with -pg will put the following asm after prologue:
@@ -44,6 +46,24 @@
 	jmp	t1
 .endm
 
+.macro mcount_enter_regs
+	subi	sp, 8
+	stw	lr, (sp, 0)
+	stw	r8, (sp, 4)
+	SAVE_REGS_FTRACE
+.endm
+
+.macro mcount_exit_regs
+	RESTORE_REGS_FTRACE
+	subi	sp, 152
+	ldw	t1, (sp, 4)
+	addi	sp, 152
+	ldw	r8, (sp, 4)
+	ldw	lr, (sp, 8)
+	addi	sp, 12
+	jmp	t1
+.endm
+
 .macro save_return_regs
 	subi	sp, 16
 	stw	a0, (sp, 0)
@@ -85,6 +105,8 @@
 	mov	a0, lr
 	subi	a0, 4
 	ldw	a1, (sp, 24)
+	lrw	a2, function_trace_op
+	ldw	a2, (a2, 0)
 
 	jsr	r26
 
@@ -122,6 +144,8 @@
 	ldw	a0, (sp, 16)
 	subi	a0, 4
 	ldw	a1, (sp, 24)
+	lrw	a2, function_trace_op
+	ldw	a2, (a2, 0)
 
 	nop
 GLOBAL(ftrace_call)
@@ -157,3 +181,31 @@
 	jmp	lr
 END(return_to_handler)
 #endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+	mcount_enter_regs
+
+	lrw	t1, PT_FRAME_SIZE
+	add	t1, sp
+
+	ldw	a0, (t1, 0)
+	subi	a0, 4
+	ldw	a1, (t1, 8)
+	lrw	a2, function_trace_op
+	ldw	a2, (a2, 0)
+	mov	a3, sp
+
+	nop
+GLOBAL(ftrace_regs_call)
+	nop32_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	nop
+GLOBAL(ftrace_graph_regs_call)
+	nop32_stub
+#endif
+
+	mcount_exit_regs
+ENDPROC(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */