Update Linux to v5.4.2

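This brings arch/mips/mm/c-r4k.c in line with upstream v5.4.2. The main
functional change is NUMA-aware L2 maintenance for Loongson-3: a new
r4k_blast_scache_node() pointer is bound at cache-probe time from the
secondary cache line size, the full-cache flush path flushes only the
node owning the current CPU (get_ebase_cpunum() >> 2, four cores per
node), and the noncoherent DMA paths flush only the node owning the
buffer (pa_to_nid()). Beyond that, has_valid_asid() returns early when
MMIDs are in use, drop_mmu_context() loses its cpu argument, the
flush_cache_sigtramp() machinery is removed, a couple of no-longer
supported CPU cases drop out of the cache-probe switches, and the
noncoherent-DMA hook selection is restructured under
CONFIG_DMA_NONCOHERENT / CONFIG_DMA_MAYBE_COHERENT.

For illustration only (this sketch is not part of the patch and uses
hypothetical names), the bind-once/dispatch-later pattern behind
r4k_blast_scache_node() reduces to plain C along these lines:

	#include <stdio.h>

	/* Hypothetical stand-ins for the blast_scacheNN_node helpers. */
	static void blast_node_32(long node)
	{
		printf("writeback/invalidate L2 of node %ld (32-byte lines)\n", node);
	}

	static void blast_node_64(long node)
	{
		printf("writeback/invalidate L2 of node %ld (64-byte lines)\n", node);
	}

	static void blast_node_noop(long node)
	{
		(void)node;	/* non-Loongson-3: nothing to do */
	}

	static void (*blast_scache_node)(long node);

	/* Bound once at probe time, as r4k_blast_scache_node_setup() does. */
	static void blast_scache_node_setup(int is_loongson3, unsigned long sc_lsize)
	{
		if (!is_loongson3)
			blast_scache_node = blast_node_noop;
		else if (sc_lsize == 32)
			blast_scache_node = blast_node_32;
		else if (sc_lsize == 64)
			blast_scache_node = blast_node_64;
	}

	int main(void)
	{
		unsigned int ebase_cpunum = 5;		/* pretend we run on core 5 */

		blast_scache_node_setup(1, 64);		/* Loongson-3, 64-byte L2 lines */
		blast_scache_node(ebase_cpunum >> 2);	/* core 5 -> node 1 */
		return 0;
	}

Later callers never re-check the CPU type; they simply call through the
pointer, which is why the setup helper has to run during cache
initialisation, alongside the other r4k_blast_*_setup() routines in
r4k_cache_init().
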
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a9ef057..89b9c85 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@
 		r4k_blast_scache = blast_scache128;
 }
 
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+	unsigned long sc_lsize = cpu_scache_line_size();
+
+	if (current_cpu_type() != CPU_LOONGSON3)
+		r4k_blast_scache_node = (void *)cache_noop;
+	else if (sc_lsize == 16)
+		r4k_blast_scache_node = blast_scache16_node;
+	else if (sc_lsize == 32)
+		r4k_blast_scache_node = blast_scache32_node;
+	else if (sc_lsize == 64)
+		r4k_blast_scache_node = blast_scache64_node;
+	else if (sc_lsize == 128)
+		r4k_blast_scache_node = blast_scache128_node;
+}
+
 static inline void local_r4k___flush_cache_all(void * args)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -480,6 +497,11 @@
 		r4k_blast_scache();
 		break;
 
+	case CPU_LOONGSON3:
+		/* Use get_ebase_cpunum() for both NUMA=y/n */
+		r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+		break;
+
 	case CPU_BMIPS5000:
 		r4k_blast_scache();
 		__sync();
@@ -518,6 +540,9 @@
 	unsigned int i;
 	const cpumask_t *mask = cpu_present_mask;
 
+	if (cpu_has_mmid)
+		return cpu_context(0, mm) != 0;
+
 	/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
 #ifdef CONFIG_SMP
 	/*
@@ -675,10 +700,7 @@
 	}
 	if (exec) {
 		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
-			int cpu = smp_processor_id();
-
-			if (cpu_context(cpu, mm) != 0)
-				drop_mmu_context(mm, cpu);
+			drop_mmu_context(mm);
 		} else
 			vaddr ? r4k_blast_icache_page(addr) :
 				r4k_blast_icache_user_page(addr);
@@ -840,10 +862,14 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			blast_scache_range(addr, addr + size);
+		}
 		preempt_enable();
 		__sync();
 		return;
@@ -877,9 +903,12 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else {
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			/*
 			 * There is no clearly documented alignment requirement
 			 * for the cache instruction on MIPS processors and
@@ -908,119 +937,6 @@
 }
 #endif /* CONFIG_DMA_NONCOHERENT */
 
-struct flush_cache_sigtramp_args {
-	struct mm_struct *mm;
-	struct page *page;
-	unsigned long addr;
-};
-
-/*
- * While we're protected against bad userland addresses we don't care
- * very much about what happens in that case.  Usually a segmentation
- * fault will dump the process later on anyway ...
- */
-static void local_r4k_flush_cache_sigtramp(void *args)
-{
-	struct flush_cache_sigtramp_args *fcs_args = args;
-	unsigned long addr = fcs_args->addr;
-	struct page *page = fcs_args->page;
-	struct mm_struct *mm = fcs_args->mm;
-	int map_coherent = 0;
-	void *vaddr;
-
-	unsigned long ic_lsize = cpu_icache_line_size();
-	unsigned long dc_lsize = cpu_dcache_line_size();
-	unsigned long sc_lsize = cpu_scache_line_size();
-
-	/*
-	 * If owns no valid ASID yet, cannot possibly have gotten
-	 * this page into the cache.
-	 */
-	if (!has_valid_asid(mm, R4K_HIT))
-		return;
-
-	if (mm == current->active_mm) {
-		vaddr = NULL;
-	} else {
-		/*
-		 * Use kmap_coherent or kmap_atomic to do flushes for
-		 * another ASID than the current one.
-		 */
-		map_coherent = (cpu_has_dc_aliases &&
-				page_mapcount(page) &&
-				!Page_dcache_dirty(page));
-		if (map_coherent)
-			vaddr = kmap_coherent(page, addr);
-		else
-			vaddr = kmap_atomic(page);
-		addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
-	}
-
-	R4600_HIT_CACHEOP_WAR_IMPL;
-	if (!cpu_has_ic_fills_f_dc) {
-		if (dc_lsize)
-			vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
-			      : protected_writeback_dcache_line(
-							addr & ~(dc_lsize - 1));
-		if (!cpu_icache_snoops_remote_store && scache_size)
-			vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
-			      : protected_writeback_scache_line(
-							addr & ~(sc_lsize - 1));
-	}
-	if (ic_lsize)
-		vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
-		      : protected_flush_icache_line(addr & ~(ic_lsize - 1));
-
-	if (vaddr) {
-		if (map_coherent)
-			kunmap_coherent();
-		else
-			kunmap_atomic(vaddr);
-	}
-
-	if (MIPS4K_ICACHE_REFILL_WAR) {
-		__asm__ __volatile__ (
-			".set push\n\t"
-			".set noat\n\t"
-			".set "MIPS_ISA_LEVEL"\n\t"
-#ifdef CONFIG_32BIT
-			"la	$at,1f\n\t"
-#endif
-#ifdef CONFIG_64BIT
-			"dla	$at,1f\n\t"
-#endif
-			"cache	%0,($at)\n\t"
-			"nop; nop; nop\n"
-			"1:\n\t"
-			".set pop"
-			:
-			: "i" (Hit_Invalidate_I));
-	}
-	if (MIPS_CACHE_SYNC_WAR)
-		__asm__ __volatile__ ("sync");
-}
-
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
-	struct flush_cache_sigtramp_args args;
-	int npages;
-
-	down_read(&current->mm->mmap_sem);
-
-	npages = get_user_pages_fast(addr, 1, 0, &args.page);
-	if (npages < 1)
-		goto out;
-
-	args.mm = current->mm;
-	args.addr = addr;
-
-	r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
-
-	put_page(args.page);
-out:
-	up_read(&current->mm->mmap_sem);
-}
-
 static void r4k_flush_icache_all(void)
 {
 	if (cpu_has_vtag_icache)
@@ -1182,7 +1098,6 @@
 		c->options |= MIPS_CPU_CACHE_CDEX_P;
 		break;
 
-	case CPU_R5432:
 	case CPU_R5500:
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
@@ -1218,7 +1133,6 @@
 	case CPU_R4400PC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-	case CPU_R4300:
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 1;
@@ -1251,6 +1165,7 @@
 
 	case CPU_VR4133:
 		write_c0_config(config & ~VR41_CONF_P4K);
+		/* fall through */
 	case CPU_VR4131:
 		/* Workaround for cache instruction bug of VR4131 */
 		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
@@ -1352,7 +1267,7 @@
 					  c->dcache.ways *
 					  c->dcache.linesz;
 		c->dcache.waybit = 0;
-		if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+		if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
 			c->options |= MIPS_CPU_PREFETCH;
 		break;
 
@@ -1498,6 +1413,7 @@
 			c->dcache.flags |= MIPS_CACHE_PINDEX;
 			break;
 		}
+		/* fall through */
 	default:
 		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
 			c->dcache.flags |= MIPS_CACHE_ALIASES;
@@ -1918,6 +1834,7 @@
 	r4k_blast_scache_page_setup();
 	r4k_blast_scache_page_indexed_setup();
 	r4k_blast_scache_setup();
+	r4k_blast_scache_node_setup();
 #ifdef CONFIG_EVA
 	r4k_blast_dcache_user_page_setup();
 	r4k_blast_icache_user_page_setup();
@@ -1946,7 +1863,6 @@
 
 	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
 
-	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
 	flush_icache_all	= r4k_flush_icache_all;
 	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
 	flush_data_cache_page	= r4k_flush_data_cache_page;
@@ -1955,22 +1871,21 @@
 	__flush_icache_user_range	= r4k_flush_icache_user_range;
 	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;
 
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
-# if defined(CONFIG_DMA_PERDEV_COHERENT)
-	if (0) {
-# else
-	if ((coherentio == IO_COHERENCE_ENABLED) ||
-	    ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
-# endif
+#ifdef CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_MAYBE_COHERENT
+	if (coherentio == IO_COHERENCE_ENABLED ||
+	    (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
 		_dma_cache_wback_inv	= (void *)cache_noop;
 		_dma_cache_wback	= (void *)cache_noop;
 		_dma_cache_inv		= (void *)cache_noop;
-	} else {
+	} else
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
+	{
 		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
 		_dma_cache_wback	= r4k_dma_cache_wback_inv;
 		_dma_cache_inv		= r4k_dma_cache_inv;
 	}
-#endif
+#endif /* CONFIG_DMA_NONCOHERENT */
 
 	build_clear_page();
 	build_copy_page();
@@ -2002,7 +1917,6 @@
 		/* I$ fills from D$ just by emptying the write buffers */
 		flush_cache_page = (void *)b5k_instruction_hazard;
 		flush_cache_range = (void *)b5k_instruction_hazard;
-		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
 		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
 		flush_data_cache_page = (void *)b5k_instruction_hazard;
 		flush_icache_range = (void *)b5k_instruction_hazard;
@@ -2021,7 +1935,6 @@
 		flush_cache_mm		= (void *)cache_noop;
 		flush_cache_page	= (void *)cache_noop;
 		flush_cache_range	= (void *)cache_noop;
-		flush_cache_sigtramp	= (void *)cache_noop;
 		flush_icache_all	= (void *)cache_noop;
 		flush_data_cache_page	= (void *)cache_noop;
 		local_flush_data_cache_page	= (void *)cache_noop;