Update Linux to v5.4.2

The arch/mips/kvm portion of this update:

- Kconfig: depend on MIPS_FP_SUPPORT, drop the removed ANON_INODES
  symbol, and quote the vhost Kconfig source path.
- Include <linux/memblock.h> instead of the removed <linux/bootmem.h>.
- Follow the MMU context API rework: get_new_mmu_context() no longer
  takes a cpu argument, cpu_context() assignments become
  set_cpu_context(), and open-coded ASID version checks are replaced by
  check_mmu_context() / check_switch_mmu_context().
- Track generic KVM API changes: kvm_arch_check_processor_compat() and
  kvm_set_spte_hva() now return int, the per-vcpu debugfs stubs are
  gone, kvm_vm_ioctl_clear_dirty_log() is added, and
  KVM_CAP_MAX_VCPU_ID is reported.
- Annotate the jal_op fall through, use prandom_u32_max() to pick the
  random guest TLB index, and refuse to initialize on CPUs with MMIDs,
  which KVM does not yet support.
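
Most of the churn comes from the MMU context conversion; in
trap_emul.c, for example, the open-coded ASID version check

	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, mm));
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);

collapses to a single call to the new helper

	check_switch_mmu_context(mm);

where mm is the guest kernel/user mm or current->mm depending on the
call site.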

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 76b93a9..eac25ae 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -18,9 +18,9 @@
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM
+	depends on MIPS_FP_SUPPORT
 	select EXPORT_UASM
 	select PREEMPT_NOTIFIERS
-	select ANON_INODES
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select HAVE_KVM_VCPU_ASYNC_IOCTL
 	select KVM_MMIO
@@ -72,6 +72,6 @@
 
 	  If unsure, say N.
 
-source drivers/vhost/Kconfig
+source "drivers/vhost/Kconfig"
 
 endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index f436299..5812e61 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -14,7 +14,7 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f8e7725..d77b61b 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -16,7 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <asm/cacheflush.h>
 
 #include "commpage.h"
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4144bfa..754094b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -15,7 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/random.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
@@ -140,6 +140,7 @@
 		/* These are unconditional and in j_format. */
 	case jal_op:
 		arch->gprs[31] = instpc + 8;
+		/* fall through */
 	case j_op:
 		epc += 4;
 		epc >>= 28;
@@ -1016,10 +1017,10 @@
 		 */
 		preempt_disable();
 		cpu = smp_processor_id();
-		get_new_mmu_context(kern_mm, cpu);
+		get_new_mmu_context(kern_mm);
 		for_each_possible_cpu(i)
 			if (i != cpu)
-				cpu_context(i, kern_mm) = 0;
+				set_cpu_context(i, kern_mm, 0);
 		preempt_enable();
 	}
 	kvm_write_c0_guest_entryhi(cop0, entryhi);
@@ -1090,8 +1091,8 @@
 		if (i == cpu)
 			continue;
 		if (user)
-			cpu_context(i, user_mm) = 0;
-		cpu_context(i, kern_mm) = 0;
+			set_cpu_context(i, user_mm, 0);
+		set_cpu_context(i, kern_mm, 0);
 	}
 
 	preempt_enable();
@@ -1141,9 +1142,7 @@
 	unsigned long pc = vcpu->arch.pc;
 	int index;
 
-	get_random_bytes(&index, sizeof(index));
-	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-
+	index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
 	tlb = &vcpu->arch.guest_tlb[index];
 
 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index aa0a1a0..7257e8b 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -13,7 +13,7 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f7ea8e2..1109924 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/fpu.h>
 #include <asm/page.h>
@@ -123,9 +123,9 @@
 	return 0;
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void)
 {
-	*(int *)rtn = 0;
+	return 0;
 }
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
@@ -150,16 +150,6 @@
 	return 0;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_mips_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
@@ -1004,14 +994,37 @@
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	bool is_dirty = false;
+	bool flush = false;
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
-	if (is_dirty) {
+	if (flush) {
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
+
+		/* Let implementation handle TLB/GVA invalidation */
+		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+	}
+
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+}
+
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	bool flush = false;
+	int r;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
+
+	if (flush) {
 		slots = kvm_memslots(kvm);
 		memslot = id_to_memslot(slots, log->slot);
 
@@ -1099,6 +1112,9 @@
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_MAX_VCPU_ID:
+		r = KVM_MAX_VCPU_ID;
+		break;
 	case KVM_CAP_MIPS_FPU:
 		/* We don't handle systems with inconsistent cpu_has_fpu */
 		r = !!raw_cpu_has_fpu;
@@ -1700,6 +1716,11 @@
 {
 	int ret;
 
+	if (cpu_has_mmid) {
+		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = kvm_mips_entry_setup();
 	if (ret)
 		return ret;
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index d8dcdb3..97e538a 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -551,7 +551,7 @@
 	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	unsigned long end = hva + PAGE_SIZE;
 	int ret;
@@ -559,6 +559,7 @@
 	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
 	if (ret)
 		kvm_mips_callbacks->flush_shadow_all(kvm);
+	return 0;
 }
 
 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 6a0d704..73daa6a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1056,11 +1056,7 @@
 	 */
 	if (current->flags & PF_VCPU) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-		    asid_version_mask(cpu))
-			get_new_mmu_context(mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, mm));
-		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+		check_switch_mmu_context(mm);
 		kvm_mips_suspend_mm(cpu);
 		ehb();
 	}
@@ -1074,11 +1070,7 @@
 
 	if (current->flags & PF_VCPU) {
 		/* Restore normal Linux process memory map */
-		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-		     asid_version_mask(cpu)))
-			get_new_mmu_context(current->mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, current->mm));
-		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+		check_switch_mmu_context(current->mm);
 		kvm_mips_resume_mm(cpu);
 		ehb();
 	}
@@ -1106,14 +1098,14 @@
 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
 		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
 		for_each_possible_cpu(i) {
-			cpu_context(i, kern_mm) = 0;
-			cpu_context(i, user_mm) = 0;
+			set_cpu_context(i, kern_mm, 0);
+			set_cpu_context(i, user_mm, 0);
 		}
 
 		/* Generate new ASID for current mode */
 		if (reload_asid) {
 			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-			get_new_mmu_context(mm, cpu);
+			get_new_mmu_context(mm);
 			htw_stop();
 			write_c0_entryhi(cpu_asid(cpu, mm));
 			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
@@ -1219,7 +1211,7 @@
 		if (gasid != vcpu->arch.last_user_gasid) {
 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
 			for_each_possible_cpu(i)
-				cpu_context(i, user_mm) = 0;
+				set_cpu_context(i, user_mm, 0);
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
@@ -1228,9 +1220,7 @@
 	 * Check if ASID is stale. This may happen due to a TLB flush request or
 	 * a lazy user MM invalidation.
 	 */
-	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-	    asid_version_mask(cpu))
-		get_new_mmu_context(mm, cpu);
+	check_mmu_context(mm);
 }
 
 static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -1266,11 +1256,7 @@
 	cpu = smp_processor_id();
 
 	/* Restore normal Linux process memory map */
-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     asid_version_mask(cpu)))
-		get_new_mmu_context(current->mm, cpu);
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+	check_switch_mmu_context(current->mm);
 	kvm_mips_resume_mm(cpu);
 
 	htw_start();
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 7480503..dde2088 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2454,10 +2454,10 @@
 		 * Root ASID dealiases guest GPA mappings in the root TLB.
 		 * Allocate new root ASID if needed.
 		 */
-		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
-		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
-						asid_version_mask(cpu))
-			get_new_mmu_context(gpa_mm, cpu);
+		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
+			get_new_mmu_context(gpa_mm);
+		else
+			check_mmu_context(gpa_mm);
 	}
 }