Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index b22a356..3d6a7f5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -19,13 +19,13 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <linux/kvm_host.h>
@@ -39,40 +39,44 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
- { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
- { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
- { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
- { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
- { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
- { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
- { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
- { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
- { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
- { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
- { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
- { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
- { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
- { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
- { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
- { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
- { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+ VCPU_STAT("wait", wait_exits),
+ VCPU_STAT("cache", cache_exits),
+ VCPU_STAT("signal", signal_exits),
+ VCPU_STAT("interrupt", int_exits),
+ VCPU_STAT("cop_unusable", cop_unusable_exits),
+ VCPU_STAT("tlbmod", tlbmod_exits),
+ VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+ VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+ VCPU_STAT("addrerr_st", addrerr_st_exits),
+ VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+ VCPU_STAT("syscall", syscall_exits),
+ VCPU_STAT("resvd_inst", resvd_inst_exits),
+ VCPU_STAT("break_inst", break_inst_exits),
+ VCPU_STAT("trap_inst", trap_inst_exits),
+ VCPU_STAT("msa_fpe", msa_fpe_exits),
+ VCPU_STAT("fpe", fpe_exits),
+ VCPU_STAT("msa_disabled", msa_disabled_exits),
+ VCPU_STAT("flush_dcache", flush_dcache_exits),
#ifdef CONFIG_KVM_MIPS_VZ
- { "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
- { "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
- { "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
- { "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
- { "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
- { "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
- { "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
- { "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+ VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+ VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+ VCPU_STAT("vz_hc", vz_hc_exits),
+ VCPU_STAT("vz_grr", vz_grr_exits),
+ VCPU_STAT("vz_gva", vz_gva_exits),
+ VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+ VCPU_STAT("vz_gpa", vz_gpa_exits),
+ VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
+ VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
- { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
- { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
- { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
- { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
+#endif
+ VCPU_STAT("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
{NULL}
};
@@ -80,13 +84,13 @@
int kvm_guest_mode_change_trace_reg(void)
{
- kvm_trace_guest_mode_change = 1;
+ kvm_trace_guest_mode_change = true;
return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
- kvm_trace_guest_mode_change = 0;
+ kvm_trace_guest_mode_change = false;
}
/*
@@ -118,16 +122,18 @@
kvm_mips_callbacks->hardware_disable();
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return 0;
}
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
@@ -149,6 +155,10 @@
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
+
return 0;
}
@@ -158,7 +168,7 @@
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_arch_vcpu_free(vcpu);
+ kvm_vcpu_destroy(vcpu);
}
mutex_lock(&kvm->lock);
@@ -190,12 +200,6 @@
return -ENOIOCTLCMD;
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
/* Flush whole GPA */
@@ -232,7 +236,7 @@
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -282,25 +286,42 @@
pr_debug("\tEND(%s)\n", symbol);
}
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ vcpu->arch.wait = 0;
+ rcuwait_wake_up(&vcpu->wait);
+
+ return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err, size;
void *gebase, *p, *handler, *refill_start, *refill_end;
int i;
- struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ kvm_debug("kvm @ %p: create cpu %d at %p\n",
+ vcpu->kvm, vcpu->vcpu_id, vcpu);
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
-
+ err = kvm_mips_callbacks->vcpu_init(vcpu);
if (err)
- goto out_free_cpu;
+ return err;
- kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+ hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
/*
* Allocate space for host mode exception handlers that handle
@@ -315,7 +336,7 @@
if (!gebase) {
err = -ENOMEM;
- goto out_uninit_cpu;
+ goto out_uninit_vcpu;
}
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
@@ -394,38 +415,33 @@
vcpu->arch.last_sched_cpu = -1;
vcpu->arch.last_exec_cpu = -1;
- return vcpu;
+ /* Initial guest state */
+ err = kvm_mips_callbacks->vcpu_setup(vcpu);
+ if (err)
+ goto out_free_commpage;
+ return 0;
+
+out_free_commpage:
+ kfree(vcpu->arch.kseg0_commpage);
out_free_gebase:
kfree(gebase);
-
-out_uninit_cpu:
- kvm_vcpu_uninit(vcpu);
-
-out_free_cpu:
- kfree(vcpu);
-
-out:
- return ERR_PTR(err);
+out_uninit_vcpu:
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
+ return err;
}
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
hrtimer_cancel(&vcpu->arch.comparecount_timer);
- kvm_vcpu_uninit(vcpu);
-
kvm_mips_dump_stats(vcpu);
kvm_mmu_free_memory_caches(vcpu);
kfree(vcpu->arch.guest_ebase);
kfree(vcpu->arch.kseg0_commpage);
- kfree(vcpu);
-}
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_arch_vcpu_free(vcpu);
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -434,7 +450,7 @@
return -ENOIOCTLCMD;
}
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -444,11 +460,11 @@
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
- kvm_mips_complete_mmio_load(vcpu, run);
+ kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
}
- if (run->immediate_exit)
+ if (vcpu->run->immediate_exit)
goto out;
lose_fpu(1);
@@ -465,7 +481,7 @@
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+ r = kvm_mips_callbacks->vcpu_run(vcpu);
trace_kvm_out(vcpu);
guest_exit_irqoff();
@@ -484,7 +500,10 @@
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
@@ -493,10 +512,10 @@
else
dvcpu = vcpu->kvm->vcpus[irq->cpu];
- if (intr == 2 || intr == 3 || intr == 4) {
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
- } else if (intr == -2 || intr == -3 || intr == -4) {
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -506,8 +525,7 @@
dvcpu->arch.wait = 0;
- if (swq_has_sleeper(&dvcpu->wq))
- swake_up_one(&dvcpu->wq);
+ rcuwait_wake_up(&dvcpu->wait);
return 0;
}
@@ -973,69 +991,16 @@
return r;
}
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
-
- mutex_unlock(&kvm->slots_lock);
- return r;
}
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
-
- mutex_unlock(&kvm->slots_lock);
- return r;
+ /* Let implementation handle TLB/GVA invalidation */
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -1214,58 +1179,12 @@
return 0;
}
-static void kvm_mips_comparecount_func(unsigned long data)
-{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
- kvm_mips_callbacks->queue_timer_int(vcpu);
-
- vcpu->arch.wait = 0;
- if (swq_has_sleeper(&vcpu->wq))
- swake_up_one(&vcpu->wq);
-}
-
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
- kvm_mips_comparecount_func((unsigned long) vcpu);
- return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- int err;
-
- err = kvm_mips_callbacks->vcpu_init(vcpu);
- if (err)
- return err;
-
- hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
- return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
- kvm_mips_callbacks->vcpu_uninit(vcpu);
-}
-
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return 0;
}
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- return kvm_mips_callbacks->vcpu_setup(vcpu);
-}
-
static void kvm_mips_set_c0_status(void)
{
u32 status = read_c0_status();
@@ -1280,8 +1199,9 @@
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
@@ -1318,7 +1238,7 @@
* end up causing an exception to be delivered to the Guest
* Kernel
*/
- er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ er = kvm_mips_check_privilege(cause, opc, vcpu);
if (er == EMULATE_PRIV_FAIL) {
goto skip_emul;
} else if (er == EMULATE_FAIL) {
@@ -1467,7 +1387,7 @@
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+ kvm_mips_callbacks->vcpu_reenter(vcpu);
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
@@ -1714,6 +1634,34 @@
.notifier_call = kvm_mips_csr_die_notify,
};
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
static int __init kvm_mips_init(void)
{
int ret;
@@ -1732,6 +1680,9 @@
if (ret)
return ret;
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
register_die_notifier(&kvm_mips_csr_die_notifier);
return 0;