author    Paolo Bonzini <pbonzini@redhat.com>  2024-03-11 09:56:44 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2024-03-11 09:56:54 -0400
commit    233d0bc4d81ca1abf60158bd39b90be97d85840a (patch)
tree      9efd8e6873052553912f08964c207dd48773d17a /arch/loongarch/kvm
parent    7d8942d8e73843de35b3737b8be50f8fef6796bb (diff)
parent    b99f783106ea5b2f8c9d74f4d3b1e2f77af9ec6e (diff)
Merge tag 'loongarch-kvm-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD
LoongArch KVM changes for v6.9

* Set reserved bits as zero in CPUCFG.
* Start SW timer only when vcpu is blocking.
* Do not restart SW timer when it is expired.
* Remove unnecessary CSR register saving during enter guest.
Diffstat (limited to 'arch/loongarch/kvm')
-rw-r--r--  arch/loongarch/kvm/switch.S    6
-rw-r--r--  arch/loongarch/kvm/timer.c    43
-rw-r--r--  arch/loongarch/kvm/vcpu.c    106
3 files changed, 72 insertions(+), 83 deletions(-)
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index ba976509bfe8..3634431db18a 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -213,12 +213,6 @@ SYM_FUNC_START(kvm_enter_guest)
/* Save host GPRs */
kvm_save_host_gpr a2
- /* Save host CRMD, PRMD to stack */
- csrrd a3, LOONGARCH_CSR_CRMD
- st.d a3, a2, PT_CRMD
- csrrd a3, LOONGARCH_CSR_PRMD
- st.d a3, a2, PT_PRMD
-
addi.d a2, a1, KVM_VCPU_ARCH
st.d sp, a2, KVM_ARCH_HSP
st.d tp, a2, KVM_ARCH_HTP
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index 111328f60872..bcc6b6d063d9 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
-/*
- * Push timer forward on timeout.
- * Handle an hrtimer event by push the hrtimer forward a period.
- */
-static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
-{
- unsigned long cfg, period;
-
- /* Add periodic tick to current expire time */
- cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
- if (cfg & CSR_TCFG_PERIOD) {
- period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
- hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
- return HRTIMER_RESTART;
- } else
- return HRTIMER_NORESTART;
-}
-
/* Low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
@@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
kvm_queue_irq(vcpu, INT_TI);
rcuwait_wake_up(&vcpu->wait);
- return kvm_count_timeout(vcpu);
+ return HRTIMER_NORESTART;
}
/*
@@ -93,7 +75,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
/*
* Freeze the soft-timer and sync the guest stable timer with it.
*/
- hrtimer_cancel(&vcpu->arch.swtimer);
+ if (kvm_vcpu_is_blocking(vcpu))
+ hrtimer_cancel(&vcpu->arch.swtimer);
/*
* From LoongArch Reference Manual Volume 1 Chapter 7.6.2
@@ -168,26 +151,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
* Here judge one-shot timer fired by checking whether TVAL is larger
* than TCFG
*/
- if (ticks < cfg) {
+ if (ticks < cfg)
delta = tick_to_ns(vcpu, ticks);
- expire = ktime_add_ns(ktime_get(), delta);
- vcpu->arch.expire = expire;
+ else
+ delta = 0;
+
+ expire = ktime_add_ns(ktime_get(), delta);
+ vcpu->arch.expire = expire;
+ if (kvm_vcpu_is_blocking(vcpu)) {
/*
* HRTIMER_MODE_PINNED is suggested since vcpu may run in
* the same physical cpu in next time
*/
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
- } else if (vcpu->stat.generic.blocking) {
- /*
- * Inject timer interrupt so that halt polling can dectect and exit.
- * VCPU is scheduled out already and sleeps in rcuwait queue and
- * will not poll pending events again. kvm_queue_irq() is not enough,
- * hrtimer swtimer should be used here.
- */
- expire = ktime_add_ns(ktime_get(), 10);
- vcpu->arch.expire = expire;
- hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
}
}
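
For readability, the tail of _kvm_save_timer() after this hunk is applied reads roughly as follows (a sketch reassembled from the '+' lines above, not verbatim kernel source):

	/* Record the remaining one-shot ticks as an absolute expiry time. */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0;

	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {
		/*
		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
		 * the same physical cpu in next time
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	}

The expiry is always recorded, but the software hrtimer is armed only while the vCPU is blocking, matching the "Start SW timer only when vcpu is blocking" item in the merge summary.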
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 27701991886d..3a8779065f73 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -298,74 +298,92 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
return ret;
}
-static int _kvm_get_cpucfg(int id, u64 *v)
+static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
- int ret = 0;
-
- if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+ if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
return -EINVAL;
switch (id) {
- case 2:
- /* Return CPUCFG2 features which have been supported by KVM */
+ case LOONGARCH_CPUCFG0:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG1:
+ /* CPUCFG1_MSGINT is not supported by KVM */
+ *v = GENMASK(25, 0);
+ return 0;
+ case LOONGARCH_CPUCFG2:
+ /* CPUCFG2 features unconditionally supported by KVM */
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
- CPUCFG2_LAM;
+ CPUCFG2_LSPW | CPUCFG2_LAM;
/*
- * If LSX is supported by CPU, it is also supported by KVM,
- * as we implement it.
+ * For the ISA extensions listed below, if one is supported
+ * by the host, then it is also supported by KVM.
*/
if (cpu_has_lsx)
*v |= CPUCFG2_LSX;
- /*
- * if LASX is supported by CPU, it is also supported by KVM,
- * as we implement it.
- */
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
- break;
+ return 0;
+ case LOONGARCH_CPUCFG3:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG4:
+ case LOONGARCH_CPUCFG5:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG16:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
+ *v = GENMASK(30, 0);
+ return 0;
default:
- ret = -EINVAL;
- break;
+ /*
+ * CPUCFG bits should be zero if reserved by HW or not
+ * supported by KVM.
+ */
+ *v = 0;
+ return 0;
}
- return ret;
}
static int kvm_check_cpucfg(int id, u64 val)
{
- u64 mask;
- int ret = 0;
-
- if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
- return -EINVAL;
+ int ret;
+ u64 mask = 0;
- if (_kvm_get_cpucfg(id, &mask))
+ ret = _kvm_get_cpucfg_mask(id, &mask);
+ if (ret)
return ret;
+ if (val & ~mask)
+ /* Unsupported features and/or the higher 32 bits should not be set */
+ return -EINVAL;
+
switch (id) {
- case 2:
- /* CPUCFG2 features checking */
- if (val & ~mask)
- /* The unsupported features should not be set */
- ret = -EINVAL;
- else if (!(val & CPUCFG2_LLFTP))
- /* The LLFTP must be set, as guest must has a constant timer */
- ret = -EINVAL;
- else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
- /* Single and double float point must both be set when enable FP */
- ret = -EINVAL;
- else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
- /* FP should be set when enable LSX */
- ret = -EINVAL;
- else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
- /* LSX, FP should be set when enable LASX, and FP has been checked before. */
- ret = -EINVAL;
- break;
+ case LOONGARCH_CPUCFG2:
+ if (!(val & CPUCFG2_LLFTP))
+ /* Guests must have a constant timer */
+ return -EINVAL;
+ if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
+ /* Single and double float point must both be set when FP is enabled */
+ return -EINVAL;
+ if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
+ /* LSX architecturally implies FP but val does not satisfy that */
+ return -EINVAL;
+ if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
+ /* LASX architecturally implies LSX and FP but val does not satisfy that */
+ return -EINVAL;
+ return 0;
default:
- break;
+ /*
+ * Values for the other CPUCFG IDs are not being further validated
+ * besides the mask check above.
+ */
+ return 0;
}
- return ret;
}
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
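
As a quick illustration of the CPUCFG2 rules enforced by kvm_check_cpucfg() above, the values below are hypothetical examples (assuming the host supports LSX so that CPUCFG2_LSX is part of the writable mask), not taken from the patch:

	u64 ok   = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
		   CPUCFG2_LLFTP | CPUCFG2_LSX;              /* accepted: FP has SP+DP, LSX has FP, LLFTP set */
	u64 bad1 = CPUCFG2_LLFTP | CPUCFG2_LSX;              /* rejected: LSX without FP */
	u64 bad2 = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP; /* rejected: constant timer (LLFTP) missing */

Bits outside the mask returned by _kvm_get_cpucfg_mask(), including anything in the upper 32 bits, are rejected before these per-feature checks are reached.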
@@ -566,7 +584,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
- ret = _kvm_get_cpucfg(attr->attr, &val);
+ ret = _kvm_get_cpucfg_mask(attr->attr, &val);
if (ret)
return ret;
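
With this rename, the CPUCFG vCPU attribute reports the per-ID writable-bit mask computed by _kvm_get_cpucfg_mask(). A minimal userspace sketch of querying it could look like the following; the KVM_LOONGARCH_VCPU_CPUCFG group name and the use of KVM_GET_DEVICE_ATTR on the vCPU fd are assumptions based on the generic KVM device-attr uapi, not shown in this diff:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Sketch: ask KVM which CPUCFG2 bits userspace may set for a vCPU.
	 * vcpu_fd is an already-created LoongArch vCPU file descriptor;
	 * KVM_LOONGARCH_VCPU_CPUCFG is the assumed attribute group.
	 */
	static int get_cpucfg2_mask(int vcpu_fd, uint64_t *mask)
	{
		struct kvm_device_attr attr = {
			.group = KVM_LOONGARCH_VCPU_CPUCFG,
			.attr  = 2,	/* LOONGARCH_CPUCFG2 */
			.addr  = (uint64_t)(unsigned long)mask,
		};

		return ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
	}

Querying a CPUCFG index the patch does not list simply returns a zero mask, per the new default case in _kvm_get_cpucfg_mask().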