author    | Sean Christopherson <seanjc@google.com> | 2021-10-08 19:12:07 -0700
committer | Paolo Bonzini <pbonzini@redhat.com>     | 2021-12-08 04:24:51 -0500
commit    | fac4268894394213127e43856f41d10f29131e69 (patch)
tree      | 27e741d4283ea10e9b6d131cc753be9e0ee491a3 /virt
parent    | 91b99ea7065786d0bff1c9281b002455dbaeb08b (diff)
download  | linux-fac4268894394213127e43856f41d10f29131e69.tar.gz linux-fac4268894394213127e43856f41d10f29131e69.tar.bz2 linux-fac4268894394213127e43856f41d10f29131e69.zip
KVM: Split out a kvm_vcpu_block() helper from kvm_vcpu_halt()
Factor out the "block" part of kvm_vcpu_halt() so that x86 can emulate
non-halt wait/sleep/block conditions that should not be subjected to
halt-polling.
No functional change intended.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-15-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
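
To illustrate the intent of the split, here is a minimal, hypothetical arch-side caller (the dispatch function and the mp_state check are illustrative, not code from this series): a true halt goes through kvm_vcpu_halt() and its adaptive halt-polling, while a non-halt wait state such as x86's Wait-For-SIPI can call the new kvm_vcpu_block() directly and skip polling entirely.

```c
/*
 * Hypothetical arch-side dispatch sketching how the split is meant to be
 * used; the function name and the mp_state test are examples only.
 */
static void example_vcpu_wait(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                kvm_vcpu_halt(vcpu);    /* busy-poll first, then block */
        else
                kvm_vcpu_block(vcpu);   /* e.g. Wait-For-SIPI: block immediately, no polling */
}
```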
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 52
1 file changed, 36 insertions(+), 16 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0d301c95fa1a..370b95ad5f03 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3272,6 +3272,35 @@ out:
         return ret;
 }
 
+/*
+ * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
+ * pending.  This is mostly used when halting a vCPU, but may also be used
+ * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
+ */
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
+{
+        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+        bool waited = false;
+
+        kvm_arch_vcpu_blocking(vcpu);
+
+        prepare_to_rcuwait(wait);
+        for (;;) {
+                set_current_state(TASK_INTERRUPTIBLE);
+
+                if (kvm_vcpu_check_block(vcpu) < 0)
+                        break;
+
+                waited = true;
+                schedule();
+        }
+        finish_rcuwait(wait);
+
+        kvm_arch_vcpu_unblocking(vcpu);
+
+        return waited;
+}
+
 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
                                           ktime_t end, bool success)
 {
@@ -3294,9 +3323,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
         }
 }
 
+/*
+ * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
+ * polling is enabled, busy wait for a short time before blocking to avoid the
+ * expensive block+unblock sequence if a wake event arrives soon after the vCPU
+ * is halted.
+ */
 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
-        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
         bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
         bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
         ktime_t start, cur, poll_end;
@@ -3319,21 +3353,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
                 } while (kvm_vcpu_can_poll(cur, stop));
         }
 
-        kvm_arch_vcpu_blocking(vcpu);
-
-        prepare_to_rcuwait(wait);
-        for (;;) {
-                set_current_state(TASK_INTERRUPTIBLE);
-
-                if (kvm_vcpu_check_block(vcpu) < 0)
-                        break;
-
-                waited = true;
-                schedule();
-        }
-        finish_rcuwait(wait);
-
-        kvm_arch_vcpu_unblocking(vcpu);
+        waited = kvm_vcpu_block(vcpu);
 
         cur = ktime_get();
         if (waited) {
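
As a usage note, kvm_vcpu_block() returns whether the vCPU actually went to sleep, which the halt path uses for its wait-time accounting and halt-polling grow/shrink heuristics (code outside these hunks). Below is a simplified, hypothetical sketch of that pattern, assuming kernel context; the timing and debug print are illustrative, not the upstream accounting code.

```c
/* Hypothetical caller: only report block time if the vCPU really slept. */
static void example_block_and_account(struct kvm_vcpu *vcpu)
{
        ktime_t start = ktime_get();
        bool waited = kvm_vcpu_block(vcpu);

        if (waited)
                pr_debug("vcpu %d blocked for %lld ns\n", vcpu->vcpu_id,
                         ktime_to_ns(ktime_sub(ktime_get(), start)));
}
```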