author     Sean Christopherson <seanjc@google.com>  2025-08-05 12:05:25 -0700
committer  Sean Christopherson <seanjc@google.com>  2025-08-19 11:59:42 -0700
commit     8bb8b60c95c55c13f9924f3f090232e14d035d43 (patch)
tree       1287e1b9a388eab3a8284d227491635cef88d63a /arch/x86/kvm/x86.c
parent     KVM: x86/pmu: Rename check_pmu_event_filter() to pmc_is_event_allowed() (diff)
KVM: x86: Push acquisition of SRCU in fastpath into kvm_pmu_trigger_event()
Acquire SRCU in the VM-Exit fastpath if and only if KVM needs to check
the PMU event filter, to further trim the amount of code that is
executed with SRCU protection in the fastpath.

Counter-intuitively, holding SRCU can do more harm than good due to
masking potential bugs, and introducing a new SRCU-protected asset to
code reachable via kvm_skip_emulated_instruction() would be quite
notable, i.e. definitely worth auditing.  E.g. the primary user of
kvm->srcu is KVM's memslots; accessing memslots all but guarantees
guest memory may be accessed, accessing guest memory can fault, and
page faults might sleep, which isn't allowed while IRQs are disabled.
Not acquiring SRCU means the (hypothetical) illegal sleep would be
flagged when running with PROVE_RCU=y, even if DEBUG_ATOMIC_SLEEP=n.

Note, performance is NOT a motivating factor, as SRCU lock/unlock only
adds ~15 cycles of latency to fastpath VM-Exits.  I.e. overhead isn't a
concern _if_ SRCU protection needs to be extended beyond PMU events,
e.g. to honor userspace MSR filters.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250805190526.1453366-18-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  18
1 file changed, 5 insertions(+), 13 deletions(-)
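The diff below covers only the arch/x86/kvm/x86.c side of the change, i.e. dropping the kvm_vcpu_srcu_read_lock()/kvm_vcpu_srcu_read_unlock() pairs that wrapped kvm_skip_emulated_instruction() and kvm_emulate_halt() in the fastpath handlers. The pmu.c counterpart is not shown on this page; the fragment below is only a rough sketch of the shape the changelog describes, with the SRCU read-side section narrowed to the event-filter walk inside kvm_pmu_trigger_event(). The kvm_for_each_pmc() loop and its variables are assumed surrounding context, not taken from this page:

	/*
	 * Sketch only: take kvm->srcu just around the walk that consults the
	 * SRCU-protected PMU event filter, rather than around the entire
	 * fastpath VM-Exit handler.
	 */
	kvm_vcpu_srcu_read_lock(vcpu);

	kvm_for_each_pmc(pmu, pmc, i, bitmap) {
		/* pmc_is_event_allowed() reads the SRCU-protected filter. */
		if (!pmc_is_event_allowed(pmc))
			continue;

		/* ... count/emulate the event for this PMC ... */
	}

	kvm_vcpu_srcu_read_unlock(vcpu);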
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c44d3d64270b..093bfc8d00b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2138,7 +2138,6 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
 	u64 data = kvm_read_edx_eax(vcpu);
 	u32 msr = kvm_rcx_read(vcpu);
-	int r;
 
 	switch (msr) {
 	case APIC_BASE_MSR + (APIC_ICR >> 4):
@@ -2153,13 +2152,12 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 		return EXIT_FASTPATH_NONE;
 	}
 
-	kvm_vcpu_srcu_read_lock(vcpu);
-	r = kvm_skip_emulated_instruction(vcpu);
-	kvm_vcpu_srcu_read_unlock(vcpu);
-
 	trace_kvm_msr_write(msr, data);
-	return r ? EXIT_FASTPATH_REENTER_GUEST : EXIT_FASTPATH_EXIT_USERSPACE;
+	if (!kvm_skip_emulated_instruction(vcpu))
+		return EXIT_FASTPATH_EXIT_USERSPACE;
+
+	return EXIT_FASTPATH_REENTER_GUEST;
 }
 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
@@ -11254,13 +11252,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
 {
-	int ret;
-
-	kvm_vcpu_srcu_read_lock(vcpu);
-	ret = kvm_emulate_halt(vcpu);
-	kvm_vcpu_srcu_read_unlock(vcpu);
-
-	if (!ret)
+	if (!kvm_emulate_halt(vcpu))
 		return EXIT_FASTPATH_EXIT_USERSPACE;
 
 	if (kvm_vcpu_running(vcpu))