author		Sean Christopherson <sean.j.christopherson@intel.com>	2019-02-05 13:01:21 -0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-02-20 22:48:39 +0100
commit		4e103134b862314dc2f2f18f2fb0ab972adc3f5f (patch)
tree		dccc458d31608d0d922d8af8265bdf938dc35e23 /arch/x86
parent		a21136345cb6f1a5b7f576701b6a454da5b6e606 (diff)
KVM: x86/mmu: Zap only the relevant pages when removing a memslot
Modify kvm_mmu_invalidate_zap_pages_in_memslot(), a.k.a. the x86 MMU's
handler for kvm_arch_flush_shadow_memslot(), to zap only the pages/PTEs
that actually belong to the memslot being removed. This improves
performance, especially when the deleted memslot has only a few shadow
entries, or even no entries. E.g. a microbenchmark to access regular
memory while concurrently reading PCI ROM to trigger memslot deletion
showed a 5% improvement in throughput.

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
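Background for the hunk below: for_each_valid_sp() resolves a gfn to
candidate shadow pages through the MMU's per-VM hash table, so the new
path costs one bucket walk per gfn in the memslot rather than a scan of
every shadow page in the VM; hash collisions are why the loop still has
to check sp->gfn != gfn before zapping. A rough sketch of the macro,
from memory rather than verbatim (kvm_page_table_hashfn(),
mmu_page_hash, hash_link and role.invalid are the mmu.c internals of
this era):

/*
 * Sketch, not verbatim: shadow pages live in a per-VM hash table keyed
 * by gfn.  Walking one bucket per gfn keeps the zap loop proportional
 * to the memslot size, not to the VM's total shadow page count.
 */
#define for_each_valid_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	    &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)],	\
	    hash_link)							\
		if ((_sp)->role.invalid) {				\
		} else		/* skip pages already queued for zap */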
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1cce120f06ae0..b81e2cad0237a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5622,7 +5622,38 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	kvm_mmu_invalidate_zap_all_pages(kvm);
+	struct kvm_mmu_page *sp;
+	LIST_HEAD(invalid_list);
+	unsigned long i;
+	bool flush;
+	gfn_t gfn;
+
+	spin_lock(&kvm->mmu_lock);
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		goto out_unlock;
+
+	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
+
+	for (i = 0; i < slot->npages; i++) {
+		gfn = slot->base_gfn + i;
+
+		for_each_valid_sp(kvm, sp, gfn) {
+			if (sp->gfn != gfn)
+				continue;
+
+			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		}
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+			flush = false;
+			cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
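A note on the lock-break pattern in the new code: mmu_lock is a
spinlock, so the walk cannot sleep mid-loop. Instead, zapped pages are
only queued on invalid_list, and when a reschedule is due the loop
commits the batch (kvm_mmu_remote_flush_or_zap() flushes remote TLBs
and frees the pages queued on invalid_list) before briefly dropping the
lock with cond_resched_lock(). Clearing flush after an intermediate
commit avoids a redundant TLB shootdown at the end. A generic sketch of
the idiom, with hypothetical stand-ins (my_lock, nr_items,
process_one_item(), commit_batch()) in place of the KVM specifics:

/*
 * Sketch of the batched-work + lock-break idiom used above.  The names
 * my_lock, nr_items, process_one_item() and commit_batch() are
 * hypothetical; need_resched(), spin_needbreak() and
 * cond_resched_lock() are the real kernel primitives.
 */
static void process_all_items(void)
{
	unsigned long i;

	spin_lock(&my_lock);
	for (i = 0; i < nr_items; i++) {
		process_one_item(i);	/* queue work; do not commit yet */

		if (need_resched() || spin_needbreak(&my_lock)) {
			commit_batch();			/* flush queued work */
			cond_resched_lock(&my_lock);	/* unlock, maybe resched, relock */
		}
	}
	commit_batch();				/* commit whatever remains */
	spin_unlock(&my_lock);
}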