author		Sean Christopherson <sean.j.christopherson@intel.com>	2019-02-05 13:01:28 -0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-02-20 22:48:43 +0100
commit		43d2b14b105fb00b8864c7b0ee7043cc1cc4a969 (patch)
tree		f8326119f330c2ca8d016ac1122841897f25a1bc /arch/x86
parent		210f494261e1e84ad1f15877baa1c615afe3b342 (diff)
Revert "KVM: MMU: zap pages in batch"
Unwinding optimizations related to obsolete pages is a step towards removing x86 KVM's fast invalidate mechanism, i.e. this is one part of reverting all patches from the series that introduced the mechanism[1].

This reverts commit e7d11c7a894986a13817c1c001e1e7668c5c4eb4.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
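For readers skimming the diff below, here is a minimal, hypothetical sketch of the behavioral difference the revert makes. It is a user-space model, not kernel code: zap_all() and contended() are made-up names, and contended() stands in for need_resched() || spin_needbreak(&kvm->mmu_lock). Before the revert, pending zaps were accumulated up to BATCH_ZAP_PAGES before the lock could be yielded; after the revert, the pending zaps are committed and the lock is yielded whenever someone is waiting.

/*
 * Hypothetical user-space model, not kernel code.  For illustration the
 * lock is assumed to be contended on every iteration and every page is
 * assumed to be zappable.
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES 10

/* Worst case for illustration: the lock is always contended. */
static bool contended(void)
{
	return true;
}

static void zap_all(int npages, bool batched)
{
	int batch = 0, yields = 0;

	for (int i = 0; i < npages; i++) {
		/* Pre-revert: only yield once BATCH_ZAP_PAGES are pending.
		 * Post-revert: yield whenever the lock is contended. */
		if ((!batched || batch >= BATCH_ZAP_PAGES) && contended()) {
			batch = 0;
			yields++;	/* commit zaps + cond_resched_lock() */
		}
		batch++;		/* one page prepared for zapping */
	}
	printf("%s: %d pages zapped, lock yielded %d times\n",
	       batched ? "batched  " : "unbatched", npages, yields);
}

int main(void)
{
	zap_all(100, true);	/* behavior before this revert */
	zap_all(100, false);	/* behavior after this revert */
	return 0;
}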
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	35
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e733262027eda..cb9fd69d26326 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5846,18 +5846,14 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

-#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
-	int batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
-		int ret;
-
		/*
		 * No obsolete page exists before new created page since
		 * active_mmu_pages is the FIFO list.
@@ -5866,6 +5862,28 @@ restart:
			break;

		/*
+		 * Do not repeatedly zap a root page to avoid unnecessary
+		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
+		 * progress:
+		 *    vcpu 0                        vcpu 1
+		 *                         call vcpu_enter_guest():
+		 *                            1): handle KVM_REQ_MMU_RELOAD
+		 *                                and require mmu-lock to
+		 *                                load mmu
+		 * repeat:
+		 *    1): zap root page and
+		 *        send KVM_REQ_MMU_RELOAD
+		 *
+		 *    2): if (cond_resched_lock(mmu-lock))
+		 *
+		 *                            2): hold mmu-lock and load mmu
+		 *
+		 *                            3): see KVM_REQ_MMU_RELOAD bit
+		 *                                on vcpu->requests is set
+		 *                                then return 1 to call
+		 *                                vcpu_enter_guest() again.
+		 *            goto repeat;
+		 *
		 * Since we are reversely walking the list and the invalid
		 * list will be moved to the head, skip the invalid page
		 * can help us to avoid the infinity list walking.
@@ -5873,18 +5891,13 @@ restart:
		if (sp->role.invalid)
			continue;

-		if (batch >= BATCH_ZAP_PAGES &&
-		      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
-			batch = 0;
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			cond_resched_lock(&kvm->mmu_lock);
			goto restart;
		}

-		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		batch += ret;
-
-		if (ret)
+		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}