author     Michal Luczaj <mhal@rbox.co>	2022-10-13 21:12:27 +0000
committer  David Woodhouse <dwmw@amazon.co.uk>	2022-11-30 19:25:23 +0000
commit     2a0b128a906ab28b1ab41ceedcaf462b6f74f1aa (patch)
tree       f9d26c2f45a8ec2e18e515b348b2c051d8241957 /virt
parent     e308c24a358d1e79951b16c387cbc6c6593639a5 (diff)
KVM: Clean up hva_to_pfn_retry()
Make hva_to_pfn_retry() use the kvm instance cached in gfn_to_pfn_cache.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michal Luczaj <mhal@rbox.co>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
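For context, this refactor works because the parent commit in this series caches a struct kvm pointer in the gfn_to_pfn_cache itself when the cache is initialized. A rough sketch of the relevant shape (abbreviated field names; not the exact definition from include/linux/kvm_types.h):

/*
 * Sketch only: abbreviated gfn_to_pfn_cache carrying the cached kvm
 * pointer. The real structure has more fields (memslot, locks, usage
 * flags, ...).
 */
struct gfn_to_pfn_cache {
	struct kvm *kvm;	/* assumed to be set once at kvm_gpc_init() time */
	unsigned long uhva;	/* host virtual address backing the cache */
	void *khva;		/* kernel mapping of the cached page */
	kvm_pfn_t pfn;
	bool valid;
	/* ... */
};

With gpc->kvm always reachable from the cache, hva_to_pfn_retry() no longer needs a separate struct kvm * parameter: it reads the MMU invalidation sequence and performs the retry check through gpc->kvm, as the diff below shows.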
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/pfncache.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index decf4fdde668..9d506de6c150 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -138,7 +138,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
return kvm->mmu_invalidate_seq != mmu_seq;
}
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
void *old_khva = gpc->khva - offset_in_page(gpc->khva);
@@ -158,7 +158,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
gpc->valid = false;
do {
- mmu_seq = kvm->mmu_invalidate_seq;
+ mmu_seq = gpc->kvm->mmu_invalidate_seq;
smp_rmb();
write_unlock_irq(&gpc->lock);
@@ -216,7 +216,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
* attempting to refresh.
*/
WARN_ON_ONCE(gpc->valid);
- } while (mmu_notifier_retry_cache(kvm, mmu_seq));
+ } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
gpc->valid = true;
gpc->pfn = new_pfn;
@@ -294,7 +294,7 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
* drop the lock and do the HVA to PFN lookup again.
*/
if (!gpc->valid || old_uhva != gpc->uhva) {
- ret = hva_to_pfn_retry(kvm, gpc);
+ ret = hva_to_pfn_retry(gpc);
} else {
/*
* If the HVA→PFN mapping was already valid, don't unmap it.