From b3ae2096974b12c3af2ad1a4e7716b084949867f Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Mon, 2 Jul 2012 17:56:33 +0900
Subject: KVM: Introduce kvm_unmap_hva_range() for kvm_mmu_notifier_invalidate_range_start()

When we tested KVM under memory pressure, with THP enabled on the
host, we noticed that the MMU notifier took a long time to invalidate
huge pages. Since the invalidation was done with mmu_lock held, it not
only wasted CPU time but also made the host less responsive.

This patch mitigates the problem by using kvm_handle_hva_range().

Signed-off-by: Takuya Yoshikawa
Cc: Alexander Graf
Cc: Paul Mackerras
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/kvm_main.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b3ce91c623e2..e2b1a159e5df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -332,8 +332,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
-	for (; start < end; start += PAGE_SIZE)
-		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we've to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
-- cgit v1.2.3

From 903816fa4d016e20ec71a1a97700cfcdda115580 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Tue, 17 Jul 2012 21:54:11 +0800
Subject: KVM: using get_fault_pfn to get the fault pfn

Use get_fault_pfn() to clean up the code.

Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 arch/x86/kvm/mmu.c       |  6 ++----
 include/linux/kvm_host.h |  5 +----
 virt/kvm/kvm_main.c      | 13 ++++---------
 3 files changed, 7 insertions(+), 17 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 685a48557389..f85cc21ae95d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2513,10 +2513,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
	unsigned long hva;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
-	if (!slot) {
-		get_page(fault_page);
-		return page_to_pfn(fault_page);
-	}
+	if (!slot)
+		return get_fault_pfn();

	hva = gfn_to_hva_memslot(slot, gfn);

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6f6c18a03c50..1a7f838d30c6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -383,15 +383,11 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

 extern struct page *bad_page;
-extern struct page *fault_page;
-
 extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;

 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
 int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
 int is_noslot_pfn(pfn_t pfn);
 int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
@@ -441,6 +437,7 @@ void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
 void kvm_get_pfn(pfn_t pfn);
+pfn_t get_fault_pfn(void);

 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e2b1a159e5df..0fbbf2d21603 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -103,8 +103,8 @@ static bool largepages_enabled = true;
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;

-struct page *fault_page;
-pfn_t fault_pfn;
+static struct page *fault_page;
+static pfn_t fault_pfn;

 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
@@ -949,12 +949,6 @@ int is_hwpoison_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

-int is_fault_pfn(pfn_t pfn)
-{
-	return pfn == fault_pfn;
-}
-EXPORT_SYMBOL_GPL(is_fault_pfn);
-
 int is_noslot_pfn(pfn_t pfn)
 {
	return pfn == bad_pfn;
@@ -1038,11 +1032,12 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);

-static pfn_t get_fault_pfn(void)
+pfn_t get_fault_pfn(void)
 {
	get_page(fault_page);
	return fault_pfn;
 }
+EXPORT_SYMBOL_GPL(get_fault_pfn);

 int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int write, struct page **page)
-- cgit v1.2.3

From ca0565f5736e67af3172d188577b57e303dd082a Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Tue, 17 Jul 2012 21:54:52 +0800
Subject: KVM: make bad_pfn static to kvm_main.c

bad_pfn is not used outside of kvm_main.c, so mark it static; also
move it near hwpoison_pfn and fault_pfn.

Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 include/linux/kvm_host.h | 1 -
 virt/kvm/kvm_main.c      | 6 +++---
 2 files changed, 3 insertions(+), 4 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1a7f838d30c6..5f956cde1374 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -383,7 +383,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

 extern struct page *bad_page;
-extern pfn_t bad_pfn;

 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0fbbf2d21603..f955eee92aa9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -100,6 +100,9 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);

 static bool largepages_enabled = true;

+struct page *bad_page;
+static pfn_t bad_pfn;
+
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;

@@ -2691,9 +2694,6 @@ static struct syscore_ops kvm_syscore_ops = {
	.resume = kvm_resume,
 };

-struct page *bad_page;
-pfn_t bad_pfn;
-
 static inline struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
 {
-- cgit v1.2.3
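For readers tracing the first patch above: the point of kvm_handle_hva_range() is that the memslot lookup happens once per invalidated range rather than once per 4KB page of a THP mapping. A minimal sketch of that shape follows; the helper name and signature are hypothetical, and the real per-arch implementations differ in detail.

	/*
	 * Sketch only: hypothetical helper; the real per-arch
	 * kvm_handle_hva_range() differs in detail.
	 */
	static int handle_hva_range_sketch(struct kvm *kvm,
					   unsigned long start, unsigned long end,
					   int (*handler)(struct kvm *kvm, gfn_t gfn))
	{
		struct kvm_memory_slot *memslot;
		int ret = 0;

		kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
			unsigned long hva_start, hva_end;
			gfn_t gfn, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end   = min(end, memslot->userspace_addr +
					     (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;	/* slot does not intersect the range */

			/* one slot lookup covers every page in the overlap */
			gfn = memslot->base_gfn +
			      ((hva_start - memslot->userspace_addr) >> PAGE_SHIFT);
			gfn_end = gfn + ((hva_end - hva_start) >> PAGE_SHIFT);
			for (; gfn < gfn_end; ++gfn)
				ret |= handler(kvm, gfn);
		}
		return ret;
	}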
From d566104853361cc377c61f70e41c1ad3d44b86c6 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Tue, 17 Jul 2012 21:56:16 +0800
Subject: KVM: remove the unused parameter of gfn_to_pfn_memslot

The parameter 'kvm' is not used in gfn_to_pfn_memslot(), so we can
happily remove it.

Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 arch/powerpc/kvm/e500_tlb.c |  2 +-
 arch/x86/kvm/mmu.c          |  2 +-
 include/linux/kvm_host.h    |  5 ++---
 virt/kvm/iommu.c            | 10 +++++-----
 virt/kvm/kvm_main.c         | 15 +++++++--------
 5 files changed, 16 insertions(+), 18 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..c8f6c5826742 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -520,7 +520,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);

-		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f85cc21ae95d..4f77f7ac6d25 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2518,7 +2518,7 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,

	hva = gfn_to_hva_memslot(slot, gfn);

-	return hva_to_pfn_atomic(vcpu->kvm, hva);
+	return hva_to_pfn_atomic(hva);
 }

 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e8d13a072d24..db9aa917840a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -418,15 +418,14 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);

-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+pfn_t hva_to_pfn_atomic(unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index e9fff9830bf0..c03f1fb26701 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -42,13 +42,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

-static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
-			   gfn_t gfn, unsigned long size)
+static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+			   unsigned long size)
 {
	gfn_t end_gfn;
	pfn_t pfn;

-	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
+	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

@@ -56,7 +56,7 @@ static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
		return pfn;

	while (gfn < end_gfn)
-		gfn_to_pfn_memslot(kvm, slot, gfn++);
+		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
 }
@@ -105,7 +105,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
	 * Pin all pages we are about to map in memory. This is
	 * important because we unmap and unpin in 4kb steps later.
	 */
-	pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+	pfn = kvm_pin_pages(slot, gfn, page_size);
	if (is_error_pfn(pfn)) {
		gfn += 1;
		continue;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f955eee92aa9..68dda513cd72 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1062,8 +1062,8 @@ static inline int check_user_page_hwpoison(unsigned long addr)
	return rc == -EHWPOISON;
 }

-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
-			bool *async, bool write_fault, bool *writable)
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+			bool write_fault, bool *writable)
 {
	struct page *page[1];
	int npages = 0;
@@ -1143,9 +1143,9 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
	return pfn;
 }

-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+pfn_t hva_to_pfn_atomic(unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
+	return hva_to_pfn(addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

@@ -1163,7 +1163,7 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
		return page_to_pfn(bad_page);
	}

-	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
+	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }

 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
@@ -1192,11 +1192,10 @@ pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);

-	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
+	return hva_to_pfn(addr, false, NULL, true, NULL);
 }

 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-- cgit v1.2.3
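Why dropping the 'kvm' argument is safe can be read off the helpers involved: the gfn-to-hva step uses only fields of the memslot itself, and the hva-to-pfn step only walks current->mm. As a sketch (illustrative reimplementation, not the header's exact text), gfn_to_hva_memslot() amounts to:

	/* Sketch of the slot-local translation: no struct kvm needed. */
	static unsigned long gfn_to_hva_memslot_sketch(struct kvm_memory_slot *slot,
						       gfn_t gfn)
	{
		return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
	}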
From 4c088493c8d07e4e27bad53a99dcfdc14cdf45f8 Mon Sep 17 00:00:00 2001
From: Raghavendra K T
Date: Wed, 18 Jul 2012 19:07:46 +0530
Subject: KVM: Note down when cpu relax intercepted or pause loop exited

Noting that a vcpu has just had a pause loop exit or a cpu relax
intercept helps in filtering the right candidate to yield to. A wrong
selection of vcpu, i.e., one that just did a pl-exit or had cpu relax
intercepted, may contribute to performance degradation.

Signed-off-by: Raghavendra K T
Reviewed-by: Marcelo Tosatti
Reviewed-by: Rik van Riel
Tested-by: Christian Borntraeger # on s390x
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h | 34 ++++++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  5 +++++
 2 files changed, 39 insertions(+)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index db9aa917840a..361b36fe7ecc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -183,6 +183,18 @@ struct kvm_vcpu {
	} async_pf;
 #endif

+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * Cpu relax intercept or pause loop exit optimization
+	 * in_spin_loop: set when a vcpu does a pause loop exit
+	 *  or cpu relax intercepted.
+	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
	struct kvm_vcpu_arch arch;
 };

@@ -898,5 +910,27 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
	}
 }

+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 68dda513cd72..0892b75eeedd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -239,6 +239,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
	}
	vcpu->run = page_address(page);

+	kvm_vcpu_set_in_spin_loop(vcpu, false);
+	kvm_vcpu_set_dy_eligible(vcpu, false);
+
	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
@@ -1585,6 +1588,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
	int pass;
	int i;

+	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
@@ -1610,6 +1614,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
			}
		}
	}
+	kvm_vcpu_set_in_spin_loop(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
-- cgit v1.2.3
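Because the !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT stubs above are empty static inlines, common code can call the setters unconditionally and an architecture that does not select the option pays nothing. The usage pattern this enables, as a sketch with an illustrative function name:

	/* Illustrative only: how common code brackets a yield hunt. */
	static void spin_window_sketch(struct kvm_vcpu *me)
	{
		kvm_vcpu_set_in_spin_loop(me, true);	/* compiles away if unconfigured */
		/* ... pick a runnable-but-preempted vcpu and yield to it ... */
		kvm_vcpu_set_in_spin_loop(me, false);
	}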
From 06e48c510aa37f6e791602e6420422ea7071fe94 Mon Sep 17 00:00:00 2001
From: Raghavendra K T
Date: Thu, 19 Jul 2012 15:17:52 +0530
Subject: KVM: Choose better candidate for directed yield

Currently, on large-vcpu guests, there is a high probability of
yielding to the same vcpu that recently did a pause-loop exit or had
cpu relax intercepted. Such a yield can lead to the vcpu spinning
again and hence degrade performance.

The patchset keeps track of the pause loop exit/cpu relax interception
and gives a chance to a vcpu which:
 (a) has not done a pause loop exit or cpu relax interception at all
     (probably it is a preempted lock holder), or
 (b) was skipped in the last iteration because it did a pause loop exit
     or cpu relax interception, and has probably become eligible now
     (the next eligible lock holder).

Signed-off-by: Raghavendra K T
Reviewed-by: Marcelo Tosatti
Reviewed-by: Rik van Riel
Tested-by: Christian Borntraeger # on s390x
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h |  5 +++++
 virt/kvm/kvm_main.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 361b36fe7ecc..74a78d09c454 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -931,6 +931,11 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }

+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0892b75eeedd..1e10ebe1a370 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1579,6 +1579,43 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+/*
+ * Helper that checks whether a VCPU is eligible for directed yield.
+ * Most eligible candidate to yield is decided by following heuristics:
+ *
+ *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
+ *  (preempted lock holder), indicated by @in_spin_loop.
+ *  Set at the beginning and cleared at the end of interception/PLE handler.
+ *
+ *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
+ *  chance last time (mostly it has become eligible now since we have probably
+ *  yielded to lockholder in last iteration. This is done by toggling
+ *  @dy_eligible each time a VCPU is checked for eligibility.)
+ *
+ * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
+ * to preempted lock-holder could result in wrong VCPU selection and CPU
+ * burning. Giving priority for a potential lock-holder increases lock
+ * progress.
+ *
+ * Since algorithm is based on heuristics, accessing another VCPU data without
+ * locking does not harm. It may result in trying to yield to same VCPU, fail
+ * and continue with next VCPU and so on.
+ */
+bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	bool eligible;
+
+	eligible = !vcpu->spin_loop.in_spin_loop ||
+			(vcpu->spin_loop.in_spin_loop &&
+			 vcpu->spin_loop.dy_eligible);
+
+	if (vcpu->spin_loop.in_spin_loop)
+		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
+
+	return eligible;
+}
+#endif
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
	struct kvm *kvm = me->kvm;
@@ -1607,6 +1644,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
				continue;
			if (waitqueue_active(&vcpu->wq))
				continue;
+			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+				continue;
			if (kvm_vcpu_yield_to(vcpu)) {
				kvm->last_boosted_vcpu = i;
				yielded = 1;
@@ -1615,6 +1654,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);
+
+	/* Ensure vcpu is not eligible during next spinloop */
+	kvm_vcpu_set_dy_eligible(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
-- cgit v1.2.3
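One small observation on the eligibility expression: since !a || (a && b) is equivalent to !a || b, the test reads simply as "not spinning, or spinning but marked eligible". An equivalent, slightly shorter form of the same logic (a sketch, not the patch itself):

	static bool eligible_sketch(struct kvm_vcpu *vcpu)
	{
		/* !in_spin_loop || (in_spin_loop && dy_eligible)
		 *   ==  !in_spin_loop || dy_eligible */
		bool eligible = !vcpu->spin_loop.in_spin_loop ||
				vcpu->spin_loop.dy_eligible;

		/* toggling makes a vcpu skipped this round eligible next round */
		if (vcpu->spin_loop.in_spin_loop)
			kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

		return eligible;
	}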
From a2766325cf9f9e36d1225145f1ce1b066f001837 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Thu, 26 Jul 2012 11:58:59 +0800
Subject: KVM: remove dummy pages

Currently, kvm allocates some pages and uses them as error indicators;
this wastes memory and is not good for scalability.

Based on Avi's suggestion, we use error codes instead of these pages
to indicate the error conditions.

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h |   3 +-
 virt/kvm/async_pf.c      |   3 +-
 virt/kvm/kvm_main.c      | 121 +++++++++++++++++++----------------------------
 3 files changed, 52 insertions(+), 75 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1993eb1cb2cd..4e60d3695e4e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -423,6 +423,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm);

 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);
+struct page *get_bad_page(void);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
@@ -576,7 +577,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

-int kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_mmio_pfn(pfn_t pfn);

 struct kvm_irq_ack_notifier {
	struct hlist_node link;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index ebae24b62c90..79722782d9d7 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -203,8 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
	if (!work)
		return -ENOMEM;

-	work->page = bad_page;
-	get_page(bad_page);
+	work->page = get_bad_page();
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0014ee99dc7f..de89497fe4c7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -100,17 +100,11 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);

 static bool largepages_enabled = true;

-struct page *bad_page;
-static pfn_t bad_pfn;
-
-static struct page *hwpoison_page;
-static pfn_t hwpoison_pfn;
-
-static struct page *fault_page;
-static pfn_t fault_pfn;
-
-inline int kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_mmio_pfn(pfn_t pfn)
 {
+	if (is_error_pfn(pfn))
+		return false;
+
	if (pfn_valid(pfn)) {
		int reserved;
		struct page *tail = pfn_to_page(pfn);
@@ -939,34 +933,55 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);

 int is_error_page(struct page *page)
 {
-	return page == bad_page || page == hwpoison_page || page == fault_page;
+	return IS_ERR(page);
 }
 EXPORT_SYMBOL_GPL(is_error_page);

 int is_error_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
+	return IS_ERR_VALUE(pfn);
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);

+static pfn_t get_bad_pfn(void)
+{
+	return -ENOENT;
+}
+
+pfn_t get_fault_pfn(void)
+{
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(get_fault_pfn);
+
+static pfn_t get_hwpoison_pfn(void)
+{
+	return -EHWPOISON;
+}
+
 int is_hwpoison_pfn(pfn_t pfn)
 {
-	return pfn == hwpoison_pfn;
+	return pfn == -EHWPOISON;
 }
 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

 int is_noslot_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn;
+	return pfn == -ENOENT;
 }
 EXPORT_SYMBOL_GPL(is_noslot_pfn);

 int is_invalid_pfn(pfn_t pfn)
 {
-	return pfn == hwpoison_pfn || pfn == fault_pfn;
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
 }
 EXPORT_SYMBOL_GPL(is_invalid_pfn);

+struct page *get_bad_page(void)
+{
+	return ERR_PTR(-ENOENT);
+}
+
 static inline unsigned long bad_hva(void)
 {
	return PAGE_OFFSET;
@@ -1038,13 +1053,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);

-pfn_t get_fault_pfn(void)
-{
-	get_page(fault_page);
-	return fault_pfn;
-}
-EXPORT_SYMBOL_GPL(get_fault_pfn);
-
 int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int write, struct page **page)
 {
@@ -1122,8 +1130,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		if (npages == -EHWPOISON ||
		      (!async && check_user_page_hwpoison(addr))) {
			up_read(&current->mm->mmap_sem);
-			get_page(hwpoison_page);
-			return page_to_pfn(hwpoison_page);
+			return get_hwpoison_pfn();
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);
@@ -1161,10 +1168,8 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
-	if (kvm_is_error_hva(addr)) {
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
-	}
+	if (kvm_is_error_hva(addr))
+		return get_bad_pfn();

	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }
@@ -1218,37 +1223,45 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

+static struct page *kvm_pfn_to_page(pfn_t pfn)
+{
+	WARN_ON(kvm_is_mmio_pfn(pfn));
+
+	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+		return get_bad_page();
+
+	return pfn_to_page(pfn);
+}
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
-	if (!kvm_is_mmio_pfn(pfn))
-		return pfn_to_page(pfn);
-
-	WARN_ON(kvm_is_mmio_pfn(pfn));
-
-	get_page(bad_page);
-	return bad_page;
+	return kvm_pfn_to_page(pfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);

 void kvm_release_page_clean(struct page *page)
 {
-	kvm_release_pfn_clean(page_to_pfn(page));
+	if (!is_error_page(page))
+		kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);

 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

 void kvm_release_page_dirty(struct page *page)
 {
+	WARN_ON(is_error_page(page));
+
	kvm_release_pfn_dirty(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

@@ -2771,33 +2784,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	if (r)
		goto out_fail;

-	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (bad_page == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
-	bad_pfn = page_to_pfn(bad_page);
-
-	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (hwpoison_page == NULL) {
-		r = -ENOMEM;
-		goto out_free_0;
-	}
-
-	hwpoison_pfn = page_to_pfn(hwpoison_page);
-
-	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
-	if (fault_page == NULL) {
-		r = -ENOMEM;
-		goto out_free_0;
-	}
-
-	fault_pfn = page_to_pfn(fault_page);
-
	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
@@ -2872,12 +2858,6 @@ out_free_1:
 out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-	if (fault_page)
-		__free_page(fault_page);
-	if (hwpoison_page)
-		__free_page(hwpoison_page);
-	__free_page(bad_page);
-out:
	kvm_arch_exit();
 out_fail:
	return r;
@@ -2897,8 +2877,5 @@ void kvm_exit(void)
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
-	__free_page(fault_page);
-	__free_page(hwpoison_page);
-	__free_page(bad_page);
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
-- cgit v1.2.3
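The conversion leans on the kernel's standard error-pointer encoding from <linux/err.h>: the top page of the address space is never a valid kernel pointer (or, here, pfn), so small negative errnos can be stored in the value itself and recognized later. Roughly (illustrative reimplementation of the real macros):

	/* The <linux/err.h> mechanics the patch relies on (sketch): */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR_sketch(long error)
	{
		return (void *)error;			/* e.g. -ENOENT */
	}

	static inline long IS_ERR_VALUE_sketch(unsigned long x)
	{
		return x >= (unsigned long)-MAX_ERRNO;	/* top 4095 values */
	}

This is why is_error_pfn() can become a plain IS_ERR_VALUE() test and no backing pages need to be allocated at all.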
From 23d43cf998275bc97437931c0cdee1df2c1aa3ca Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Tue, 24 Jul 2012 08:51:20 -0400
Subject: KVM: Move KVM_IRQ_LINE to arch-generic code

Handle KVM_IRQ_LINE and KVM_IRQ_LINE_STATUS in the generic
kvm_vm_ioctl() function and call into kvm_vm_ioctl_irq_line().

This is even more relevant when KVM/ARM also uses this ioctl.

Signed-off-by: Christoffer Dall
Signed-off-by: Avi Kivity
---
 arch/ia64/kvm/kvm-ia64.c | 33 ++++++++++-----------------------
 arch/x86/kvm/x86.c       | 33 ++++++++++-----------------------
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      | 23 +++++++++++++++++++++++
 4 files changed, 44 insertions(+), 46 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bd77cb507c1c..eac65380bd20 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -924,6 +924,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	return 0;
 }

+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+{
+	if (!irqchip_in_kernel(kvm))
+		return -ENXIO;
+
+	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+					irq_event->irq, irq_event->level);
+	return 0;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
 {
@@ -963,29 +973,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
			goto out;
		}
		break;
-	case KVM_IRQ_LINE_STATUS:
-	case KVM_IRQ_LINE: {
-		struct kvm_irq_level irq_event;
-
-		r = -EFAULT;
-		if (copy_from_user(&irq_event, argp, sizeof irq_event))
-			goto out;
-		r = -ENXIO;
-		if (irqchip_in_kernel(kvm)) {
-			__s32 status;
-			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-				    irq_event.irq, irq_event.level);
-			if (ioctl == KVM_IRQ_LINE_STATUS) {
-				r = -EFAULT;
-				irq_event.status = status;
-				if (copy_to_user(argp, &irq_event,
-							sizeof irq_event))
-					goto out;
-			}
-			r = 0;
-		}
-		break;
-	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d9d08edbf29..b6379e55ee27 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3165,6 +3165,16 @@ out:
	return r;
 }

+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+{
+	if (!irqchip_in_kernel(kvm))
+		return -ENXIO;
+
+	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+					irq_event->irq, irq_event->level);
+	return 0;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
 {
@@ -3271,29 +3281,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
	create_pit_unlock:
		mutex_unlock(&kvm->slots_lock);
		break;
-	case KVM_IRQ_LINE_STATUS:
-	case KVM_IRQ_LINE: {
-		struct kvm_irq_level irq_event;
-
-		r = -EFAULT;
-		if (copy_from_user(&irq_event, argp, sizeof irq_event))
-			goto out;
-		r = -ENXIO;
-		if (irqchip_in_kernel(kvm)) {
-			__s32 status;
-			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-					irq_event.irq, irq_event.level);
-			if (ioctl == KVM_IRQ_LINE_STATUS) {
-				r = -EFAULT;
-				irq_event.status = status;
-				if (copy_to_user(argp, &irq_event,
-							sizeof irq_event))
-					goto out;
-			}
-			r = 0;
-		}
-		break;
-	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4e60d3695e4e..dbc65f9d6a2b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -498,6 +498,7 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de89497fe4c7..bcf973ec98ff 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2148,6 +2148,29 @@ static long kvm_vm_ioctl(struct file *filp,
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
+#endif
+#ifdef __KVM_HAVE_IRQ_LINE
+	case KVM_IRQ_LINE_STATUS:
+	case KVM_IRQ_LINE: {
+		struct kvm_irq_level irq_event;
+
+		r = -EFAULT;
+		if (copy_from_user(&irq_event, argp, sizeof irq_event))
+			goto out;
+
+		r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
+		if (r)
+			goto out;
+
+		r = -EFAULT;
+		if (ioctl == KVM_IRQ_LINE_STATUS) {
+			if (copy_to_user(argp, &irq_event, sizeof irq_event))
+				goto out;
+		}
+
+		r = 0;
+		break;
+	}
 #endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
-- cgit v1.2.3
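The userspace ABI is unchanged by this move; only the in-kernel dispatch point is. A minimal, hypothetical userspace caller, assuming vmfd is an already-open VM file descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: raise or lower a guest interrupt line. */
	static int set_irq_line(int vmfd, unsigned int irq, int level)
	{
		struct kvm_irq_level irq_level = {
			.irq   = irq,
			.level = level,
		};

		/* KVM_IRQ_LINE_STATUS additionally copies back .status */
		return ioctl(vmfd, KVM_IRQ_LINE, &irq_level);
	}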
From d89cc617b954aff4030fce178f7d86f59aaf713d Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Wed, 1 Aug 2012 18:03:28 +0900
Subject: KVM: Push rmap into kvm_arch_memory_slot

Two reasons:
 - x86 can integrate rmap and rmap_pde and remove the heuristics in
   __gfn_to_rmap().
 - Some architectures do not need rmap.

Since rmap is one of the most memory-consuming structures in KVM, ppc
had better restrict the allocation to Book3S HV.

Signed-off-by: Takuya Yoshikawa
Acked-by: Paul Mackerras
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c |  6 ++--
 arch/powerpc/kvm/book3s_hv_rm_mmu.c |  4 +--
 arch/powerpc/kvm/powerpc.c          |  8 ++++++
 arch/x86/include/asm/kvm_host.h     |  2 +-
 arch/x86/kvm/mmu.c                  |  5 +---
 arch/x86/kvm/x86.c                  | 55 +++++++++++++++++++++----------------
 include/linux/kvm_host.h            |  1 -
 virt/kvm/kvm_main.c                 | 11 +-------
 9 files changed, 49 insertions(+), 44 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 572ad0141268..a29e0918172a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -221,6 +221,7 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80

 struct kvm_arch_memory_slot {
+	unsigned long *rmap;
 };

 struct kvm_arch {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3c635c0616b0..d95d11322a15 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -705,7 +705,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

-	rmap = &memslot->rmap[gfn - memslot->base_gfn];
+	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
@@ -788,7 +788,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

-			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
+			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}
@@ -1036,7 +1036,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
	unsigned long *rmapp, *map;

	preempt_disable();
-	rmapp = memslot->rmap;
+	rmapp = memslot->arch.rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5c70d19494f9..56ac1a5d9912 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -84,7 +84,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

-	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
@@ -180,7 +180,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
-	rmap = &memslot->rmap[slot_fn];
+	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 87f4dc886076..879b14a61403 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -302,10 +302,18 @@ long kvm_arch_dev_ioctl(struct file *filp,
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
 {
+	if (!dont || free->arch.rmap != dont->arch.rmap) {
+		vfree(free->arch.rmap);
+		free->arch.rmap = NULL;
+	}
 }

 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
+	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+	if (!slot->arch.rmap)
+		return -ENOMEM;
+
	return 0;
 }
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48e713188469..1309e69b57fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -504,7 +504,7 @@ struct kvm_lpage_info {
 };

 struct kvm_arch_memory_slot {
-	unsigned long *rmap_pde[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ee768bb2367f..aa9a987ddefb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -970,11 +970,8 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 {
	unsigned long idx;

-	if (likely(level == PT_PAGE_TABLE_LEVEL))
-		return &slot->rmap[gfn - slot->base_gfn];
-
	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->arch.rmap_pde[level - PT_DIRECTORY_LEVEL][idx];
+	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
 }

 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index abc039d78428..ebf2109318e0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6303,14 +6303,18 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 {
	int i;

-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		if (!dont || free->arch.rmap_pde[i] != dont->arch.rmap_pde[i]) {
-			kvm_kvfree(free->arch.rmap_pde[i]);
-			free->arch.rmap_pde[i] = NULL;
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
+			kvm_kvfree(free->arch.rmap[i]);
+			free->arch.rmap[i] = NULL;
		}
-		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
-			kvm_kvfree(free->arch.lpage_info[i]);
-			free->arch.lpage_info[i] = NULL;
+		if (i == 0)
+			continue;
+
+		if (!dont || free->arch.lpage_info[i - 1] !=
+			     dont->arch.lpage_info[i - 1]) {
+			kvm_kvfree(free->arch.lpage_info[i - 1]);
+			free->arch.lpage_info[i - 1] = NULL;
		}
	}
 }
@@ -6319,28 +6323,30 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
	int i;

-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		unsigned long ugfn;
		int lpages;
-		int level = i + 2;
+		int level = i + 1;

		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

-		slot->arch.rmap_pde[i] =
-			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap_pde[i]));
-		if (!slot->arch.rmap_pde[i])
+		slot->arch.rmap[i] =
+			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
+		if (!slot->arch.rmap[i])
			goto out_free;
+		if (i == 0)
+			continue;

-		slot->arch.lpage_info[i] =
-			kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
-		if (!slot->arch.lpage_info[i])
+		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
+				      sizeof(*slot->arch.lpage_info[i - 1]));
+		if (!slot->arch.lpage_info[i - 1])
			goto out_free;

		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i][0].write_count = 1;
+			slot->arch.lpage_info[i - 1][0].write_count = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
@@ -6352,18 +6358,21 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
			unsigned long j;

			for (j = 0; j < lpages; ++j)
-				slot->arch.lpage_info[i][j].write_count = 1;
+				slot->arch.lpage_info[i - 1][j].write_count = 1;
		}
	}

	return 0;

 out_free:
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		kvm_kvfree(slot->arch.rmap_pde[i]);
-		kvm_kvfree(slot->arch.lpage_info[i]);
-		slot->arch.rmap_pde[i] = NULL;
-		slot->arch.lpage_info[i] = NULL;
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		kvm_kvfree(slot->arch.rmap[i]);
+		slot->arch.rmap[i] = NULL;
+		if (i == 0)
+			continue;
+
+		kvm_kvfree(slot->arch.lpage_info[i - 1]);
+		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index dbc65f9d6a2b..3c16f0f1fe35 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -213,7 +213,6 @@ struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
-	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bcf973ec98ff..14ec567816ab 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -550,16 +550,12 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
 {
-	if (!dont || free->rmap != dont->rmap)
-		vfree(free->rmap);
-
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(free, dont);

	free->npages = 0;
-	free->rmap = NULL;
 }

 void kvm_free_physmem(struct kvm *kvm)
@@ -768,11 +764,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
	if (npages && !old.npages) {
		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
-#ifndef CONFIG_S390
-		new.rmap = vzalloc(npages * sizeof(*new.rmap));
-		if (!new.rmap)
-			goto out_free;
-#endif /* not defined CONFIG_S390 */
+
		if (kvm_arch_create_memslot(&new, npages))
			goto out_free;
	}
@@ -831,7 +823,6 @@ int __kvm_set_memory_region(struct kvm *kvm,

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
-		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}
-- cgit v1.2.3
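With rmap[0] now covering 4K pages, every level indexes uniformly through gfn_to_index(), which boils down to a shift-and-subtract. Spelled out as a sketch (an illustrative reimplementation, not the header's exact text):

	/* Sketch: uniform rmap indexing after the change. */
	static unsigned long *gfn_to_rmap_sketch(struct kvm_memory_slot *slot,
						 gfn_t gfn, int level)
	{
		/* gfn_to_index() ~ (gfn >> shift) - (base_gfn >> shift),
		 * where shift = KVM_HPAGE_GFN_SHIFT(level) */
		unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

		return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
	}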
From 6c8ee57be9350c5c2cafdd6a99d0462d528676e2 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 3 Aug 2012 15:37:54 +0800
Subject: KVM: introduce KVM_PFN_ERR_FAULT

After that, the exported, non-inline function get_fault_pfn() can be
removed.

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/mmu.c       |  2 +-
 include/linux/kvm_host.h |  3 ++-
 virt/kvm/kvm_main.c      | 12 +++---------
 3 files changed, 6 insertions(+), 11 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aa9a987ddefb..9cf90c8d5843 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2512,7 +2512,7 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
-		return get_fault_pfn();
+		return KVM_PFN_ERR_FAULT;

	hva = gfn_to_hva_memslot(slot, gfn);

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3c16f0f1fe35..ef5554f47486 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -48,6 +48,8 @@
 #define KVM_MAX_MMIO_FRAGMENTS \
	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

+#define KVM_PFN_ERR_FAULT	(-EFAULT)
+
 /*
  * vcpu->requests bit members
  */
@@ -443,7 +445,6 @@ void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
 void kvm_get_pfn(pfn_t pfn);
-pfn_t get_fault_pfn(void);

 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 14ec567816ab..ef0491645a10 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -939,12 +939,6 @@ static pfn_t get_bad_pfn(void)
	return -ENOENT;
 }

-pfn_t get_fault_pfn(void)
-{
-	return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(get_fault_pfn);
-
 static pfn_t get_hwpoison_pfn(void)
 {
	return -EHWPOISON;
@@ -1115,7 +1109,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		struct vm_area_struct *vma;

		if (atomic)
-			return get_fault_pfn();
+			return KVM_PFN_ERR_FAULT;

		down_read(&current->mm->mmap_sem);
		if (npages == -EHWPOISON ||
@@ -1127,7 +1121,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
-			pfn = get_fault_pfn();
+			pfn = KVM_PFN_ERR_FAULT;
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
@@ -1135,7 +1129,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
-			pfn = get_fault_pfn();
+			pfn = KVM_PFN_ERR_FAULT;
		}
		up_read(&current->mm->mmap_sem);
	} else
-- cgit v1.2.3

From e6c1502b3f933ace20c711ce34ab696f5a67086d Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 3 Aug 2012 15:38:36 +0800
Subject: KVM: introduce KVM_PFN_ERR_HWPOISON

Then get_hwpoison_pfn() and is_hwpoison_pfn() can be removed.

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/mmu.c       |  2 +-
 include/linux/kvm_host.h |  2 +-
 virt/kvm/kvm_main.c      | 13 +------------
 3 files changed, 3 insertions(+), 14 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9cf90c8d5843..d3cdf69da513 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2649,7 +2649,7 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
	kvm_release_pfn_clean(pfn);
-	if (is_hwpoison_pfn(pfn)) {
+	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
		return 0;
	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ef5554f47486..840f44a096c9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -49,6 +49,7 @@
	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

 #define KVM_PFN_ERR_FAULT	(-EFAULT)
+#define KVM_PFN_ERR_HWPOISON	(-EHWPOISON)

 /*
  * vcpu->requests bit members
@@ -395,7 +396,6 @@ extern struct page *bad_page;

 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
-int is_hwpoison_pfn(pfn_t pfn);
 int is_noslot_pfn(pfn_t pfn);
 int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ef0491645a10..7fce2d5787ae 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -939,17 +939,6 @@ static pfn_t get_bad_pfn(void)
	return -ENOENT;
 }

-static pfn_t get_hwpoison_pfn(void)
-{
-	return -EHWPOISON;
-}
-
-int is_hwpoison_pfn(pfn_t pfn)
-{
-	return pfn == -EHWPOISON;
-}
-EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
-
 int is_noslot_pfn(pfn_t pfn)
 {
	return pfn == -ENOENT;
@@ -1115,7 +1104,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		if (npages == -EHWPOISON ||
		      (!async && check_user_page_hwpoison(addr))) {
			up_read(&current->mm->mmap_sem);
-			return get_hwpoison_pfn();
+			return KVM_PFN_ERR_HWPOISON;
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);
-- cgit v1.2.3

From 950e95097b1c6573ef5e21061ccb56964278c45b Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 3 Aug 2012 15:39:19 +0800
Subject: KVM: introduce KVM_PFN_ERR_BAD

Then, remove get_bad_pfn().

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h | 1 +
 virt/kvm/kvm_main.c      | 7 +------
 2 files changed, 2 insertions(+), 6 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 840f44a096c9..b2cf3109822e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -50,6 +50,7 @@

 #define KVM_PFN_ERR_FAULT	(-EFAULT)
 #define KVM_PFN_ERR_HWPOISON	(-EHWPOISON)
+#define KVM_PFN_ERR_BAD		(-ENOENT)

 /*
  * vcpu->requests bit members
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7fce2d5787ae..988b2339d846 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -934,11 +934,6 @@ int is_error_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);

-static pfn_t get_bad_pfn(void)
-{
-	return -ENOENT;
-}
-
 int is_noslot_pfn(pfn_t pfn)
 {
	return pfn == -ENOENT;
@@ -1143,7 +1138,7 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
-		return get_bad_pfn();
+		return KVM_PFN_ERR_BAD;

	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }
-- cgit v1.2.3

From 83f09228d068911ac8797ae8d6febef886698936 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 3 Aug 2012 15:39:59 +0800
Subject: KVM: inline is_*_pfn functions

These functions are exported and therefore cannot be inlined; move
them to kvm_host.h to eliminate the overhead of the function calls.

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h | 19 ++++++++++++++++---
 virt/kvm/kvm_main.c      | 18 ------------------
 2 files changed, 16 insertions(+), 21 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b2cf3109822e..19d91ceaf5e6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include <linux/err.h>
 #include
 #include

@@ -52,6 +53,21 @@
 #define KVM_PFN_ERR_HWPOISON	(-EHWPOISON)
 #define KVM_PFN_ERR_BAD		(-ENOENT)

+static inline int is_error_pfn(pfn_t pfn)
+{
+	return IS_ERR_VALUE(pfn);
+}
+
+static inline int is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == -ENOENT;
+}
+
+static inline int is_invalid_pfn(pfn_t pfn)
+{
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+}
+
 /*
  * vcpu->requests bit members
  */
@@ -396,9 +412,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 extern struct page *bad_page;

 int is_error_page(struct page *page);
-int is_error_pfn(pfn_t pfn);
-int is_noslot_pfn(pfn_t pfn);
-int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 988b2339d846..eb73e5f13678 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -928,24 +928,6 @@ int is_error_page(struct page *page)
 }
 EXPORT_SYMBOL_GPL(is_error_page);

-int is_error_pfn(pfn_t pfn)
-{
-	return IS_ERR_VALUE(pfn);
-}
-EXPORT_SYMBOL_GPL(is_error_pfn);
-
-int is_noslot_pfn(pfn_t pfn)
-{
-	return pfn == -ENOENT;
-}
-EXPORT_SYMBOL_GPL(is_noslot_pfn);
-
-int is_invalid_pfn(pfn_t pfn)
-{
-	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
-}
-EXPORT_SYMBOL_GPL(is_invalid_pfn);
-
 struct page *get_bad_page(void)
 {
	return ERR_PTR(-ENOENT);
 }
-- cgit v1.2.3

From 6cede2e6794be6b0649f62d3681e0c4aff5a9270 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 3 Aug 2012 15:41:22 +0800
Subject: KVM: introduce KVM_ERR_PTR_BAD_PAGE

It is used to eliminate the overhead of the function call and to
clean up the code.

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h |  9 +++++++--
 virt/kvm/async_pf.c      |  2 +-
 virt/kvm/kvm_main.c      | 13 +------------
 3 files changed, 9 insertions(+), 15 deletions(-)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e2dcc7cb2284..ce7c32950f4e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -68,6 +68,13 @@ static inline int is_invalid_pfn(pfn_t pfn)
	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
 }

+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline int is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
 /*
  * vcpu->requests bit members
  */
@@ -409,7 +416,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
	return slot;
 }

-int is_error_page(struct page *page);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
@@ -436,7 +442,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm);

 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);
-struct page *get_bad_page(void);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 79722782d9d7..56f553391896 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -203,7 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
	if (!work)
		return -ENOMEM;

-	work->page = get_bad_page();
+	work->page = KVM_ERR_PTR_BAD_PAGE;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eb73e5f13678..93d3c6e063c8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -922,17 +922,6 @@ void kvm_disable_largepages(void)
 }
 EXPORT_SYMBOL_GPL(kvm_disable_largepages);

-int is_error_page(struct page *page)
-{
-	return IS_ERR(page);
-}
-EXPORT_SYMBOL_GPL(is_error_page);
-
-struct page *get_bad_page(void)
-{
-	return ERR_PTR(-ENOENT);
-}
-
 static inline unsigned long bad_hva(void)
 {
	return PAGE_OFFSET;
@@ -1179,7 +1168,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
	WARN_ON(kvm_is_mmio_pfn(pfn));

	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
-		return get_bad_page();
+		return KVM_ERR_PTR_BAD_PAGE;

	return pfn_to_page(pfn);
 }
-- cgit v1.2.3
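Taken together, this batch leaves a small, closed taxonomy that callers probe with three predicates. Summarized as a sketch, with values per the defines introduced above:

	/*
	 * Error-pfn taxonomy after the conversion (sketch):
	 *
	 *   value                 is_error_pfn  is_noslot_pfn  is_invalid_pfn
	 *   KVM_PFN_ERR_FAULT         yes           no             yes
	 *   KVM_PFN_ERR_HWPOISON      yes           no             yes
	 *   KVM_PFN_ERR_BAD           yes           yes            no
	 *   ordinary pfn              no            no             no
	 */
	static const char *classify_pfn_sketch(pfn_t pfn)
	{
		if (!is_error_pfn(pfn))
			return "ordinary pfn";
		if (is_noslot_pfn(pfn))
			return "no memslot (MMIO candidate)";
		return "invalid (fault or hwpoison)";
	}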
-- cgit v1.2.3 From cb9aaa30b133574b646d9d4766ef08a843211393 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Fri, 3 Aug 2012 15:42:10 +0800 Subject: KVM: do not release the error pfn After commit a2766325cf9f9, the error pfn is replaced by the error code, it need not be released anymore [ The patch has been compiling tested for powerpc ] Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/powerpc/kvm/e500_tlb.c | 1 - arch/x86/kvm/mmu.c | 7 +++---- arch/x86/kvm/mmu_audit.c | 4 +--- arch/x86/kvm/paging_tmpl.h | 8 ++------ virt/kvm/iommu.c | 1 - virt/kvm/kvm_main.c | 14 ++++++++------ 6 files changed, 14 insertions(+), 21 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index c8f6c5826742..09ce5ac128f8 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (is_error_pfn(pfn)) { printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", (long)gfn); - kvm_release_pfn_clean(pfn); return; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d3cdf69da513..9651c2cd0005 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, rmap_recycle(vcpu, sptep, gfn); } } - kvm_release_pfn_clean(pfn); + + if (!is_error_pfn(pfn)) + kvm_release_pfn_clean(pfn); } static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) @@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct * static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) { - kvm_release_pfn_clean(pfn); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); return 0; @@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, if (!async) return false; /* *pfn has correct page already */ - kvm_release_pfn_clean(*pfn); - if (!prefault && can_do_async_pf(vcpu)) { trace_kvm_try_async_get_page(gva, gfn); if (kvm_find_async_pf_gfn(vcpu, gfn)) { diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index ca403f9bb0f2..daff69e21150 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn); - if (is_error_pfn(pfn)) { - kvm_release_pfn_clean(pfn); + if (is_error_pfn(pfn)) return; - } hpa = pfn << PAGE_SHIFT; if ((*sptep & PT64_BASE_ADDR_MASK) != hpa) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index bb7cf01cae76..bf8c42bf50fe 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true); pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte)); - if (mmu_invalid_pfn(pfn)) { - kvm_release_pfn_clean(pfn); + if (mmu_invalid_pfn(pfn)) return; - } /* * we call mmu_set_spte() with host_writable = true because that @@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, gfn = gpte_to_gfn(gpte); pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, pte_access & ACC_WRITE_MASK); - if (mmu_invalid_pfn(pfn)) { - kvm_release_pfn_clean(pfn); + if 
(mmu_invalid_pfn(pfn)) break; - } mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, NULL, PT_PAGE_TABLE_LEVEL, gfn, diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 6a67bea4019c..037cb6730e68 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) */ pfn = kvm_pin_pages(slot, gfn, page_size); if (is_error_pfn(pfn)) { - kvm_release_pfn_clean(pfn); gfn += 1; continue; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 93d3c6e063c8..eafba99d1070 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -102,9 +102,6 @@ static bool largepages_enabled = true; bool kvm_is_mmio_pfn(pfn_t pfn) { - if (is_error_pfn(pfn)) - return false; - if (pfn_valid(pfn)) { int reserved; struct page *tail = pfn_to_page(pfn); @@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); static struct page *kvm_pfn_to_page(pfn_t pfn) { - WARN_ON(kvm_is_mmio_pfn(pfn)); + if (is_error_pfn(pfn)) + return KVM_ERR_PTR_BAD_PAGE; - if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn)) + if (kvm_is_mmio_pfn(pfn)) { + WARN_ON(1); return KVM_ERR_PTR_BAD_PAGE; + } return pfn_to_page(pfn); } @@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean); void kvm_release_pfn_clean(pfn_t pfn) { - if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn)) + WARN_ON(is_error_pfn(pfn)); + + if (!kvm_is_mmio_pfn(pfn)) put_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); -- cgit v1.2.3 From 32cad84f44d186654492f1a50a1424c8906ccbd9 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Fri, 3 Aug 2012 15:42:52 +0800 Subject: KVM: do not release the error page After commit a2766325cf9f9, the error page is replaced by the error code, it need not be released anymore [ The patch has been compiling tested for powerpc ] Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/powerpc/kvm/44x_tlb.c | 1 - arch/powerpc/kvm/book3s_pr.c | 4 +--- arch/x86/kvm/svm.c | 1 - arch/x86/kvm/vmx.c | 5 ++--- arch/x86/kvm/x86.c | 9 +++------ include/linux/kvm_host.h | 2 +- virt/kvm/async_pf.c | 4 ++-- virt/kvm/kvm_main.c | 5 +++-- 8 files changed, 12 insertions(+), 19 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 33aa715dab28..5dd3ab469976 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -319,7 +319,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, if (is_error_page(new_page)) { printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n", (unsigned long long)gfn); - kvm_release_page_clean(new_page); return; } hpaddr = page_to_phys(new_page); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index a1baec340f7e..05c28f59f77f 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -242,10 +242,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) int i; hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); - if (is_error_page(hpage)) { - kvm_release_page_clean(hpage); + if (is_error_page(hpage)) return; - } hpage_offset = pte->raddr & ~PAGE_MASK; hpage_offset &= ~0xFFFULL; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 687d0c30e559..31be4a557447 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2105,7 +2105,6 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) return kmap(page); error: - kvm_release_page_clean(page); kvm_inject_gp(&svm->vcpu, 0); return NULL; diff --git 
a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d6e4cbc42b8e..cc8ad9836927 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -596,10 +596,9 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) { struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); - if (is_error_page(page)) { - kvm_release_page_clean(page); + if (is_error_page(page)) return NULL; - } + return page; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ebf2109318e0..7953a9e7cb17 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1639,10 +1639,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); - if (is_error_page(vcpu->arch.time_page)) { - kvm_release_page_clean(vcpu->arch.time_page); + if (is_error_page(vcpu->arch.time_page)) vcpu->arch.time_page = NULL; - } + break; } case MSR_KVM_ASYNC_PF_EN: @@ -3945,10 +3944,8 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); - if (is_error_page(page)) { - kvm_release_page_clean(page); + if (is_error_page(page)) goto emul_write; - } kaddr = kmap_atomic(page); kaddr += offset_in_page(gpa); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ce7c32950f4e..07226f820e6c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -457,7 +457,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); -void kvm_release_pfn_dirty(pfn_t); +void kvm_release_pfn_dirty(pfn_t pfn); void kvm_release_pfn_clean(pfn_t pfn); void kvm_set_pfn_dirty(pfn_t pfn); void kvm_set_pfn_accessed(pfn_t pfn); diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 56f553391896..ea475cd03511 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -111,7 +111,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) list_entry(vcpu->async_pf.done.next, typeof(*work), link); list_del(&work->link); - if (work->page) + if (!is_error_page(work->page)) kvm_release_page_clean(work->page); kmem_cache_free(async_pf_cache, work); } @@ -138,7 +138,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) list_del(&work->queue); vcpu->async_pf.queued--; - if (work->page) + if (!is_error_page(work->page)) kvm_release_page_clean(work->page); kmem_cache_free(async_pf_cache, work); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index eafba99d1070..a2e85af847c1 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1186,8 +1186,9 @@ EXPORT_SYMBOL_GPL(gfn_to_page); void kvm_release_page_clean(struct page *page) { - if (!is_error_page(page)) - kvm_release_pfn_clean(page_to_pfn(page)); + WARN_ON(is_error_page(page)); + + kvm_release_pfn_clean(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_release_page_clean); -- cgit v1.2.3 From a50d64d6590f470b712758598a077ef4fd0fc40a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 10:58:13 +0800 Subject: KVM: fix missing check for memslot flags Check the flags when a memslot is registered from userspace, as Avi suggested Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'virt/kvm/kvm_main.c') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a2e85af847c1..7b94d70a323f 100644 --- 
a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -678,6 +678,14 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) slots->generation++; } +static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) +{ + if (mem->flags & ~KVM_MEM_LOG_DIRTY_PAGES) + return -EINVAL; + + return 0; +} + /* * Allocate some memory and give it an address in the guest physical address * space. @@ -698,6 +706,10 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot old, new; struct kvm_memslots *slots, *old_memslots; + r = check_memory_region_flags(mem); + if (r) + goto out; + r = -EINVAL; /* General sanity checks */ if (mem->memory_size & (PAGE_SIZE - 1)) -- cgit v1.2.3 From 037d92dc5d4691ae7cf44699c55ca83b1b441992 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 10:59:12 +0800 Subject: KVM: introduce gfn_to_pfn_memslot_atomic It can be used instead of hva_to_pfn_atomic Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 5 +---- include/linux/kvm_host.h | 3 ++- virt/kvm/kvm_main.c | 14 ++++++++------ 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9651c2cd0005..5548971ae80d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2510,15 +2510,12 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { struct kvm_memory_slot *slot; - unsigned long hva; slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); if (!slot) return KVM_PFN_ERR_FAULT; - hva = gfn_to_hva_memslot(slot, gfn); - - return hva_to_pfn_atomic(hva); + return gfn_to_pfn_memslot_atomic(slot, gfn); } static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d4bd4d41e355..52c86e4f6d8c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -462,7 +462,6 @@ void kvm_release_page_dirty(struct page *page); void kvm_set_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); -pfn_t hva_to_pfn_atomic(unsigned long addr); pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, bool write_fault, bool *writable); @@ -470,6 +469,8 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); +pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); + void kvm_release_pfn_dirty(pfn_t pfn); void kvm_release_pfn_clean(pfn_t pfn); void kvm_set_pfn_dirty(pfn_t pfn); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7b94d70a323f..543f9b7e5aa2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1102,12 +1102,6 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, return pfn; } -pfn_t hva_to_pfn_atomic(unsigned long addr) -{ - return hva_to_pfn(addr, true, NULL, true, NULL); -} -EXPORT_SYMBOL_GPL(hva_to_pfn_atomic); - static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) { @@ -1155,6 +1149,14 @@ pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) return hva_to_pfn(addr, false, NULL, true, NULL); } +pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) +{ + unsigned long addr = gfn_to_hva_memslot(slot, gfn); + + return hva_to_pfn(addr, true, NULL, true, NULL); +} 
+EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); + int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, int nr_pages) { -- cgit v1.2.3 From 86ab8cffb498077e926957f099b064db3751c1de Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 10:59:53 +0800 Subject: KVM: introduce gfn_to_hva_read/kvm_read_hva/kvm_read_hva_atomic This set of functions is only used to read data from host space. In a later patch, gfn_to_hva_read will return only a readonly hva, and the function name is a good hint that gfn_to_hva_read pairs with kvm_read_hva()/kvm_read_hva_atomic() Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 543f9b7e5aa2..6e3ea15397d4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1002,6 +1002,25 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(gfn_to_hva); +/* + * The hva returned by this function is only allowed to be read. + * It should pair with kvm_read_hva() or kvm_read_hva_atomic(). + */ +static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn) +{ + return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); +} + +static int kvm_read_hva(void *data, void __user *hva, int len) +{ + return __copy_from_user(data, hva, len); +} + +static int kvm_read_hva_atomic(void *data, void __user *hva, int len) +{ + return __copy_from_user_inatomic(data, hva, len); +} + int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int write, struct page **page) { @@ -1274,10 +1293,10 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int r; unsigned long addr; - addr = gfn_to_hva(kvm, gfn); + addr = gfn_to_hva_read(kvm, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; - r = __copy_from_user(data, (void __user *)addr + offset, len); + r = kvm_read_hva(data, (void __user *)addr + offset, len); if (r) return -EFAULT; return 0; @@ -1312,11 +1331,11 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, gfn_t gfn = gpa >> PAGE_SHIFT; int offset = offset_in_page(gpa); - addr = gfn_to_hva(kvm, gfn); + addr = gfn_to_hva_read(kvm, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; pagefault_disable(); - r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); + r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len); pagefault_enable(); if (r) return -EFAULT; -- cgit v1.2.3 From 2fc843117d64b31c20d36c6b625c0626c9a41a41 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 11:00:22 +0800 Subject: KVM: reorganize hva_to_pfn hva_to_pfn does too many things; this patch reorganizes the code to make it more readable Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 159 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 97 insertions(+), 62 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6e3ea15397d4..aa4a38ad9d9a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1041,83 +1041,118 @@ static inline int check_user_page_hwpoison(unsigned long addr) return rc == -EHWPOISON; } -static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, - bool write_fault, bool *writable) +/* + * The atomic path to get the writable pfn which will be stored in @pfn, + * true indicates success, 
otherwise false is returned. + */ +static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable, pfn_t *pfn) { struct page *page[1]; - int npages = 0; - pfn_t pfn; + int npages; - /* we can do it either atomically or asynchronously, not both */ - BUG_ON(atomic && async); + if (!(async || atomic)) + return false; - BUG_ON(!write_fault && !writable); + npages = __get_user_pages_fast(addr, 1, 1, page); + if (npages == 1) { + *pfn = page_to_pfn(page[0]); - if (writable) - *writable = true; + if (writable) + *writable = true; + return true; + } + + return false; +} - if (atomic || async) - npages = __get_user_pages_fast(addr, 1, 1, page); +/* + * The slow path to get the pfn of the specified host virtual address, + * 1 indicates success, -errno is returned if error is detected. + */ +static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, + bool *writable, pfn_t *pfn) +{ + struct page *page[1]; + int npages = 0; - if (unlikely(npages != 1) && !atomic) { - might_sleep(); + might_sleep(); - if (writable) - *writable = write_fault; - - if (async) { - down_read(¤t->mm->mmap_sem); - npages = get_user_page_nowait(current, current->mm, - addr, write_fault, page); - up_read(¤t->mm->mmap_sem); - } else - npages = get_user_pages_fast(addr, 1, write_fault, - page); - - /* map read fault as writable if possible */ - if (unlikely(!write_fault) && npages == 1) { - struct page *wpage[1]; - - npages = __get_user_pages_fast(addr, 1, 1, wpage); - if (npages == 1) { - *writable = true; - put_page(page[0]); - page[0] = wpage[0]; - } - npages = 1; + if (writable) + *writable = write_fault; + + if (async) { + down_read(¤t->mm->mmap_sem); + npages = get_user_page_nowait(current, current->mm, + addr, write_fault, page); + up_read(¤t->mm->mmap_sem); + } else + npages = get_user_pages_fast(addr, 1, write_fault, + page); + if (npages != 1) + return npages; + + /* map read fault as writable if possible */ + if (unlikely(!write_fault)) { + struct page *wpage[1]; + + npages = __get_user_pages_fast(addr, 1, 1, wpage); + if (npages == 1) { + *writable = true; + put_page(page[0]); + page[0] = wpage[0]; } + + npages = 1; } + *pfn = page_to_pfn(page[0]); + return npages; +} + +static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable) +{ + struct vm_area_struct *vma; + pfn_t pfn = 0; + int npages; - if (unlikely(npages != 1)) { - struct vm_area_struct *vma; + /* we can do it either atomically or asynchronously, not both */ + BUG_ON(atomic && async); - if (atomic) - return KVM_PFN_ERR_FAULT; + BUG_ON(!write_fault && !writable); - down_read(¤t->mm->mmap_sem); - if (npages == -EHWPOISON || - (!async && check_user_page_hwpoison(addr))) { - up_read(¤t->mm->mmap_sem); - return KVM_PFN_ERR_HWPOISON; - } + if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) + return pfn; - vma = find_vma_intersection(current->mm, addr, addr+1); - - if (vma == NULL) - pfn = KVM_PFN_ERR_FAULT; - else if ((vma->vm_flags & VM_PFNMAP)) { - pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + - vma->vm_pgoff; - BUG_ON(!kvm_is_mmio_pfn(pfn)); - } else { - if (async && (vma->vm_flags & VM_WRITE)) - *async = true; - pfn = KVM_PFN_ERR_FAULT; - } - up_read(¤t->mm->mmap_sem); - } else - pfn = page_to_pfn(page[0]); + if (atomic) + return KVM_PFN_ERR_FAULT; + npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); + if (npages == 1) + return pfn; + + down_read(¤t->mm->mmap_sem); + if (npages == -EHWPOISON || + (!async && 
check_user_page_hwpoison(addr))) { + pfn = KVM_PFN_ERR_HWPOISON; + goto exit; + } + + vma = find_vma_intersection(current->mm, addr, addr + 1); + + if (vma == NULL) + pfn = KVM_PFN_ERR_FAULT; + else if ((vma->vm_flags & VM_PFNMAP)) { + pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + + vma->vm_pgoff; + BUG_ON(!kvm_is_mmio_pfn(pfn)); + } else { + if (async && (vma->vm_flags & VM_WRITE)) + *async = true; + pfn = KVM_PFN_ERR_FAULT; + } +exit: + up_read(&current->mm->mmap_sem); return pfn; } -- cgit v1.2.3 From 12ce13fea925ab629f2281be2b9b01cf647a86cd Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 11:00:49 +0800 Subject: KVM: use 'writable' as a hint to map writable pfn In the current code, we always map a writable pfn for a read fault; in order to support readonly memslots, we map a writable pfn only if 'writable' is not NULL Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index aa4a38ad9d9a..c89d1b5129e4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1054,6 +1054,14 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, if (!(async || atomic)) return false; + /* + * Fast pin a writable pfn only if it is a write fault request + * or the caller allows to map a writable pfn for a read fault + * request. + */ + if (!(write_fault || writable)) + return false; + npages = __get_user_pages_fast(addr, 1, 1, page); if (npages == 1) { *pfn = page_to_pfn(page[0]); @@ -1093,7 +1101,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, return npages; /* map read fault as writable if possible */ - if (unlikely(!write_fault)) { + if (unlikely(!write_fault) && writable) { struct page *wpage[1]; npages = __get_user_pages_fast(addr, 1, 1, wpage); @@ -1109,6 +1117,20 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, return npages; } +/* + * Pin guest page in memory and return its pfn. + * @addr: host virtual address which maps memory to the guest + * @atomic: whether this function can sleep + * @async: whether this function needs to wait for IO to complete if the + * host page is not in memory + * @write_fault: whether we should get a writable host page + * @writable: whether to allow mapping a writable host page for !@write_fault + * + * The function will map a writable host page for these two cases: + * 1): @write_fault = true + * 2): @write_fault = false && @writable, @writable will tell the caller + * whether the mapping is writable. 
+ */ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, bool write_fault, bool *writable) { -- cgit v1.2.3 From ca3a490c7de8472b514e2d635c052b1e0f8e1c9d Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 11:01:50 +0800 Subject: KVM: introduce KVM_HVA_ERR_BAD Also remove bad_hva() and inline kvm_is_error_hva() Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- include/linux/kvm_host.h | 8 +++++++- virt/kvm/kvm_main.c | 13 +------------ 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4fd33e03d5dc..b9bba60c298b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -82,6 +82,13 @@ static inline bool is_invalid_pfn(pfn_t pfn) return !is_noslot_pfn(pfn) && is_error_pfn(pfn); } +#define KVM_HVA_ERR_BAD (PAGE_OFFSET) + +static inline bool kvm_is_error_hva(unsigned long addr) +{ + return addr == PAGE_OFFSET; +} + #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) static inline bool is_error_page(struct page *page) @@ -430,7 +437,6 @@ id_to_memslot(struct kvm_memslots *slots, int id) return slot; } -int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index c89d1b5129e4..e3e1658c491d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -931,17 +931,6 @@ void kvm_disable_largepages(void) } EXPORT_SYMBOL_GPL(kvm_disable_largepages); -static inline unsigned long bad_hva(void) -{ - return PAGE_OFFSET; -} - -int kvm_is_error_hva(unsigned long addr) -{ - return addr == bad_hva(); -} -EXPORT_SYMBOL_GPL(kvm_is_error_hva); - struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { return __gfn_to_memslot(kvm_memslots(kvm), gfn); @@ -988,7 +977,7 @@ static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages) { if (!slot || slot->flags & KVM_MEMSLOT_INVALID) - return bad_hva(); + return KVM_HVA_ERR_BAD; if (nr_pages) *nr_pages = slot->npages - (gfn - slot->base_gfn); -- cgit v1.2.3 From 4d8b81abc47b83a1939e59df2fdb0e98dfe0eedd Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 21 Aug 2012 11:02:51 +0800 Subject: KVM: introduce readonly memslot In the current code, if we map a readonly memory space from host to guest and the page is not currently mapped in the host, we will get a fault pfn while async is not allowed, and the vm will then crash. We introduce readonly memory regions to map ROM/ROMD into the guest: read access works as usual on a readonly memslot, while write access on a readonly memslot causes a KVM_EXIT_MMIO exit Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- Documentation/virtual/kvm/api.txt | 10 ++-- arch/x86/include/asm/kvm.h | 1 + arch/x86/kvm/mmu.c | 9 ++++ arch/x86/kvm/x86.c | 1 + include/linux/kvm.h | 6 ++- include/linux/kvm_host.h | 7 +-- virt/kvm/kvm_main.c | 96 +++++++++++++++++++++++++++-------- 7 files changed, 102 insertions(+), 28 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index bf33aaa4c59f..b91bfd43f007 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -857,7 +857,8 @@ struct kvm_userspace_memory_region { }; /* for kvm_memory_region::flags */ -#define KVM_MEM_LOG_DIRTY_PAGES 1UL +#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) +#define KVM_MEM_READONLY (1UL << 1) This ioctl allows the user to create or 
modify a guest physical memory slot. When changing an existing slot, it may be moved in the guest @@ -873,9 +874,12 @@ It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr be identical. This allows large pages in the guest to be backed by large pages in the host. -The flags field supports just one flag, KVM_MEM_LOG_DIRTY_PAGES, which +The flags field supports two flags, KVM_MEM_LOG_DIRTY_PAGES, which instructs kvm to keep track of writes to memory within the slot. See -the KVM_GET_DIRTY_LOG ioctl. +the KVM_GET_DIRTY_LOG ioctl. The other flag, KVM_MEM_READONLY, is available when the +KVM_CAP_READONLY_MEM capability is present; it indicates that the guest memory is read-only, +that is, the guest is only allowed to read it. Writes will be posted to +userspace as KVM_EXIT_MMIO exits. When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of the memory region are automatically reflected into the guest. For example, an mmap() diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 246617efd67f..521bf252e34b 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -25,6 +25,7 @@ #define __KVM_HAVE_DEBUGREGS #define __KVM_HAVE_XSAVE #define __KVM_HAVE_XCRS +#define __KVM_HAVE_READONLY_MEM /* Architectural interrupt line count. */ #define KVM_NR_INTERRUPTS 256 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5548971ae80d..8e312a2e1412 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2647,6 +2647,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct * static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) { + /* + * Do not cache the mmio info caused by writing the readonly gfn + * into the spte, otherwise read access on the readonly gfn can also + * cause an mmio page fault and be treated as mmio access. + * Return 1 to tell kvm to emulate it. + */ + if (pfn == KVM_PFN_ERR_RO_FAULT) + return 1; + if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 704680d0fa3e..42bbf4187d20 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2175,6 +2175,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_PCI_2_3: case KVM_CAP_KVMCLOCK_CTRL: + case KVM_CAP_READONLY_MEM: r = 1; break; case KVM_CAP_COALESCED_MMIO: diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 2de335d7f63e..d808694673f9 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -106,7 +106,8 @@ struct kvm_userspace_memory_region { * other bits are reserved for kvm internal use which are defined in * include/linux/kvm_host.h. 
*/ -#define KVM_MEM_LOG_DIRTY_PAGES 1UL +#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) +#define KVM_MEM_READONLY (1UL << 1) /* for KVM_IRQ_LINE */ struct kvm_irq_level { @@ -621,6 +622,9 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_PPC_GET_SMMU_INFO 78 #define KVM_CAP_S390_COW 79 #define KVM_CAP_PPC_ALLOC_HTAB 80 +#ifdef __KVM_HAVE_READONLY_MEM +#define KVM_CAP_READONLY_MEM 81 +#endif #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a913ac709a9d..5972c9845ddb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -465,6 +465,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); +unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_dirty(struct page *page); @@ -792,12 +793,6 @@ hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) return slot->base_gfn + gfn_offset; } -static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, - gfn_t gfn) -{ - return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; -} - static inline gpa_t gfn_to_gpa(gfn_t gfn) { return (gpa_t)gfn << PAGE_SHIFT; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e3e1658c491d..3416f8a31f63 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -680,7 +680,13 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) { - if (mem->flags & ~KVM_MEM_LOG_DIRTY_PAGES) + u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; + +#ifdef KVM_CAP_READONLY_MEM + valid_flags |= KVM_MEM_READONLY; +#endif + + if (mem->flags & ~valid_flags) return -EINVAL; return 0; @@ -973,18 +979,45 @@ out: return size; } -static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, - gfn_t *nr_pages) +static bool memslot_is_readonly(struct kvm_memory_slot *slot) +{ + return slot->flags & KVM_MEM_READONLY; +} + +static unsigned long __gfn_to_hva_memslot(struct kvm_memory_slot *slot, + gfn_t gfn) +{ + return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; +} + +static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, + gfn_t *nr_pages, bool write) { if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return KVM_HVA_ERR_BAD; + if (memslot_is_readonly(slot) && write) + return KVM_HVA_ERR_RO_BAD; + if (nr_pages) *nr_pages = slot->npages - (gfn - slot->base_gfn); - return gfn_to_hva_memslot(slot, gfn); + return __gfn_to_hva_memslot(slot, gfn); } +static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, + gfn_t *nr_pages) +{ + return __gfn_to_hva_many(slot, gfn, nr_pages, true); +} + +unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, + gfn_t gfn) +{ + return gfn_to_hva_many(slot, gfn, NULL); +} +EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); + unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); @@ -997,7 +1030,7 @@ EXPORT_SYMBOL_GPL(gfn_to_hva); */ static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn) { - return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); + return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false); } static int kvm_read_hva(void *data, void __user *hva, int len) @@ -1106,6 +1139,17 @@ static int 
hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, bool *writable, pfn_t *pfn) return npages; } +static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) +{ + if (unlikely(!(vma->vm_flags & VM_READ))) + return false; + + if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) + return false; + + return true; +} + /* * Pin guest page in memory and return its pfn. * @addr: host virtual address which maps memory to the guest @@ -1130,8 +1174,6 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, /* we can do it either atomically or asynchronously, not both */ BUG_ON(atomic && async); - BUG_ON(!write_fault && !writable); - if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) return pfn; @@ -1158,7 +1200,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, vma->vm_pgoff; BUG_ON(!kvm_is_mmio_pfn(pfn)); } else { - if (async && (vma->vm_flags & VM_WRITE)) + if (async && vma_is_valid(vma, write_fault)) *async = true; pfn = KVM_PFN_ERR_FAULT; } @@ -1167,19 +1209,40 @@ exit: return pfn; } +static pfn_t +__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, + bool *async, bool write_fault, bool *writable) +{ + unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); + + if (addr == KVM_HVA_ERR_RO_BAD) + return KVM_PFN_ERR_RO_FAULT; + + if (kvm_is_error_hva(addr)) + return KVM_PFN_ERR_BAD; + + /* Do not map writable pfn in the readonly memslot. */ + if (writable && memslot_is_readonly(slot)) { + *writable = false; + writable = NULL; + } + + return hva_to_pfn(addr, atomic, async, write_fault, + writable); +} + static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) { - unsigned long addr; + struct kvm_memory_slot *slot; if (async) *async = false; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return KVM_PFN_ERR_BAD; + slot = gfn_to_memslot(kvm, gfn); - return hva_to_pfn(addr, atomic, async, write_fault, writable); + return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, + writable); } pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) @@ -1210,15 +1273,12 @@ EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { - unsigned long addr = gfn_to_hva_memslot(slot, gfn); - return hva_to_pfn(addr, false, NULL, true, NULL); + return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); } pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) { - unsigned long addr = gfn_to_hva_memslot(slot, gfn); - - return hva_to_pfn(addr, true, NULL, true, NULL); + return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); -- cgit v1.2.3 From 66a03505a7fcc70187319ef2318832f4d3c451a6 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Fri, 24 Aug 2012 16:50:28 +0800 Subject: KVM: PPC: book3s: fix build error caused by gfn_to_hva_memslot() The build error was caused by built-in code calling functions implemented in modules. This error was introduced by commit 4d8b81abc4 ("KVM: introduce readonly memslot"). The patch fixes the build error by moving the function __gfn_to_hva_memslot() from kvm_main.c to kvm_host.h and making it "inline", so that the built-in function (kvmppc_h_enter) can use it. 
Acked-by: Paul Mackerras Signed-off-by: Gavin Shan Signed-off-by: Marcelo Tosatti --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2 +- include/linux/kvm_host.h | 6 ++++++ virt/kvm/kvm_main.c | 6 ------ 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 56ac1a5d9912..fb0e821622d4 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -197,7 +197,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, pa &= PAGE_MASK; } else { /* Translate to host virtual address */ - hva = gfn_to_hva_memslot(memslot, gfn); + hva = __gfn_to_hva_memslot(memslot, gfn); /* Look up the Linux PTE for the backing page */ pte_size = psize; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5972c9845ddb..9c0b3c3ae0a5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -773,6 +773,12 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) return search_memslots(slots, gfn); } +static inline unsigned long +__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +{ + return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; +} + static inline int memslot_id(struct kvm *kvm, gfn_t gfn) { return gfn_to_memslot(kvm, gfn)->id; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3416f8a31f63..6425906d7cec 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -984,12 +984,6 @@ static bool memslot_is_readonly(struct kvm_memory_slot *slot) return slot->flags & KVM_MEM_READONLY; } -static unsigned long __gfn_to_hva_memslot(struct kvm_memory_slot *slot, - gfn_t gfn) -{ - return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; -} - static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write) { -- cgit v1.2.3 From 2df72e9bc4c505d8357012f2924589f3d16f9d44 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Fri, 24 Aug 2012 15:54:57 -0300 Subject: KVM: split kvm_arch_flush_shadow Introducing kvm_arch_flush_shadow_memslot, to invalidate the translations of a single memory slot. 
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 8 +++++++- arch/powerpc/kvm/powerpc.c | 6 +++++- arch/s390/kvm/kvm-s390.c | 7 ++++++- arch/x86/kvm/x86.c | 8 +++++++- include/linux/kvm_host.h | 6 +++++- virt/kvm/kvm_main.c | 8 ++++---- 6 files changed, 34 insertions(+), 9 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index eac65380bd20..8b3a9c0e771d 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1613,11 +1613,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, return; } -void kvm_arch_flush_shadow(struct kvm *kvm) +void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); } +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_arch_flush_shadow_all(kvm); +} + long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 879b14a61403..4d213b8b0fb5 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -334,8 +334,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvmppc_core_commit_memory_region(kvm, mem); } +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} -void kvm_arch_flush_shadow(struct kvm *kvm) +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) { } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index e83df7f0fedd..ecced9d18986 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -969,7 +969,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, return; } -void kvm_arch_flush_shadow(struct kvm *kvm) +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) { } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 666da13c34fc..37797a090a8f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6447,12 +6447,18 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, spin_unlock(&kvm->mmu_lock); } -void kvm_arch_flush_shadow(struct kvm *kvm) +void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + kvm_arch_flush_shadow_all(kvm); +} + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9c0b3c3ae0a5..40791930bc15 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -458,7 +458,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, int user_alloc); bool kvm_largepages_enabled(void); void kvm_disable_largepages(void); -void kvm_arch_flush_shadow(struct kvm *kvm); +/* flush all memory translations */ +void kvm_arch_flush_shadow_all(struct kvm *kvm); +/* flush memory translations pointing to 'slot' */ +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, int nr_pages); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6425906d7cec..a4bf05be5fea 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -408,7 +408,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - kvm_arch_flush_shadow(kvm); + kvm_arch_flush_shadow_all(kvm); 
srcu_read_unlock(&kvm->srcu, idx); } @@ -582,7 +582,7 @@ static void kvm_destroy_vm(struct kvm *kvm) #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); #else - kvm_arch_flush_shadow(kvm); + kvm_arch_flush_shadow_all(kvm); #endif kvm_arch_destroy_vm(kvm); kvm_free_physmem(kvm); @@ -814,7 +814,7 @@ int __kvm_set_memory_region(struct kvm *kvm, * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_roots) */ - kvm_arch_flush_shadow(kvm); + kvm_arch_flush_shadow_memslot(kvm, slot); kfree(old_memslots); } @@ -854,7 +854,7 @@ int __kvm_set_memory_region(struct kvm *kvm, * mmio sptes. */ if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) - kvm_arch_flush_shadow(kvm); + kvm_arch_flush_shadow_all(kvm); kvm_free_physmem_slot(&old, &new); kfree(old_memslots); -- cgit v1.2.3 From 12d6e7538e2d418c08f082b1b44ffa5fb7270ed8 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Fri, 24 Aug 2012 15:54:58 -0300 Subject: KVM: perform an invalid memslot step for gpa base change PPC must flush all translations before the new memory slot is visible. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a4bf05be5fea..f41ea1262d51 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -791,7 +791,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* destroy any largepage mappings for dirty tracking */ } - if (!npages) { + if (!npages || base_gfn != old.base_gfn) { struct kvm_memory_slot *slot; r = -ENOMEM; @@ -807,8 +807,8 @@ int __kvm_set_memory_region(struct kvm *kvm, old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); - /* From this point no new shadow pages pointing to a deleted - * memslot will be created. + /* From this point no new shadow pages pointing to a deleted, + * or moved, memslot will be created. * * validation of sp->gfn happens in: * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) -- cgit v1.2.3 From 3b4dc3a031110753b9ba36432dbd21f989fcee56 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 28 Aug 2012 17:43:26 -0300 Subject: KVM: move postcommit flush to x86, as mmio sptes are x86 specific Other arches do not need this. Signed-off-by: Marcelo Tosatti v2: fix incorrect deletion of mmio sptes on gpa move (noticed by Takuya) Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 8 ++++++++ virt/kvm/kvm_main.c | 7 ------- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 37797a090a8f..6f6812ec8419 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6445,6 +6445,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); + /* + * If memory slot is created, or moved, we need to clear all + * mmio sptes. 
+ */ + if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) { + kvm_mmu_zap_all(kvm); + kvm_reload_remote_mmus(kvm); + } } void kvm_arch_flush_shadow_all(struct kvm *kvm) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f41ea1262d51..4fe02d900810 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -849,13 +849,6 @@ int __kvm_set_memory_region(struct kvm *kvm, kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); - /* - * If the new memory slot is created, we need to clear all - * mmio sptes. - */ - if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) - kvm_arch_flush_shadow_all(kvm); - kvm_free_physmem_slot(&old, &new); kfree(old_memslots); -- cgit v1.2.3 From 9fc77441e5e1bf80b794cc546d2243ee9f4afb75 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 16 Sep 2012 11:50:30 +0300 Subject: KVM: make processes waiting on vcpu mutex killable The vcpu mutex can be held for unlimited time, so taking it with mutex_lock on an ioctl is wrong: one process could be passed a vcpu fd and call this ioctl on the vcpu used by another process; it will then be unkillable until the owner exits. Call mutex_lock_killable instead and return its status. Note: mutex_lock_interruptible would be even nicer, but I am not sure all users are prepared to handle EINTR from these ioctls. They might misinterpret it as an error. Cleanup paths expect a vcpu that can't be used by any userspace, so this will always succeed - catch bugs by calling BUG_ON. Catch callers that don't check the return state by adding __must_check. Signed-off-by: Michael S. Tsirkin Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/x86.c | 12 +++++++++--- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 10 +++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) (limited to 'virt/kvm/kvm_main.c') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c4d451ed1573..19047eafa38d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6016,7 +6016,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) int r; vcpu->arch.mtrr_state.have_fixed = 1; - vcpu_load(vcpu); + r = vcpu_load(vcpu); + if (r) + return r; r = kvm_arch_vcpu_reset(vcpu); if (r == 0) r = kvm_mmu_setup(vcpu); @@ -6027,9 +6029,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { + int r; vcpu->arch.apf.msr_val = 0; - vcpu_load(vcpu); + r = vcpu_load(vcpu); + BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); @@ -6275,7 +6279,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { - vcpu_load(vcpu); + int r; + r = vcpu_load(vcpu); + BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 40791930bc15..80bfc880921e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -408,7 +408,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); -void vcpu_load(struct kvm_vcpu *vcpu); +int __must_check vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4fe02d900810..cc3f6dc506e4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -131,11 +131,12 @@ bool kvm_is_mmio_pfn(pfn_t pfn) /* * Switches to specified vcpu, until a matching vcpu_put() */ -void 
vcpu_load(struct kvm_vcpu *vcpu) +int vcpu_load(struct kvm_vcpu *vcpu) { int cpu; - mutex_lock(&vcpu->mutex); + if (mutex_lock_killable(&vcpu->mutex)) + return -EINTR; if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { /* The thread running this VCPU changed. */ struct pid *oldpid = vcpu->pid; @@ -148,6 +149,7 @@ void vcpu_load(struct kvm_vcpu *vcpu) preempt_notifier_register(&vcpu->preempt_notifier); kvm_arch_vcpu_load(vcpu, cpu); put_cpu(); + return 0; } void vcpu_put(struct kvm_vcpu *vcpu) @@ -1891,7 +1893,9 @@ static long kvm_vcpu_ioctl(struct file *filp, #endif - vcpu_load(vcpu); + r = vcpu_load(vcpu); + if (r) + return r; switch (ioctl) { case KVM_RUN: r = -EINVAL; -- cgit v1.2.3
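As a closing illustration of the readonly memslot support added in this series, here is a minimal userspace sketch; it is not part of the patches above, and the vm_fd, slot number, addresses, and helper name are made up for the example. It shows how a ROM region could be registered with the new KVM_MEM_READONLY flag; guest writes to the range would then surface as KVM_EXIT_MMIO exits, matching the api.txt hunk above.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: map a page-aligned host buffer 'rom' into the
	 * guest as read-only memory. Callers should first confirm the
	 * KVM_CAP_READONLY_MEM capability via KVM_CHECK_EXTENSION, since
	 * check_memory_region_flags() rejects KVM_MEM_READONLY on kernels
	 * and architectures that do not support it. */
	static int map_rom_readonly(int vm_fd, void *rom, __u64 gpa, __u64 size)
	{
		struct kvm_userspace_memory_region region = {
			.slot = 1,			/* any unused slot id */
			.flags = KVM_MEM_READONLY,
			.guest_phys_addr = gpa,
			.memory_size = size,
			.userspace_addr = (__u64)(unsigned long)rom,
		};

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}

With such a slot in place, read faults are satisfied normally through __gfn_to_pfn_memslot(), while write faults return KVM_PFN_ERR_RO_FAULT and are forwarded to userspace as mmio exits.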