Diffstat (limited to 'arch/powerpc/kvm/book3s_64_vio.c')
 arch/powerpc/kvm/book3s_64_vio.c | 94 ++++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 9a3f2646ecc7e..62a8d03ba7e96 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -363,6 +363,40 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 	return ret;
 }
 
+static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
+		unsigned long tce)
+{
+	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+	enum dma_data_direction dir = iommu_tce_direction(tce);
+	struct kvmppc_spapr_tce_iommu_table *stit;
+	unsigned long ua = 0;
+
+	/* Allow userspace to poison TCE table */
+	if (dir == DMA_NONE)
+		return H_SUCCESS;
+
+	if (iommu_tce_check_gpa(stt->page_shift, gpa))
+		return H_TOO_HARD;
+
+	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+		return H_TOO_HARD;
+
+	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+		unsigned long hpa = 0;
+		struct mm_iommu_table_group_mem_t *mem;
+		long shift = stit->tbl->it_page_shift;
+
+		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
+		if (!mem)
+			return H_TOO_HARD;
+
+		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+			return H_TOO_HARD;
+	}
+
+	return H_SUCCESS;
+}
+
 static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
 {
 	unsigned long hpa = 0;
@@ -376,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
-		/* it_userspace allocation might be delayed */
-		return H_TOO_HARD;
+		return H_SUCCESS;
 
 	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
 	if (!mem)
@@ -401,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
 	long ret;
 
 	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (dir == DMA_NONE)
 		return H_SUCCESS;
@@ -449,15 +482,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		return H_TOO_HARD;
 
 	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
-		return H_HARDWARE;
+		return H_TOO_HARD;
 
 	if (mm_iommu_mapped_inc(mem))
-		return H_CLOSED;
+		return H_TOO_HARD;
 
 	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
 	if (WARN_ON_ONCE(ret)) {
 		mm_iommu_mapped_dec(mem);
-		return H_HARDWARE;
+		return H_TOO_HARD;
 	}
 
 	if (dir != DMA_NONE)
@@ -517,8 +550,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
 		ret = H_PARAMETER;
 		goto unlock_exit;
 	}
@@ -533,14 +565,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
 				entry, ua, dir);
 
-		if (ret == H_SUCCESS)
-			continue;
-
-		if (ret == H_TOO_HARD)
+		if (ret != H_SUCCESS) {
+			kvmppc_clear_tce(stit->tbl, entry);
 			goto unlock_exit;
-
-		WARN_ON_ONCE(1);
-		kvmppc_clear_tce(stit->tbl, entry);
+		}
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -583,7 +611,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		return ret;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
 		ret = H_TOO_HARD;
 		goto unlock_exit;
 	}
@@ -599,10 +627,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		ret = kvmppc_tce_validate(stt, tce);
 		if (ret != H_SUCCESS)
 			goto unlock_exit;
+	}
+
+	for (i = 0; i < npages; ++i) {
+		/*
+		 * This looks unsafe, because we validate, then regrab
+		 * the TCE from userspace which could have been changed by
+		 * another thread.
+		 *
+		 * But it actually is safe, because the relevant checks will be
+		 * re-executed in the following code. If userspace tries to
+		 * change this dodgily it will result in a messier failure mode
+		 * but won't threaten the host.
+		 */
+		if (get_user(tce, tces + i)) {
+			ret = H_TOO_HARD;
+			goto unlock_exit;
+		}
+		tce = be64_to_cpu(tce);
 
-		if (kvmppc_gpa_to_ua(vcpu->kvm,
-				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -610,14 +654,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 					stit->tbl, entry + i, ua,
 					iommu_tce_direction(tce));
 
-			if (ret == H_SUCCESS)
-				continue;
-
-			if (ret == H_TOO_HARD)
+			if (ret != H_SUCCESS) {
+				kvmppc_clear_tce(stit->tbl, entry);
 				goto unlock_exit;
-
-			WARN_ON_ONCE(1);
-			kvmppc_clear_tce(stit->tbl, entry);
+			}
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);
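
Note on the comment in kvmppc_h_put_tce_indirect() above: the patch splits the hypercall into a first pass that validates every TCE with kvmppc_tce_validate(), and a second pass that re-reads each TCE from userspace with get_user() and maps it, relying on the second pass to repeat the relevant checks. A minimal standalone sketch of that validate-then-reread pattern, in plain user-space C (not kernel code; entry_ok, process_batch, shared, LIMIT and NENTRIES are hypothetical names invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define NENTRIES 4
#define LIMIT    0x1000u

/* Memory that another thread may rewrite between the two passes. */
static volatile uint32_t shared[NENTRIES] = { 1, 2, 3, 4 };

static int entry_ok(uint32_t v)
{
	return v < LIMIT;
}

static int process_batch(void)
{
	uint32_t v;
	int i;

	/* Pass 1: validate everything so the whole batch can fail up front. */
	for (i = 0; i < NENTRIES; i++)
		if (!entry_ok(shared[i]))
			return -1;

	/*
	 * Pass 2: re-read each entry. A concurrent writer may have changed
	 * it since pass 1, so the check is repeated. A racing update can
	 * make the batch fail part-way through (the "messier failure mode"
	 * of the kernel comment), but a value that skipped validation can
	 * never be consumed.
	 */
	for (i = 0; i < NENTRIES; i++) {
		v = shared[i];	/* read once into a local; only 'v' is used */
		if (!entry_ok(v))
			return -1;
		printf("entry %d = %u\n", i, (unsigned)v);
	}
	return 0;
}

int main(void)
{
	return process_batch() ? 1 : 0;
}

The design point mirrors the comment: reading each value exactly once into a local and re-checking it before use keeps the host safe from a racing writer, at the cost of a less tidy failure mode for a misbehaving userspace.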