author	Robin Murphy <robin.murphy@arm.com>	2018-10-11 16:56:42 +0100
committer	Joerg Roedel <jroedel@suse.de>	2018-11-06 16:30:39 +0100
commit	5d95f40e62e4f2ed3053e9b178471669736cf636 (patch)
tree	9e4e0c2dab5030cdd9394a75a9c4e8688bd88696 /drivers/iommu
parent	651022382c7f8da46cb4872a545ee1da6d097d2a (diff)
iommu: Do physical merging in iommu_map_sg()
The original motivation for iommu_map_sg() was to give IOMMU drivers the chance to map an IOVA-contiguous scatterlist as efficiently as they could. It turns out that there isn't really much driver-specific business involved there, so now that the default implementation is mandatory let's just improve that - the main thing we're after is to use larger pages wherever possible, and as long as domain->pgsize_bitmap reflects reality, iommu_map() can already do that in a generic way. All we need to do is detect physically-contiguous segments and batch them into a single map operation, since whatever we do here is transparent to our caller and not bound by any segment-length restrictions on the list itself.

Speaking of efficiency, there's really very little point in duplicating the checks that iommu_map() is going to do anyway, so those get cleared up in the process.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
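[Editorial illustration] To make the batching described above concrete outside the kernel, here is a minimal user-space sketch, not the patch itself: it walks an array of (phys, len) segments, merges physically-contiguous neighbours into one run, and issues a single map call per run. struct seg, do_map() and the addresses in main() are made-up stand-ins, not part of the IOMMU API.

#include <stdio.h>
#include <stddef.h>

struct seg { unsigned long phys; size_t len; };	/* stand-in for a scatterlist entry */

static void do_map(unsigned long iova, unsigned long phys, size_t len)
{
	/* stand-in for iommu_map(); just report the batched request */
	printf("map iova=%#lx phys=%#lx len=%zu\n", iova, phys, len);
}

static size_t map_segments(unsigned long iova, const struct seg *sg, unsigned int nents)
{
	unsigned long start = 0;
	size_t len = 0, mapped = 0;
	unsigned int i;

	for (i = 0; i <= nents; i++) {
		/* Flush the pending run when the next segment does not follow it
		 * physically, or once every segment has been consumed. */
		if (len && (i == nents || sg[i].phys != start + len)) {
			do_map(iova + mapped, start, len);
			mapped += len;
			len = 0;
		}
		if (i == nents)
			break;
		if (len) {
			len += sg[i].len;	/* extend the current contiguous run */
		} else {
			start = sg[i].phys;	/* start a new run */
			len = sg[i].len;
		}
	}
	return mapped;
}

int main(void)
{
	/* 0x1000 and 0x2000 are physically contiguous and merge into one 8 KiB
	 * request; 0x8000 starts a separate run. */
	struct seg sgl[] = { { 0x1000, 4096 }, { 0x2000, 4096 }, { 0x8000, 4096 } };

	map_segments(0x100000, sgl, 3);
	return 0;
}

With the two contiguous segments merged into a single 8 KiB request, a real iommu_map() call is then free to pick a larger IOMMU page size for that run, provided domain->pgsize_bitmap allows it.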
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/iommu.c	41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index edbdf5d6962c3..f8ec49e0f6c62 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1712,33 +1712,32 @@ EXPORT_SYMBOL_GPL(iommu_unmap_fast);
 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		    struct scatterlist *sg, unsigned int nents, int prot)
 {
-	struct scatterlist *s;
-	size_t mapped = 0;
-	unsigned int i, min_pagesz;
+	size_t len = 0, mapped = 0;
+	phys_addr_t start;
+	unsigned int i = 0;
 	int ret;
 
-	if (unlikely(domain->pgsize_bitmap == 0UL))
-		return 0;
-
-	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
+	while (i <= nents) {
+		phys_addr_t s_phys = sg_phys(sg);
 
-	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		if (len && s_phys != start + len) {
+			ret = iommu_map(domain, iova + mapped, start, len, prot);
+			if (ret)
+				goto out_err;
 
-		/*
-		 * We are mapping on IOMMU page boundaries, so offset within
-		 * the page must be 0. However, the IOMMU may support pages
-		 * smaller than PAGE_SIZE, so s->offset may still represent
-		 * an offset of that boundary within the CPU page.
-		 */
-		if (!IS_ALIGNED(s->offset, min_pagesz))
-			goto out_err;
+			mapped += len;
+			len = 0;
+		}
 
-		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
-		if (ret)
-			goto out_err;
+		if (len) {
+			len += sg->length;
+		} else {
+			len = sg->length;
+			start = s_phys;
+		}
 
-		mapped += s->length;
+		if (++i < nents)
+			sg = sg_next(sg);
 	}
 
 	return mapped;
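
[Editorial illustration] For orientation, a hedged caller-side sketch (not part of this patch) of how a driver might hand pages to iommu_map_sg(); the wrapper name example_map_pages(), the IOVA choice and the error policy are assumptions for illustration only.

#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative wrapper: build a scatterlist from an array of pages and map it
 * at a caller-chosen IOVA. Physically contiguous pages end up as merged
 * segments, which the reworked iommu_map_sg() can map with larger IOMMU pages. */
static int example_map_pages(struct iommu_domain *domain, unsigned long iova,
			     struct page **pages, unsigned int npages, int prot)
{
	struct sg_table sgt;
	size_t mapped;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, npages, 0,
					(unsigned long)npages * PAGE_SIZE,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* iommu_map_sg() returns the number of bytes successfully mapped. */
	mapped = iommu_map_sg(domain, iova, sgt.sgl, sgt.nents, prot);

	sg_free_table(&sgt);

	/* Treating a short mapping as -ENOMEM is an illustrative policy choice. */
	return mapped == (size_t)npages * PAGE_SIZE ? 0 : -ENOMEM;
}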