author    Linus Torvalds <torvalds@linux-foundation.org>    2018-12-28 14:12:21 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-12-28 14:12:21 -0800
commit    af7ddd8a627c62a835524b3f5b471edbbbcce025 (patch)
tree      af9777ddef6d394c7cc01fca599328b584ca2bc1
parent    fe2b0cdabcd9e6aeca66a104bc03576946e5fee2 (diff)
parent    8b1cce9f5832a8eda17d37a3c49fb7dd2d650f46 (diff)
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

   - provide a common DMA_MAPPING_ERROR definition and avoid indirect
     calls for dma_map_* error checking

   - use direct calls for the DMA direct mapping case, avoiding huge
     retpoline overhead for high performance workloads

   - merge the swiotlb dma_map_ops into dma-direct

   - provide a generic remapping DMA consistent allocator for
     architectures that have devices that perform DMA that is not cache
     coherent. Based on the existing arm64 implementation and also used
     for csky now.

   - improve the dma-debug infrastructure, including dynamic allocation
     of entries (Robin Murphy)

   - default to providing chaining scatterlist everywhere, with opt-outs
     for the few architectures (alpha, parisc, most arm32 variants) that
     can't cope with it

   - misc sparc32 dma-related cleanups

   - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
     replace it with the generic noncoherent infrastructure

   - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

   - move the dummy dma ops for not DMA capable devices from arm64 to
     common code (Robin Murphy)

   - ensure dma_alloc_coherent returns zeroed memory to avoid kernel
     data leaks through userspace. We already did this for most common
     architectures, but this ensures we do it everywhere.
     dma_zalloc_coherent has been deprecated and can hopefully be
     removed after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
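For illustration only (a sketch, not part of the patch; my_map_buffer, my_alloc_coherent, my_dev, buf and len are placeholder names): the two changes most drivers will notice are that mapping failures are now reported through the common DMA_MAPPING_ERROR value, still tested with dma_mapping_error(), and that dma_alloc_coherent() is now guaranteed to return zeroed memory, which is what makes the deprecated dma_zalloc_coherent() wrapper redundant:

    #include <linux/dma-mapping.h>

    static int my_map_buffer(struct device *my_dev, void *buf, size_t len,
                             dma_addr_t *out)
    {
            /* All dma_map_ops implementations now signal failure with the
             * shared DMA_MAPPING_ERROR sentinel instead of per-arch values,
             * so this check no longer needs an indirect ->mapping_error
             * call. */
            dma_addr_t addr = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(my_dev, addr))
                    return -ENOMEM;
            *out = addr;
            return 0;
    }

    static void *my_alloc_coherent(struct device *my_dev, size_t len,
                                   dma_addr_t *handle)
    {
            /* dma_alloc_coherent() now zeroes the returned memory on every
             * architecture, so callers need neither an explicit memset()
             * nor the deprecated dma_zalloc_coherent() wrapper. */
            return dma_alloc_coherent(my_dev, len, handle, GFP_KERNEL);
    }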
-rw-r--r-- Documentation/DMA-API.txt | 29
-rw-r--r-- Documentation/features/io/sg-chain/arch-support.txt | 33
-rw-r--r-- Documentation/x86/x86_64/boot-options.txt | 5
-rw-r--r-- arch/alpha/Kconfig | 2
-rw-r--r-- arch/alpha/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/alpha/kernel/pci_iommu.c | 16
-rw-r--r-- arch/arc/Kconfig | 2
-rw-r--r-- arch/arc/mm/cache.c | 2
-rw-r--r-- arch/arc/mm/dma.c | 2
-rw-r--r-- arch/arm/Kconfig | 4
-rw-r--r-- arch/arm/common/dmabounce.c | 12
-rw-r--r-- arch/arm/include/asm/dma-iommu.h | 2
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/arm/mm/dma-mapping-nommu.c | 14
-rw-r--r-- arch/arm/mm/dma-mapping.c | 39
-rw-r--r-- arch/arm64/Kconfig | 3
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 8
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 286
-rw-r--r-- arch/c6x/Kconfig | 1
-rw-r--r-- arch/c6x/mm/dma-coherent.c | 5
-rw-r--r-- arch/csky/Kconfig | 3
-rw-r--r-- arch/csky/mm/dma-mapping.c | 142
-rw-r--r-- arch/h8300/Kconfig | 1
-rw-r--r-- arch/hexagon/Kconfig | 1
-rw-r--r-- arch/ia64/Kconfig | 4
-rw-r--r-- arch/ia64/hp/common/hwsw_iommu.c | 2
-rw-r--r-- arch/ia64/hp/common/sba_iommu.c | 87
-rw-r--r-- arch/ia64/kernel/dma-mapping.c | 21
-rw-r--r-- arch/ia64/mm/init.c | 19
-rw-r--r-- arch/ia64/sn/pci/pci_dma.c | 8
-rw-r--r-- arch/m68k/Kconfig | 1
-rw-r--r-- arch/m68k/kernel/dma.c | 2
-rw-r--r-- arch/microblaze/Kconfig | 1
-rw-r--r-- arch/microblaze/mm/consistent.c | 2
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/include/asm/dma-mapping.h | 4
-rw-r--r-- arch/mips/include/asm/jazzdma.h | 6
-rw-r--r-- arch/mips/jazz/jazzdma.c | 16
-rw-r--r-- arch/nds32/Kconfig | 1
-rw-r--r-- arch/nios2/Kconfig | 1
-rw-r--r-- arch/openrisc/Kconfig | 1
-rw-r--r-- arch/openrisc/kernel/dma.c | 2
-rw-r--r-- arch/parisc/Kconfig | 2
-rw-r--r-- arch/parisc/kernel/pci-dma.c | 4
-rw-r--r-- arch/parisc/kernel/setup.c | 4
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h | 1
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 4
-rw-r--r-- arch/powerpc/kernel/dma-iommu.c | 6
-rw-r--r-- arch/powerpc/kernel/dma-swiotlb.c | 17
-rw-r--r-- arch/powerpc/kernel/iommu.c | 28
-rw-r--r-- arch/powerpc/platforms/cell/iommu.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/vio.c | 3
-rw-r--r-- arch/riscv/Kconfig | 1
-rw-r--r-- arch/riscv/include/asm/dma-mapping.h | 15
-rw-r--r-- arch/s390/Kconfig | 2
-rw-r--r-- arch/s390/pci/pci_dma.c | 20
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sparc/Kconfig | 2
-rw-r--r-- arch/sparc/include/asm/dma-mapping.h | 8
-rw-r--r-- arch/sparc/include/asm/dma.h | 48
-rw-r--r-- arch/sparc/include/asm/leon.h | 9
-rw-r--r-- arch/sparc/include/asm/pci.h | 53
-rw-r--r-- arch/sparc/include/asm/pci_32.h | 41
-rw-r--r-- arch/sparc/include/asm/pci_64.h | 52
-rw-r--r-- arch/sparc/kernel/iommu.c | 12
-rw-r--r-- arch/sparc/kernel/iommu_common.h | 2
-rw-r--r-- arch/sparc/kernel/ioport.c | 243
-rw-r--r-- arch/sparc/kernel/pci_sun4v.c | 14
-rw-r--r-- arch/sparc/mm/io-unit.c | 80
-rw-r--r-- arch/sparc/mm/iommu.c | 160
-rw-r--r-- arch/unicore32/Kconfig | 1
-rw-r--r-- arch/x86/Kconfig | 2
-rw-r--r-- arch/x86/kernel/amd_gart_64.c | 63
-rw-r--r-- arch/x86/kernel/pci-calgary_64.c | 30
-rw-r--r-- arch/x86/kernel/pci-dma.c | 2
-rw-r--r-- arch/x86/kernel/pci-swiotlb.c | 4
-rw-r--r-- arch/x86/mm/mem_encrypt.c | 7
-rw-r--r-- arch/x86/pci/sta2x11-fixup.c | 1
-rw-r--r-- arch/xtensa/Kconfig | 3
-rw-r--r-- arch/xtensa/kernel/pci-dma.c | 2
-rw-r--r-- drivers/acpi/scan.c | 5
-rw-r--r-- drivers/base/platform.c | 34
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 31
-rw-r--r-- drivers/iommu/dma-iommu.c | 23
-rw-r--r-- drivers/iommu/intel-iommu.c | 26
-rw-r--r-- drivers/misc/mic/host/mic_boot.c | 2
-rw-r--r-- drivers/parisc/ccio-dma.c | 10
-rw-r--r-- drivers/parisc/sba_iommu.c | 10
-rw-r--r-- drivers/pci/controller/vmd.c | 48
-rw-r--r-- drivers/pci/pci-driver.c | 4
-rw-r--r-- drivers/xen/swiotlb-xen.c | 36
-rw-r--r-- include/asm-generic/dma-mapping.h | 2
-rw-r--r-- include/linux/dma-debug.h | 34
-rw-r--r-- include/linux/dma-direct.h | 19
-rw-r--r-- include/linux/dma-iommu.h | 1
-rw-r--r-- include/linux/dma-mapping.h | 350
-rw-r--r-- include/linux/dma-noncoherent.h | 7
-rw-r--r-- include/linux/scatterlist.h | 6
-rw-r--r-- include/linux/swiotlb.h | 77
-rw-r--r-- kernel/dma/Kconfig | 14
-rw-r--r-- kernel/dma/Makefile | 5
-rw-r--r-- kernel/dma/debug.c | 259
-rw-r--r-- kernel/dma/direct.c | 222
-rw-r--r-- kernel/dma/dummy.c | 39
-rw-r--r-- kernel/dma/mapping.c | 223
-rw-r--r-- kernel/dma/remap.c | 256
-rw-r--r-- kernel/dma/swiotlb.c | 253
-rw-r--r-- kernel/dma/virt.c | 2
-rw-r--r-- lib/Kconfig | 2
-rw-r--r-- lib/scatterlist.c | 2
112 files changed, 1408 insertions(+), 2340 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index ac66ae2509a91..e133ccd602280 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -60,15 +60,6 @@ the returned memory, like GFP_DMA).
::
- void *
- dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-
-Wraps dma_alloc_coherent() and also zeroes the returned memory if the
-allocation attempt succeeded.
-
-::
-
void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
@@ -717,12 +708,15 @@ dma-api/num_errors The number in this file shows how many
dma-api/min_free_entries This read-only file can be read to get the
minimum number of free dma_debug_entries the
allocator has ever seen. If this value goes
- down to zero the code will disable itself
- because it is not longer reliable.
+ down to zero the code will attempt to increase
+ nr_total_entries to compensate.
dma-api/num_free_entries The current number of free dma_debug_entries
in the allocator.
+dma-api/nr_total_entries The total number of dma_debug_entries in the
+ allocator, both free and used.
+
dma-api/driver-filter You can write a name of a driver into this file
to limit the debug output to requests from that
particular driver. Write an empty string to
@@ -742,10 +736,15 @@ driver filter at boot time. The debug code will only print errors for that
driver afterwards. This filter can be disabled or changed later using debugfs.
When the code disables itself at runtime this is most likely because it ran
-out of dma_debug_entries. These entries are preallocated at boot. The number
-of preallocated entries is defined per architecture. If it is too low for you
-boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
-architectural default.
+out of dma_debug_entries and was unable to allocate more on-demand. 65536
+entries are preallocated at boot - if this is too low for you boot with
+'dma_debug_entries=<your_desired_number>' to overwrite the default. Note
+that the code allocates entries in batches, so the exact number of
+preallocated entries may be greater than the actual number requested. The
+code will print to the kernel log each time it has dynamically allocated
+as many entries as were initially preallocated. This is to indicate that a
+larger preallocation size may be appropriate, or if it happens continually
+that a driver may be leaking mappings.
::
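For illustration (not part of the patch): with dynamic allocation in place, the boot parameter only sizes the initial preallocation, so a system whose kernel log keeps reporting dynamic growth of the entry pool might be booted with a larger value on the kernel command line, e.g.:

    dma_debug_entries=131072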
diff --git a/Documentation/features/io/sg-chain/arch-support.txt b/Documentation/features/io/sg-chain/arch-support.txt
deleted file mode 100644
index 6554f0372c3fc..0000000000000
--- a/Documentation/features/io/sg-chain/arch-support.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Feature name: sg-chain
-# Kconfig: ARCH_HAS_SG_CHAIN
-# description: arch supports chained scatter-gather lists
-#
- -----------------------
- | arch |status|
- -----------------------
- | alpha: | TODO |
- | arc: | ok |
- | arm: | ok |
- | arm64: | ok |
- | c6x: | TODO |
- | h8300: | TODO |
- | hexagon: | TODO |
- | ia64: | ok |
- | m68k: | TODO |
- | microblaze: | TODO |
- | mips: | TODO |
- | nds32: | TODO |
- | nios2: | TODO |
- | openrisc: | TODO |
- | parisc: | TODO |
- | powerpc: | ok |
- | riscv: | TODO |
- | s390: | ok |
- | sh: | TODO |
- | sparc: | ok |
- | um: | TODO |
- | unicore32: | TODO |
- | x86: | ok |
- | xtensa: | TODO |
- -----------------------
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index ad6d2a80cf05a..abc53886655ec 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -209,7 +209,7 @@ IOMMU (input/output memory management unit)
mapping with memory protection, etc.
Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
- iommu=[<size>][,noagp][,off][,force][,noforce][,leak[=<nr_of_leak_pages>]
+ iommu=[<size>][,noagp][,off][,force][,noforce]
[,memaper[=<order>]][,merge][,fullflush][,nomerge]
[,noaperture][,calgary]
@@ -228,9 +228,6 @@ IOMMU (input/output memory management unit)
allowed Overwrite iommu off workarounds for specific chipsets.
fullflush Flush IOMMU on each allocation (default).
nofullflush Don't use IOMMU fullflush.
- leak Turn on simple iommu leak tracing (only when
- CONFIG_IOMMU_LEAK is on). Default number of leak pages
- is 20.
memaper[=<order>] Allocate an own aperture over RAM with size 32MB<<order.
(default: order=1, i.e. 64MB)
merge Do scatter-gather (SG) merging. Implies "force"
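For illustration (not part of the patch): the remaining options combine as documented above, so forcing the IOMMU on with per-allocation flushing would look like:

    iommu=force,fullflush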
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5b4f883634538..5da6ff54b3e75 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,6 +5,7 @@ config ALPHA
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_NO_PREEMPT
+ select ARCH_NO_SG_CHAIN
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_AOUT
select HAVE_IDE
@@ -202,7 +203,6 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
depends on BROKEN
- select DMA_DIRECT_OPS
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
of the first-generation Alpha systems. A number of these systems
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 8beeafd4f68e4..0ee6a5c99b16b 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_ALPHA_JENSEN
- return &dma_direct_ops;
+ return NULL;
#else
return &alpha_pci_ops;
#endif
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 46e08e0d91818..aa0f50d0f8237 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -291,7 +291,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
use direct_map above, it now must be considered an error. */
if (! alpha_mv.mv_pci_tbi) {
printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
- return 0;
+ return DMA_MAPPING_ERROR;
}
arena = hose->sg_pci;
@@ -307,7 +307,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
if (dma_ofs < 0) {
printk(KERN_WARNING "pci_map_single failed: "
"could not allocate dma page tables\n");
- return 0;
+ return DMA_MAPPING_ERROR;
}
paddr &= PAGE_MASK;
@@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
gfp &= ~GFP_DMA;
try_again:
- cpu_addr = (void *)__get_free_pages(gfp, order);
+ cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
if (! cpu_addr) {
printk(KERN_INFO "pci_alloc_consistent: "
"get_free_pages failed from %pf\n",
@@ -455,7 +455,7 @@ try_again:
memset(cpu_addr, 0, size);
*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
- if (*dma_addrp == 0) {
+ if (*dma_addrp == DMA_MAPPING_ERROR) {
free_pages((unsigned long)cpu_addr, order);
if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
return NULL;
@@ -671,7 +671,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_address
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
sg->length, dac_allowed);
- return sg->dma_address != 0;
+ return sg->dma_address != DMA_MAPPING_ERROR;
}
start = sg;
@@ -935,11 +935,6 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
return 0;
}
-static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == 0;
-}
-
const struct dma_map_ops alpha_pci_ops = {
.alloc = alpha_pci_alloc_coherent,
.free = alpha_pci_free_coherent,
@@ -947,7 +942,6 @@ const struct dma_map_ops alpha_pci_ops = {
.unmap_page = alpha_pci_unmap_page,
.map_sg = alpha_pci_map_sg,
.unmap_sg = alpha_pci_unmap_sg,
- .mapping_error = alpha_pci_mapping_error,
.dma_supported = alpha_pci_supported,
};
EXPORT_SYMBOL(alpha_pci_ops);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index dadb494d83fd4..ca8e26d045a99 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -13,12 +13,10 @@ config ARC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_SG_CHAIN
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index cf9619d4efb4f..4135abec3fb09 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1294,7 +1294,7 @@ void __init arc_cache_init_master(void)
/*
* In case of IOC (say IOC+SLC case), pointers above could still be set
* but end up not being relevant as the first function in chain is not
- * called at all for @dma_direct_ops
+ * called at all for devices using coherent DMA.
* arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
*/
}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index db203ff69ccfa..1525ac00fd02e 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -33,7 +33,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
*/
BUG_ON(gfp & __GFP_HIGHMEM);
- page = alloc_pages(gfp, order);
+ page = alloc_pages(gfp | __GFP_ZERO, order);
if (!page)
return NULL;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5c0305585a0a2..2196aac0e45c8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,6 +19,7 @@ config ARM
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
@@ -29,7 +30,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
- select DMA_DIRECT_OPS if !MMU
+ select DMA_REMAP if MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select GENERIC_ALLOCATOR
@@ -118,7 +119,6 @@ config ARM
<http://www.arm.linux.org.uk/>.
config ARM_HAS_SG_CHAIN
- select ARCH_HAS_SG_CHAIN
bool
config ARM_DMA_USE_IOMMU
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9a92de63426ff..5ba4622030cac 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -257,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -327,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
if (ret == 0) {
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -336,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -453,11 +453,6 @@ static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
return arm_dma_ops.dma_supported(dev, dma_mask);
}
-static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return arm_dma_ops.mapping_error(dev, dma_addr);
-}
-
static const struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -472,7 +467,6 @@ static const struct dma_map_ops dmabounce_ops = {
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.dma_supported = dmabounce_dma_supported,
- .mapping_error = dmabounce_mapping_error,
};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 6821f1249300d..772f48ef84b75 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,8 +9,6 @@
#include <linux/dma-debug.h>
#include <linux/kref.h>
-#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
-
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 965b7c846ecb1..31d3b96f0f4b1 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
+ return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
}
#ifdef __arch_page_to_dma
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 712416ecd8e6c..f304b10e23a4c 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -22,7 +22,7 @@
#include "dma.h"
/*
- * dma_direct_ops is used if
+ * The generic direct mapping code is used if
* - MMU/MPU is off
* - cpu is v7m w/o cache support
* - device is coherent
@@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = {
};
EXPORT_SYMBOL(arm_nommu_dma_ops);
-static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
-{
- return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
-}
-
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- const struct dma_map_ops *dma_ops;
-
if (IS_ENABLED(CONFIG_CPU_V7M)) {
/*
* Cache support for v7m is optional, so can be treated as
@@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
}
- dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
-
- set_dma_ops(dev, dma_ops);
+ if (!dev->archdata.dma_coherent)
+ set_dma_ops(dev, &arm_nommu_dma_ops);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 78de138aa66dc..f1e2922e447cd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == ARM_MAPPING_ERROR;
-}
-
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp &= ~(__GFP_COMP);
args.gfp = gfp;
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? dev_get_cma_area(dev) : false;
@@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (i == mapping->nr_bitmaps) {
if (extend_iommu_mapping(mapping)) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (start > mapping->bits) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
bitmap_set(mapping->bitmaps[i], start, count);
@@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
int i;
dma_addr = __alloc_iova(mapping, size);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
iova = dma_addr;
@@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
fail:
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
__free_iova(mapping, dma_addr, size);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
return NULL;
*handle = __iommu_create_mapping(dev, &page, size, attrs);
- if (*handle == ARM_MAPPING_ERROR)
+ if (*handle == DMA_MAPPING_ERROR)
goto err_mapping;
return addr;
@@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
void *addr = NULL;
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = __iommu_create_mapping(dev, pages, size, attrs);
- if (*handle == ARM_MAPPING_ERROR)
+ if (*handle == DMA_MAPPING_ERROR)
goto err_buffer;
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int prot;
size = PAGE_ALIGN(size);
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
iova_base = iova = __alloc_iova(mapping, size);
- if (iova == ARM_MAPPING_ERROR)
+ if (iova == DMA_MAPPING_ERROR)
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
for (i = 1; i < nents; i++) {
s = sg_next(s);
- s->dma_address = ARM_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
int ret, prot, len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs);
@@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/**
@@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/**
@@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
@@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b7bf0fc190da..ff9291872372a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,7 +23,6 @@ config ARM64
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -81,7 +80,7 @@ config ARM64
select CPU_PM if (SUSPEND || CPU_IDLE)
select CRC32
select DCACHE_WORD_ACCESS
- select DMA_DIRECT_OPS
+ select DMA_DIRECT_REMAP
select EDAC_SUPPORT
select FRAME_POINTER
select GENERIC_ALLOCATOR
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index c41f3fb1446ce..95dbf3ef735af 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,15 +24,9 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-extern const struct dma_map_ops dummy_dma_ops;
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- /*
- * We expect no ISA devices, and all other DMA masters are expected to
- * have someone call arch_setup_dma_ops at device creation time.
- */
- return &dummy_dma_ops;
+ return NULL;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a537044060991..fb0908456a1f9 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,113 +33,6 @@
#include <asm/cacheflush.h>
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
- unsigned long val;
- void *ptr = NULL;
-
- if (!atomic_pool) {
- WARN(1, "coherent pool not initialised!\n");
- return NULL;
- }
-
- val = gen_pool_alloc(atomic_pool, size);
- if (val) {
- phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
- *ret_page = phys_to_page(phys);
- ptr = (void *)val;
- memset(ptr, 0, size);
- }
-
- return ptr;
-}
-
-static bool __in_atomic_pool(void *start, size_t size)
-{
- return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
-}
-
-static int __free_from_pool(void *start, size_t size)
-{
- if (!__in_atomic_pool(start, size))
- return 0;
-
- gen_pool_free(atomic_pool, (unsigned long)start, size);
-
- return 1;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs)
-{
- struct page *page;
- void *ptr, *coherent_ptr;
- pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
-
- size = PAGE_ALIGN(size);
-
- if (!gfpflags_allow_blocking(flags)) {
- struct page *page = NULL;
- void *addr = __alloc_from_pool(size, &page, flags);
-
- if (addr)
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
-
- return addr;
- }
-
- ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
- if (!ptr)
- goto no_mem;
-
- /* remove any dirty cache lines on the kernel alias */
- __dma_flush_area(ptr, size);
-
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, __builtin_return_address(0));
- if (!coherent_ptr)
- goto no_map;
-
- return coherent_ptr;
-
-no_map:
- dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
-no_mem:
- return NULL;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
- void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
-
- vunmap(vaddr);
- dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
- }
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
-
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
@@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ __dma_flush_area(page_address(page), size);
+}
+
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
struct page *page, size_t size)
@@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
}
#endif /* CONFIG_IOMMU_DMA */
-static int __init atomic_pool_init(void)
-{
- pgprot_t prot = __pgprot(PROT_NORMAL_NC);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
- struct page *page;
- void *addr;
- unsigned int pool_size_order = get_order(atomic_pool_size);
-
- if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
- else
- page = alloc_pages(GFP_DMA32, pool_size_order);
-
- if (page) {
- int ret;
- void *page_addr = page_address(page);
-
- memset(page_addr, 0, atomic_pool_size);
- __dma_flush_area(page_addr, atomic_pool_size);
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- goto free_page;
-
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
- VM_USERMAP, prot, atomic_pool_init);
-
- if (!addr)
- goto destroy_genpool;
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page),
- atomic_pool_size, -1);
- if (ret)
- goto remove_mapping;
-
- gen_pool_set_algo(atomic_pool,
- gen_pool_first_fit_order_align,
- NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
- return 0;
- }
- goto out;
-
-remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
-destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
-free_page:
- if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
-out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
- return -ENOMEM;
-}
-
-/********************************************
- * The following APIs are for dummy DMA ops *
- ********************************************/
-
-static void *__dummy_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- return NULL;
-}
-
-static void __dummy_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
-}
-
-static int __dummy_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- return -ENXIO;
-}
-
-static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static void __dummy_sync_single(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void __dummy_sync_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
-}
-
-static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return 1;
-}
-
-static int __dummy_dma_supported(struct device *hwdev, u64 mask)
-{
- return 0;
-}
-
-const struct dma_map_ops dummy_dma_ops = {
- .alloc = __dummy_alloc,
- .free = __dummy_free,
- .mmap = __dummy_mmap,
- .map_page = __dummy_map_page,
- .unmap_page = __dummy_unmap_page,
- .map_sg = __dummy_map_sg,
- .unmap_sg = __dummy_unmap_sg,
- .sync_single_for_cpu = __dummy_sync_single,
- .sync_single_for_device = __dummy_sync_single,
- .sync_sg_for_cpu = __dummy_sync_sg,
- .sync_sg_for_device = __dummy_sync_sg,
- .mapping_error = __dummy_mapping_error,
- .dma_supported = __dummy_dma_supported,
-};
-EXPORT_SYMBOL(dummy_dma_ops);
-
static int __init arm64_dma_init(void)
{
WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
TAINT_CPU_OUT_OF_SPEC,
"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
ARCH_DMA_MINALIGN, cache_line_size());
-
- return atomic_pool_init();
+ return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
@@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
page = alloc_pages(gfp, get_order(size));
addr = page ? page_address(page) : NULL;
} else {
- addr = __alloc_from_pool(size, &page, gfp);
+ addr = dma_alloc_from_pool(size, &page, gfp);
}
if (!addr)
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
if (coherent)
__free_pages(page, get_order(size));
else
- __free_from_pool(addr, size);
+ dma_free_from_pool(addr, size);
addr = NULL;
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
@@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);
return NULL;
@@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* coherent devices.
* Hence how dodgy the below logic looks...
*/
- if (__in_atomic_pool(cpu_addr, size)) {
+ if (dma_in_atomic_pool(cpu_addr, size)) {
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
- __free_from_pool(cpu_addr, size);
+ dma_free_from_pool(cpu_addr, size);
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
struct page *page = vmalloc_to_page(cpu_addr);
@@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- !iommu_dma_mapping_error(dev, dev_addr))
+ dev_addr != DMA_MAPPING_ERROR)
__dma_map_area(page_address(page) + offset, size, dir);
return dev_addr;
@@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
- .mapping_error = iommu_dma_mapping_error,
};
static int __init __iommu_dma_init(void)
@@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- if (!dev->dma_ops)
- dev->dma_ops = &swiotlb_dma_ops;
-
dev->dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 84420109113d5..456e154674d15 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -9,7 +9,6 @@ config C6X
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select CLKDEV_LOOKUP
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 01305c7872010..75b79571732c1 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order)
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, unsigned long attrs)
{
+ void *ret;
u32 paddr;
int order;
@@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (!paddr)
return NULL;
- return phys_to_virt(paddr);
+ ret = phys_to_virt(paddr);
+ memset(ret, 0, 1 << order);
+ return ret;
}
/*
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index cb64f8dacd08e..37bed8aadf95b 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -7,8 +7,7 @@ config CSKY
select COMMON_CLK
select CLKSRC_MMIO
select CLKSRC_OF
- select DMA_DIRECT_OPS
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_REMAP
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select DW_APB_TIMER_OF
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 85437b21e045d..80783bb71c5cb 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,73 +14,13 @@
#include <linux/version.h>
#include <asm/cache.h>
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
static int __init atomic_pool_init(void)
{
- struct page *page;
- size_t size = atomic_pool_size;
- void *ptr;
- int ret;
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- BUG();
-
- page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
- if (!page)
- BUG();
-
- ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!ptr)
- BUG();
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
- page_to_phys(page), atomic_pool_size, -1);
- if (ret)
- BUG();
-
- gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
- atomic_pool_size / 1024);
-
- pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
- page_to_phys(page));
-
- return 0;
+ return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
- dma_addr_t *dma_handle)
-{
- unsigned long addr;
-
- addr = gen_pool_alloc(atomic_pool, size);
- if (addr)
- *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
- return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
+void arch_dma_prep_coherent(struct page *page, size_t size)
{
if (PageHighMem(page)) {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
}
}
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *vaddr;
- struct page *page;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (DMA_ATTR_NON_CONSISTENT & attrs) {
- pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
- return NULL;
- }
-
- if (IS_ENABLED(CONFIG_DMA_CMA))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- gfp);
- else
- page = alloc_pages(gfp, get_order(size));
-
- if (!page) {
- pr_err("csky %s no more free pages.\n", __func__);
- return NULL;
- }
-
- *dma_handle = page_to_phys(page);
-
- __dma_clear_buffer(page, size);
-
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
- return page;
-
- vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
- pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
- if (!vaddr)
- BUG();
-
- return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
- struct device *dev,
- size_t size,
- void *vaddr,
- dma_addr_t dma_handle,
- unsigned long attrs
- )
-{
- struct page *page = phys_to_page(dma_handle);
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if ((unsigned int)vaddr >= VMALLOC_START)
- dma_common_free_remap(vaddr, size, VM_USERMAP);
-
- if (IS_ENABLED(CONFIG_DMA_CMA))
- dma_release_from_contiguous(dev, page, count);
- else
- __free_pages(page, get_order(size));
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- if (gfpflags_allow_blocking(gfp))
- return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
- attrs);
- else
- return csky_dma_alloc_atomic(dev, size, dma_handle);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
- csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
- else
- csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
-}
-
static inline void cache_op(phys_addr_t paddr, size_t size,
void (*fn)(unsigned long start, unsigned long end))
{
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index d19c6b16cd5d0..6472a06854703 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -22,7 +22,6 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
- select DMA_DIRECT_OPS
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 2b688af379e6d..d71036c598de5 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -31,7 +31,6 @@ config HEXAGON
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
- select DMA_DIRECT_OPS
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 36773def6920b..cbf6c67c7166e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,8 +28,8 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
- select ARCH_HAS_DMA_MARK_CLEAN
- select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 58969039bed2f..8840ed97712f2 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev)
const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
if (use_swiotlb(dev))
- return &swiotlb_dma_ops;
+ return NULL;
return &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e8a93b07283e4..5a361e51cb1ef 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
}
/**
- * sba_map_single_attrs - map one buffer and return IOVA for DMA
+ * sba_map_page - map one buffer and return IOVA for DMA
* @dev: instance of PCI owned by the driver that's asking.
- * @addr: driver buffer to map.
- * @size: number of bytes to map in driver buffer.
- * @dir: R/W or both.
+ * @page: page to map
+ * @poff: offset into page
+ * @size: number of bytes to map
+ * @dir: dma direction
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
@@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
** Device is bit capable of DMA'ing to the buffer...
** just return the PCI address of ptr
*/
- DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+ DBG_BYPASS("sba_map_page() bypass mask/addr: "
"0x%lx/0x%lx\n",
to_pci_dev(dev)->dma_mask, pci_addr);
return pci_addr;
@@ -966,14 +967,14 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
- if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
+ if (sba_check_pdir(ioc,"Check before sba_map_page()"))
panic("Sanity check failed");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
pide = sba_alloc_range(ioc, dev, size);
if (pide < 0)
- return 0;
+ return DMA_MAPPING_ERROR;
iovp = (dma_addr_t) pide << iovp_shift;
@@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
/* form complete address */
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
- sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
+ sba_check_pdir(ioc,"Check after sba_map_page()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
return SBA_IOVA(ioc, iovp, offset);
}
-static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return sba_map_page(dev, virt_to_page(addr),
- (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
-}
-
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
#endif
/**
- * sba_unmap_single_attrs - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
/*
** Address does not fall w/in IOVA, must be bypassing
*/
- DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+ DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
iova);
#ifdef ENABLE_MARK_CLEAN
@@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- sba_unmap_page(dev, iova, size, dir, attrs);
-}
-
/**
* sba_alloc_coherent - allocate/map shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
@@ -1132,30 +1119,24 @@ static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flags, unsigned long attrs)
{
+ struct page *page;
struct ioc *ioc;
+ int node = -1;
void *addr;
ioc = GET_IOC(dev);
ASSERT(ioc);
-
#ifdef CONFIG_NUMA
- {
- struct page *page;
-
- page = alloc_pages_node(ioc->node, flags, get_order(size));
- if (unlikely(!page))
- return NULL;
-
- addr = page_address(page);
- }
-#else
- addr = (void *) __get_free_pages(flags, get_order(size));
+ node = ioc->node;
#endif
- if (unlikely(!addr))
+
+ page = alloc_pages_node(node, flags, get_order(size));
+ if (unlikely(!page))
return NULL;
+ addr = page_address(page);
memset(addr, 0, size);
- *dma_handle = virt_to_phys(addr);
+ *dma_handle = page_to_phys(page);
#ifdef ALLOW_IOV_BYPASS
ASSERT(dev->coherent_dma_mask);
@@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
* If device can't bypass or bypass is disabled, pass the 32bit fake
* device to map single to get an iova mapping.
*/
- *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
- size, 0, 0);
-
+ *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, *dma_handle))
+ return NULL;
return addr;
}
@@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
+ sba_unmap_page(dev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sglist->dma_length = sglist->length;
- sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
+ sglist->dma_address = sba_map_page(dev, sg_page(sglist),
+ sglist->offset, sglist->length, dir, attrs);
+ if (dma_mapping_error(dev, sglist->dma_address))
+ return 0;
return 1;
}
@@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
while (nents && sglist->dma_length) {
- sba_unmap_single_attrs(dev, sglist->dma_address,
- sglist->dma_length, dir, attrs);
+ sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
+ dir, attrs);
sglist = sg_next(sglist);
nents--;
}
@@ -2080,8 +2065,6 @@ static int __init acpi_sba_ioc_init_acpi(void)
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
-extern const struct dma_map_ops swiotlb_dma_ops;
-
static int __init
sba_init(void)
{
@@ -2095,7 +2078,7 @@ sba_init(void)
* a successful kdump kernel boot is to use the swiotlb.
*/
if (is_kdump_kernel()) {
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to initialize software I/O TLB:"
" Try machvec=dig boot option");
@@ -2117,7 +2100,7 @@ sba_init(void)
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
*/
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to find SBA IOMMU or initialize "
"software I/O TLB: Try machvec=dig boot option");
@@ -2170,11 +2153,6 @@ static int sba_dma_supported (struct device *dev, u64 mask)
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
-static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
__setup("nosbagart", nosbagart);
static int __init
@@ -2208,7 +2186,6 @@ const struct dma_map_ops sba_dma_ops = {
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
.dma_supported = sba_dma_supported,
- .mapping_error = sba_dma_mapping_error,
};
void sba_dma_init(void)
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a471d8d67d4e..ad7d9963de34f 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
@@ -16,9 +16,26 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
EXPORT_SYMBOL(dma_get_ops);
#ifdef CONFIG_SWIOTLB
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return page_to_pfn(virt_to_page(cpu_addr));
+}
+
void __init swiotlb_dma_init(void)
{
- dma_ops = &swiotlb_dma_ops;
swiotlb_init(1);
}
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d5e12ff1d73cf..0cf43bb13d6e5 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/dma-noncoherent.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte)
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
+ unsigned long pfn = PHYS_PFN(paddr);
+
+ do {
+ set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+ } while (++pfn <= PHYS_PFN(paddr + size - 1));
}
inline void
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 4ce4ee4ef9f2c..b7d42e4edc1f2 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -196,7 +196,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -314,11 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
return nhwentries;
}
-static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
static u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
@@ -441,7 +436,6 @@ static struct dma_map_ops sn_dma_ops = {
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
- .mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
.get_required_mask = sn_dma_get_required_mask,
};
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 1bc9f1ba759a7..8a5868e9a3a0e 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -26,7 +26,6 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
- select DMA_DIRECT_OPS if HAS_DMA
select ARCH_DISCARD_MEMBLOCK
config CPU_BIG_ENDIAN
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index e99993c57d6b6..b4aa853051bd4 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -32,7 +32,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
size = PAGE_ALIGN(size);
order = get_order(size);
- page = alloc_pages(flag, order);
+ page = alloc_pages(flag | __GFP_ZERO, order);
if (!page)
return NULL;
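
OR-ing in __GFP_ZERO here is part of the series-wide guarantee that dma_alloc_* returns zeroed memory on every architecture, plugging kernel-data leaks to userspace. With that guarantee in place, dma_zalloc_coherent() adds nothing and is deprecated; it is now roughly just a wrapper:

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
            dma_addr_t *dma_handle, gfp_t gfp)
    {
        return dma_alloc_coherent(dev, size, dma_handle, gfp);
    }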
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index effed2efd306c..eda9e2315ef52 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -12,7 +12,6 @@ config MICROBLAZE
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 45e0a1aa93573..3002cbca3059b 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -81,7 +81,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
size = PAGE_ALIGN(size);
order = get_order(size);
- vaddr = __get_free_pages(gfp, order);
+ vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
if (!vaddr)
return NULL;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e49b5a0c85853..63183a8454d6f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,7 +18,6 @@ config MIPS
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
select CPU_PM if CPU_IDLE
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index b4c477eb46ce9..20dfaad3a55d2 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -10,10 +10,8 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#if defined(CONFIG_MACH_JAZZ)
return &jazz_dma_ops;
-#elif defined(CONFIG_SWIOTLB)
- return &swiotlb_dma_ops;
#else
- return &dma_direct_ops;
+ return NULL;
#endif
}
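
Returning NULL is the new convention: a NULL dma_map_ops selects the direct mapping, and the core then calls the dma-direct helpers without an indirect branch, avoiding retpoline overhead on hot paths. A simplified sketch of the dispatch (the real dma_map_page_attrs() also does sanity checks and dma-debug accounting):

    static inline bool dma_is_direct(const struct dma_map_ops *ops)
    {
        return likely(!ops);
    }

    static inline dma_addr_t dma_map_page_attrs(struct device *dev,
            struct page *page, size_t offset, size_t size,
            enum dma_data_direction dir, unsigned long attrs)
    {
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))     /* direct call, no retpoline */
            return dma_direct_map_page(dev, page, offset, size, dir, attrs);
        return ops->map_page(dev, page, offset, size, dir, attrs);
    }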
diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
index d913439c738c9..d13f940022d5f 100644
--- a/arch/mips/include/asm/jazzdma.h
+++ b/arch/mips/include/asm/jazzdma.h
@@ -40,12 +40,6 @@ extern int vdma_get_enable(int channel);
#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
/*
- * error code returned by vdma_alloc()
- * (See also arch/mips/kernel/jazzdma.c)
- */
-#define VDMA_ERROR 0xffffffff
-
-/*
* VDMA pagetable entry description
*/
typedef volatile struct VDMA_PGTBL_ENTRY {
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 4c41ed0a637e5..6256d35dbf4db 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -104,12 +104,12 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
if (vdma_debug)
printk("vdma_alloc: Invalid physical address: %08lx\n",
paddr);
- return VDMA_ERROR; /* invalid physical address */
+ return DMA_MAPPING_ERROR; /* invalid physical address */
}
if (size > 0x400000 || size == 0) {
if (vdma_debug)
printk("vdma_alloc: Invalid size: %08lx\n", size);
- return VDMA_ERROR; /* invalid physical address */
+ return DMA_MAPPING_ERROR; /* invalid size */
}
spin_lock_irqsave(&vdma_lock, flags);
@@ -123,7 +123,7 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
first < VDMA_PGTBL_ENTRIES) first++;
if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
spin_unlock_irqrestore(&vdma_lock, flags);
- return VDMA_ERROR;
+ return DMA_MAPPING_ERROR;
}
last = first + 1;
@@ -569,7 +569,7 @@ static void *jazz_dma_alloc(struct device *dev, size_t size,
return NULL;
*dma_handle = vdma_alloc(virt_to_phys(ret), size);
- if (*dma_handle == VDMA_ERROR) {
+ if (*dma_handle == DMA_MAPPING_ERROR) {
dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
return NULL;
}
@@ -620,7 +620,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
- if (sg->dma_address == VDMA_ERROR)
+ if (sg->dma_address == DMA_MAPPING_ERROR)
return 0;
sg_dma_len(sg) = sg->length;
}
@@ -674,11 +674,6 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}
-static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == VDMA_ERROR;
-}
-
const struct dma_map_ops jazz_dma_ops = {
.alloc = jazz_dma_alloc,
.free = jazz_dma_free,
@@ -692,6 +687,5 @@ const struct dma_map_ops jazz_dma_ops = {
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
.dma_supported = dma_direct_supported,
.cache_sync = arch_dma_cache_sync,
- .mapping_error = jazz_dma_mapping_error,
};
EXPORT_SYMBOL(jazz_dma_ops);
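
Driver-side, nothing changes with these conversions: dma_mapping_error() keeps working, it just compares against the shared constant now. Typical usage:

    dma_addr_t handle = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, handle))
        return -ENOMEM;     /* usual driver error path */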
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 7a04adacb2f0b..1af6bbae72207 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -11,7 +11,6 @@ config NDS32
select CLKSRC_MMIO
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 7e95506e957aa..f6c4b0f499972 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -4,7 +4,6 @@ config NIOS2
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_SWAP
- select DMA_DIRECT_OPS
select TIMER_OF
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 285f7d05c8ed2..d0feebad5a8f6 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,7 +7,6 @@
config OPENRISC
def_bool y
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select DMA_DIRECT_OPS
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 159336adfa2f6..f79457cb3741d 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -89,7 +89,7 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
.mm = &init_mm
};
- page = alloc_pages_exact(size, gfp);
+ page = alloc_pages_exact(size, gfp | __GFP_ZERO);
if (!page)
return NULL;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 92a339ee28b38..6e1b71da0e713 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC
@@ -184,7 +185,6 @@ config PA11
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select DMA_DIRECT_OPS
select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 04c48f1ef3fbd..239162355b58c 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -404,7 +404,7 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
vaddr = pcxl_alloc_range(size);
- paddr = __get_free_pages(flag, order);
+ paddr = __get_free_pages(flag | __GFP_ZERO, order);
flush_kernel_dcache_range(paddr, size);
paddr = __pa(paddr);
map_uncached_pages(vaddr, size, paddr);
@@ -429,7 +429,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
return NULL;
- addr = (void *)__get_free_pages(flag, get_order(size));
+ addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
if (addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 2b108ee3b2176..f2cf86ac279b4 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -99,10 +99,6 @@ void __init dma_ops_init(void)
case pcxl2:
pa7300lc_init();
- case pcxl: /* falls through */
- case pcxs:
- case pcxt:
- hppa_dma_ops = &dma_direct_ops;
break;
default:
break;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a1e858e42ada7..50f27a6560514 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,7 +139,6 @@ config PPC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64
- select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index dacd0f93f2b29..ebf66809f2d35 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -103,7 +103,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
}
#define HAVE_ARCH_DMA_SET_MASK 1
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index e847ff69cb2bf..17524d222a7b6 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -143,8 +143,6 @@ struct scatterlist;
#ifdef CONFIG_PPC64
-#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0)
-
static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base)
{
@@ -239,8 +237,6 @@ static inline void iommu_del_device(struct device *dev)
}
#endif /* !CONFIG_IOMMU_API */
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
#else
static inline void *get_iommu_table_base(struct device *dev)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2ca6cfaebf650..9c9bcaae2f759 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -105,11 +105,6 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
}
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -120,5 +115,4 @@ struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
- .mapping_error = dma_iommu_mapping_error,
};
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 678811abccfcd..7d5fc97516223 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -50,16 +50,15 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
.alloc = __dma_nommu_alloc_coherent,
.free = __dma_nommu_free_coherent,
.mmap = dma_nommu_mmap_coherent,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
.dma_supported = swiotlb_dma_supported,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = dma_direct_mapping_error,
+ .map_page = dma_direct_map_page,
+ .unmap_page = dma_direct_unmap_page,
+ .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
+ .sync_single_for_device = dma_direct_sync_single_for_device,
+ .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
+ .sync_sg_for_device = dma_direct_sync_sg_for_device,
.get_required_mask = swiotlb_powerpc_get_required,
};
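
The swiotlb_* entry points can be replaced wholesale because swiotlb is now folded into dma-direct: the direct mapping handles addressable memory inline and bounces through swiotlb only when it must. Roughly (simplified from kernel/dma/direct.c in this merge; the real check also honors swiotlb_force):

    dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
            unsigned long offset, size_t size, enum dma_data_direction dir,
            unsigned long attrs)
    {
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        /* bounce only when the device cannot address the buffer */
        if (unlikely(!dma_capable(dev, dma_addr, size)) &&
            !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs))
            return DMA_MAPPING_ERROR;

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
            arch_sync_dma_for_device(dev, phys, size, dir);
        return dma_addr;
    }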
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9d5d109f15c05..d0625480b59e5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
if (should_fail_iommu(dev))
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
/*
* We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ again:
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
}
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
unsigned long attrs)
{
unsigned long entry;
- dma_addr_t ret = IOMMU_MAPPING_ERROR;
+ dma_addr_t ret = DMA_MAPPING_ERROR;
int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
- if (unlikely(entry == IOMMU_MAPPING_ERROR))
- return IOMMU_MAPPING_ERROR;
+ if (unlikely(entry == DMA_MAPPING_ERROR))
+ return DMA_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
- * IOMMU_MAPPING_ERROR. For all other errors the functionality is
+ * DMA_MAPPING_ERROR. For all other errors the functionality is
* not altered.
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/* Flush/invalidate TLB caches if necessary */
@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
- if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
+ if (unlikely(entry == DMA_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
*/
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = IOMMU_MAPPING_ERROR;
+ outs->dma_address = DMA_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
- s->dma_address = IOMMU_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
- dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
+ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align,
attrs);
- if (dma_handle == IOMMU_MAPPING_ERROR) {
+ if (dma_handle == DMA_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
- if (mapping == IOMMU_MAPPING_ERROR) {
+ if (mapping == DMA_MAPPING_ERROR) {
free_pages((unsigned long)ret, order);
return NULL;
}
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 12352a58072ab..af2a3c15e0ecc 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -654,7 +654,6 @@ static const struct dma_map_ops dma_iommu_fixed_ops = {
.dma_supported = dma_suported_and_switch,
.map_page = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page,
- .mapping_error = dma_iommu_mapping_error,
};
static void cell_dma_dev_setup(struct device *dev)
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 93cc9eec66011..1fad4649735bd 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
- dma_addr_t ret = IOMMU_MAPPING_ERROR;
+ dma_addr_t ret = DMA_MAPPING_ERROR;
tbl = get_iommu_table_base(dev);
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
@@ -625,7 +625,6 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported,
.get_required_mask = vio_dma_get_required_mask,
- .mapping_error = dma_iommu_mapping_error,
};
/**
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index ee833e6f5ccb4..106539bb914e9 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -19,7 +19,6 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_DIRECT_OPS
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_SHOW
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h
deleted file mode 100644
index 8facc1c8fa054..0000000000000
--- a/arch/riscv/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _RISCV_ASM_DMA_MAPPING_H
-#define _RISCV_ASM_DMA_MAPPING_H 1
-
-#ifdef CONFIG_SWIOTLB
-#include <linux/swiotlb.h>
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &swiotlb_dma_ops;
-}
-#else
-#include <asm-generic/dma-mapping.h>
-#endif /* CONFIG_SWIOTLB */
-
-#endif /* _RISCV_ASM_DMA_MAPPING_H */
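
Deleting this header is safe because asm-generic/dma-mapping.h now provides the fallback, which after the bypass changes simply selects the direct mapping:

    /* include/asm-generic/dma-mapping.h, per this series */
    static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
    {
        return NULL;    /* NULL means: use the direct mapping */
    }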
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5173366af8f35..21d271d04ca61 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -73,7 +73,6 @@ config S390
select ARCH_HAS_KCOV
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -140,7 +139,6 @@ config S390
select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
- select DMA_DIRECT_OPS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index d387a0fbdd7e4..9e52d1527f714 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -15,8 +15,6 @@
#include <linux/pci.h>
#include <asm/pci_dma.h>
-#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
-
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
@@ -301,7 +299,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return S390_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -349,7 +347,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
dma_addr = dma_alloc_address(dev, nr_pages);
- if (dma_addr == S390_MAPPING_ERROR) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ret = -ENOSPC;
goto out_err;
}
@@ -372,7 +370,7 @@ out_free:
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
- return S390_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -406,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t map;
size = PAGE_ALIGN(size);
- page = alloc_pages(flag, get_order(size));
+ page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
@@ -449,7 +447,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int ret;
dma_addr_base = dma_alloc_address(dev, nr_pages);
- if (dma_addr_base == S390_MAPPING_ERROR)
+ if (dma_addr_base == DMA_MAPPING_ERROR)
return -ENOMEM;
dma_addr = dma_addr_base;
@@ -496,7 +494,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
- s->dma_address = S390_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) ||
@@ -546,11 +544,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
}
}
-static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == S390_MAPPING_ERROR;
-}
-
int zpci_dma_init_device(struct zpci_dev *zdev)
{
int rc;
@@ -675,7 +668,6 @@ const struct dma_map_ops s390_pci_dma_ops = {
.unmap_sg = s390_dma_unmap_sg,
.map_page = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
- .mapping_error = s390_mapping_error,
/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index f82a4da7adf3f..10fd4e9c454b3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -7,7 +7,6 @@ config SUPERH
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
- select DMA_DIRECT_OPS
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 490b2c95c212c..f5bb9ded1d187 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -40,7 +40,6 @@ config SPARC
select MODULES_USE_ELF_RELA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
- select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select LOCKDEP_SMALL if LOCKDEP
select NEED_DMA_MAP_STATE
@@ -49,7 +48,6 @@ config SPARC
config SPARC32
def_bool !64BIT
select ARCH_HAS_SYNC_DMA_FOR_CPU
- select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select CLZ_TAB
select HAVE_UID16
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index b0bb2fcaf1c90..ed32845bd2d2f 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -2,9 +2,7 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-#include <linux/dma-debug.h>
+#include <asm/cpu_type.h>
extern const struct dma_map_ops *dma_ops;
@@ -14,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return &dma_direct_ops;
+ return NULL;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
- return &dma_direct_ops;
+ return NULL;
#endif
return dma_ops;
}
diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h
index a1d7c86917c6e..462e7c794a095 100644
--- a/arch/sparc/include/asm/dma.h
+++ b/arch/sparc/include/asm/dma.h
@@ -91,54 +91,10 @@ extern int isa_dma_bridge_buggy;
#endif
#ifdef CONFIG_SPARC32
-
-/* Routines for data transfer buffers. */
struct device;
-struct scatterlist;
-
-struct sparc32_dma_ops {
- __u32 (*get_scsi_one)(struct device *, char *, unsigned long);
- void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
- void (*release_scsi_one)(struct device *, __u32, unsigned long);
- void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
-#ifdef CONFIG_SBUS
- int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
- void (*unmap_dma_area)(struct device *, unsigned long, int);
-#endif
-};
-extern const struct sparc32_dma_ops *sparc32_dma_ops;
-
-#define mmu_get_scsi_one(dev,vaddr,len) \
- sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
-#define mmu_get_scsi_sgl(dev,sg,sz) \
- sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
-#define mmu_release_scsi_one(dev,vaddr,len) \
- sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
-#define mmu_release_scsi_sgl(dev,sg,sz) \
- sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
-
-#ifdef CONFIG_SBUS
-/*
- * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
- *
- * The mmu_map_dma_area establishes two mappings in one go.
- * These mappings point to pages normally mapped at 'va' (linear address).
- * First mapping is for CPU visible address at 'a', uncached.
- * This is an alias, but it works because it is an uncached mapping.
- * Second mapping is for device visible address, or "bus" address.
- * The bus address is returned at '*pba'.
- *
- * These functions seem distinct, but are hard to split.
- * On sun4m, page attributes depend on the CPU type, so we have to
- * know if we are mapping RAM or I/O, so it has to be an additional argument
- * to a separate mapping function for CPU visible mappings.
- */
-#define sbus_map_dma_area(dev,pba,va,a,len) \
- sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
-#define sbus_unmap_dma_area(dev,ba,len) \
- sparc32_dma_ops->unmap_dma_area(dev, ba, len)
-#endif /* CONFIG_SBUS */
+unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len);
+bool sparc_dma_free_resource(void *cpu_addr, size_t size);
#endif
#endif /* !(_ASM_SPARC_DMA_H) */
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 8c01f0f6b1ed9..c1e05e4ab9e35 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -254,4 +254,13 @@ extern int leon_ipi_irq;
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
+/*
+ * On LEON, PCI memory space is mapped 1:1 with the physical address space.
+ *
+ * I/O space lives in the low 64 Kbytes of PCI I/O space. I/O addresses are
+ * converted into virtual CPU addresses that the MMU maps onto the PCI host
+ * controller's I/O space window, which the host controller in turn
+ * translates to the low 64 Kbytes.
+ */
+
#endif
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index cad79a6ce0e49..cfec79bb1831c 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -1,9 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_PCI_H
#define ___ASM_SPARC_PCI_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/pci_64.h>
+
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader.
+ */
+#define pcibios_assign_all_busses() 0
+
+#define PCIBIOS_MIN_IO 0UL
+#define PCIBIOS_MIN_MEM 0UL
+
+#define PCI_IRQ_NONE 0xffffffff
+
+
+#ifdef CONFIG_SPARC64
+
+/* PCI IOMMU mapping bypass support. */
+
+/* PCI 64-bit addressing works for all slots on all controller
+ * types on sparc64. However, it requires that the device
+ * can drive enough of the 64 bits.
+ */
+#define PCI64_REQUIRED_MASK (~(u64)0)
+#define PCI64_ADDR_BASE 0xfffc000000000000UL
+
+/* Return the index of the PCI controller for device PDEV. */
+int pci_domain_nr(struct pci_bus *bus);
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 1;
+}
+
+/* Platform support for /proc/bus/pci/X/Y mmap()s. */
+#define HAVE_PCI_MMAP
+#define arch_can_pci_mmap_io() 1
+#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
+#define get_pci_unmapped_area get_fb_unmapped_area
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+#endif /* CONFIG_SPARC64 */
+
+#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI)
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return PCI_IRQ_NONE;
+}
#else
-#include <asm/pci_32.h>
-#endif
+#include <asm-generic/pci.h>
#endif
+
+#endif /* ___ASM_SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
deleted file mode 100644
index cfc0ee9476c6d..0000000000000
--- a/arch/sparc/include/asm/pci_32.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SPARC_PCI_H
-#define __SPARC_PCI_H
-
-#ifdef __KERNEL__
-
-#include <linux/dma-mapping.h>
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- * already-configured bus numbers - to be used for buggy BIOSes
- * or architectures with incomplete PCI setup by the loader.
- */
-#define pcibios_assign_all_busses() 0
-
-#define PCIBIOS_MIN_IO 0UL
-#define PCIBIOS_MIN_MEM 0UL
-
-#define PCI_IRQ_NONE 0xffffffff
-
-#endif /* __KERNEL__ */
-
-#ifndef CONFIG_LEON_PCI
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-#else
-/*
- * On LEON PCI Memory space is mapped 1:1 with physical address space.
- *
- * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses
- * are converted into CPU addresses to virtual addresses that are mapped with
- * MMU to the PCI Host PCI I/O space window which are translated to the low
- * 64Kbytes by the Host controller.
- */
-
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return PCI_IRQ_NONE;
-}
-#endif
-
-#endif /* __SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
deleted file mode 100644
index fac77813402c6..0000000000000
--- a/arch/sparc/include/asm/pci_64.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SPARC64_PCI_H
-#define __SPARC64_PCI_H
-
-#ifdef __KERNEL__
-
-#include <linux/dma-mapping.h>
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- * already-configured bus numbers - to be used for buggy BIOSes
- * or architectures with incomplete PCI setup by the loader.
- */
-#define pcibios_assign_all_busses() 0
-
-#define PCIBIOS_MIN_IO 0UL
-#define PCIBIOS_MIN_MEM 0UL
-
-#define PCI_IRQ_NONE 0xffffffff
-
-/* PCI IOMMU mapping bypass support. */
-
-/* PCI 64-bit addressing works for all slots on all controller
- * types on sparc64. However, it requires that the device
- * can drive enough of the 64 bits.
- */
-#define PCI64_REQUIRED_MASK (~(u64)0)
-#define PCI64_ADDR_BASE 0xfffc000000000000UL
-
-/* Return the index of the PCI controller for device PDEV. */
-
-int pci_domain_nr(struct pci_bus *bus);
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return 1;
-}
-
-/* Platform support for /proc/bus/pci/X/Y mmap()s. */
-
-#define HAVE_PCI_MMAP
-#define arch_can_pci_mmap_io() 1
-#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
-#define get_pci_unmapped_area get_fb_unmapped_area
-
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return PCI_IRQ_NONE;
-}
-
-#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-#endif /* __KERNEL__ */
-
-#endif /* __SPARC64_PCI_H */
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 05eb016fc41be..b1a09080e8da3 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -314,7 +314,7 @@ bad:
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
- return SPARC_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
@@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = SPARC_MAPPING_ERROR;
+ outs->dma_address = DMA_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -573,7 +573,7 @@ iommu_map_failed:
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
- s->dma_address = SPARC_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -741,11 +741,6 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == SPARC_MAPPING_ERROR;
-}
-
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
@@ -771,7 +766,6 @@ static const struct dma_map_ops sun4u_dma_ops = {
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
.dma_supported = dma_4u_supported,
- .mapping_error = dma_4u_mapping_error,
};
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index e3c02ba325004..d62ed9c5682d5 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,6 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
-#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0)
-
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index aeaad04fdd146..f89603855f1ec 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -52,8 +52,6 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-const struct sparc32_dma_ops *sparc32_dma_ops;
-
/* This function must make sure that caches and memory are coherent after DMA
* On LEON systems without cache snooping it flushes the entire D-CACHE.
*/
@@ -247,178 +245,60 @@ static void _sparc_free_io(struct resource *res)
release_resource(res);
}
-#ifdef CONFIG_SBUS
-
-void sbus_set_sbus64(struct device *dev, int x)
-{
- printk("sbus_set_sbus64: unsupported\n");
-}
-EXPORT_SYMBOL(sbus_set_sbus64);
-
-/*
- * Allocate a chunk of memory suitable for DMA.
- * Typically devices use them for control blocks.
- * CPU may access them without any explicit flushing.
- */
-static void *sbus_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *dma_addrp, gfp_t gfp,
- unsigned long attrs)
+unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
{
- struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = PAGE_ALIGN(len);
- unsigned long va;
struct resource *res;
- int order;
-
- /* XXX why are some lengths signed, others unsigned? */
- if (len <= 0) {
- return NULL;
- }
- /* XXX So what is maxphys for us and how do drivers know it? */
- if (len > 256*1024) { /* __get_free_pages() limit */
- return NULL;
- }
-
- order = get_order(len_total);
- va = __get_free_pages(gfp, order);
- if (va == 0)
- goto err_nopages;
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
- goto err_nomem;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return 0;
+ res->name = dev->of_node->full_name;
- if (allocate_resource(&_sparc_dvma, res, len_total,
- _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
- goto err_nova;
+ if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
+ _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
+ printk("%s: cannot occupy 0x%zx", __func__, len);
+ kfree(res);
+ return 0;
}
- // XXX The sbus_map_dma_area does this for us below, see comments.
- // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
- /*
- * XXX That's where sdev would be used. Currently we load
- * all iommu tables with the same translations.
- */
- if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
- goto err_noiommu;
-
- res->name = op->dev.of_node->full_name;
-
- return (void *)(unsigned long)res->start;
-
-err_noiommu:
- release_resource(res);
-err_nova:
- kfree(res);
-err_nomem:
- free_pages(va, order);
-err_nopages:
- return NULL;
+ return res->start;
}
-static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, unsigned long attrs)
+bool sparc_dma_free_resource(void *cpu_addr, size_t size)
{
+ unsigned long addr = (unsigned long)cpu_addr;
struct resource *res;
- struct page *pgv;
- if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)p)) == NULL) {
- printk("sbus_free_consistent: cannot free %p\n", p);
- return;
+ res = lookup_resource(&_sparc_dvma, addr);
+ if (!res) {
+ printk("%s: cannot free %p\n", __func__, cpu_addr);
+ return false;
}
- if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
- printk("sbus_free_consistent: unaligned va %p\n", p);
- return;
+ if ((addr & (PAGE_SIZE - 1)) != 0) {
+ printk("%s: unaligned va %p\n", __func__, cpu_addr);
+ return false;
}
- n = PAGE_ALIGN(n);
- if (resource_size(res) != n) {
- printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
- (long)resource_size(res), n);
- return;
+ size = PAGE_ALIGN(size);
+ if (resource_size(res) != size) {
+ printk("%s: region 0x%lx asked 0x%zx\n",
+ __func__, (long)resource_size(res), size);
+ return false;
}
release_resource(res);
kfree(res);
-
- pgv = virt_to_page(p);
- sbus_unmap_dma_area(dev, ba, n);
-
- __free_pages(pgv, get_order(n));
-}
-
-/*
- * Map a chunk of memory so that devices can see it.
- * CPU view of this memory may be inconsistent with
- * a device view and explicit flushing is necessary.
- */
-static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t len,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *va = page_address(page) + offset;
-
- /* XXX why are some lengths signed, others unsigned? */
- if (len <= 0) {
- return 0;
- }
- /* XXX So what is maxphys for us and how do drivers know it? */
- if (len > 256*1024) { /* __get_free_pages() limit */
- return 0;
- }
- return mmu_get_scsi_one(dev, va, len);
-}
-
-static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
- enum dma_data_direction dir, unsigned long attrs)
-{
- mmu_release_scsi_one(dev, ba, n);
-}
-
-static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, unsigned long attrs)
-{
- mmu_get_scsi_sgl(dev, sg, n);
- return n;
-}
-
-static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, unsigned long attrs)
-{
- mmu_release_scsi_sgl(dev, sg, n);
-}
-
-static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int n, enum dma_data_direction dir)
-{
- BUG();
+ return true;
}
-static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int n, enum dma_data_direction dir)
-{
- BUG();
-}
+#ifdef CONFIG_SBUS
-static int sbus_dma_supported(struct device *dev, u64 mask)
+void sbus_set_sbus64(struct device *dev, int x)
{
- return 0;
+ printk("sbus_set_sbus64: unsupported\n");
}
-
-static const struct dma_map_ops sbus_dma_ops = {
- .alloc = sbus_alloc_coherent,
- .free = sbus_free_coherent,
- .map_page = sbus_map_page,
- .unmap_page = sbus_unmap_page,
- .map_sg = sbus_map_sg,
- .unmap_sg = sbus_unmap_sg,
- .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
- .sync_sg_for_device = sbus_sync_sg_for_device,
- .dma_supported = sbus_dma_supported,
-};
+EXPORT_SYMBOL(sbus_set_sbus64);
static int __init sparc_register_ioport(void)
{
@@ -438,45 +318,30 @@ arch_initcall(sparc_register_ioport);
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
- unsigned long len_total = PAGE_ALIGN(size);
+ unsigned long addr;
void *va;
- struct resource *res;
- int order;
- if (size == 0) {
+ if (!size || size > 256 * 1024) /* __get_free_pages() limit */
return NULL;
- }
- if (size > 256*1024) { /* __get_free_pages() limit */
- return NULL;
- }
- order = get_order(len_total);
- va = (void *) __get_free_pages(gfp, order);
- if (va == NULL) {
- printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT);
- goto err_nopages;
+ size = PAGE_ALIGN(size);
+ va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
+ if (!va) {
+ printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
+ return NULL;
}
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk("%s: no core\n", __func__);
+ addr = sparc_dma_alloc_resource(dev, size);
+ if (!addr)
goto err_nomem;
- }
- if (allocate_resource(&_sparc_dvma, res, len_total,
- _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("%s: cannot occupy 0x%lx", __func__, len_total);
- goto err_nova;
- }
- srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
+ srmmu_mapiorange(0, virt_to_phys(va), addr, size);
*dma_handle = virt_to_phys(va);
- return (void *) res->start;
+ return (void *)addr;
-err_nova:
- kfree(res);
err_nomem:
- free_pages((unsigned long)va, order);
-err_nopages:
+ free_pages((unsigned long)va, get_order(size));
return NULL;
}
@@ -491,31 +356,11 @@ err_nopages:
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- struct resource *res;
-
- if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)cpu_addr)) == NULL) {
- printk("%s: cannot free %p\n", __func__, cpu_addr);
+ if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
return;
- }
-
- if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
- printk("%s: unaligned va %p\n", __func__, cpu_addr);
- return;
- }
-
- size = PAGE_ALIGN(size);
- if (resource_size(res) != size) {
- printk("%s: region 0x%lx asked 0x%zx\n", __func__,
- (long)resource_size(res), size);
- return;
- }
dma_make_coherent(dma_addr, size);
srmmu_unmapiorange((unsigned long)cpu_addr, size);
-
- release_resource(res);
- kfree(res);
free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
}
@@ -528,7 +373,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
dma_make_coherent(paddr, PAGE_ALIGN(size));
}
-const struct dma_map_ops *dma_ops = &sbus_dma_ops;
+const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
#ifdef CONFIG_PROC_FS
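
The DVMA resource bookkeeping that used to be open-coded in each allocator is now shared via sparc_dma_alloc_resource()/sparc_dma_free_resource(). The expected calling pattern, mirroring the converted allocators above (example_alloc() is a made-up name):

    static void *example_alloc(struct device *dev, size_t len,
            dma_addr_t *dma_handle, gfp_t gfp)
    {
        unsigned long va, addr;

        len = PAGE_ALIGN(len);
        va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
        if (!va)
            return NULL;

        addr = sparc_dma_alloc_resource(dev, len); /* reserve a DVMA range */
        if (!addr) {
            free_pages(va, get_order(len));
            return NULL;
        }
        /* ... map va at addr and set *dma_handle, as the callers above do ... */
        return (void *)addr;
    }

The free side calls sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)) and bails out if the address was not a live DVMA resource.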
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 565d9ac883d0d..fa0e42b4cbfb8 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -414,12 +414,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return SPARC_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
- return SPARC_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
@@ -592,7 +592,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = SPARC_MAPPING_ERROR;
+ outs->dma_address = DMA_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -609,7 +609,7 @@ iommu_map_failed:
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
- s->dma_address = SPARC_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -688,11 +688,6 @@ static int dma_4v_supported(struct device *dev, u64 device_mask)
return pci64_dma_supported(to_pci_dev(dev), device_mask);
}
-static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == SPARC_MAPPING_ERROR;
-}
-
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
@@ -701,7 +696,6 @@ static const struct dma_map_ops sun4v_dma_ops = {
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.dma_supported = dma_4v_supported,
- .mapping_error = dma_4v_mapping_error,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index c8cb27d3ea759..f770ee7229d8d 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -12,7 +12,7 @@
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
-#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -140,34 +140,44 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
return vaddr;
}
-static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len, enum dma_data_direction dir,
+ unsigned long attrs)
{
+ void *vaddr = page_address(page) + offset;
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long ret, flags;
+ /* XXX So what is maxphys for us and how do drivers know it? */
+ if (!len || len > 256 * 1024)
+ return DMA_MAPPING_ERROR;
+
spin_lock_irqsave(&iounit->lock, flags);
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
spin_unlock_irqrestore(&iounit->lock, flags);
return ret;
}
-static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct iounit_struct *iounit = dev->archdata.iommu;
+ struct scatterlist *sg;
unsigned long flags;
+ int i;
/* FIXME: Cache some resolved pages - often several sg entries map to the same page */
spin_lock_irqsave(&iounit->lock, flags);
- while (sz != 0) {
- --sz;
+ for_each_sg(sgl, sg, nents, i) {
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
sg->dma_length = sg->length;
- sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
+ return nents;
}
-static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long flags;
@@ -181,34 +191,47 @@ static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned lo
spin_unlock_irqrestore(&iounit->lock, flags);
}
-static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct iounit_struct *iounit = dev->archdata.iommu;
- unsigned long flags;
- unsigned long vaddr, len;
+ unsigned long flags, vaddr, len;
+ struct scatterlist *sg;
+ int i;
spin_lock_irqsave(&iounit->lock, flags);
- while (sz != 0) {
- --sz;
+ for_each_sg(sgl, sg, nents, i) {
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
- sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
#ifdef CONFIG_SBUS
-static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
+static void *iounit_alloc(struct device *dev, size_t len,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct iounit_struct *iounit = dev->archdata.iommu;
- unsigned long page, end;
+ unsigned long va, addr, page, end, ret;
pgprot_t dvma_prot;
iopte_t __iomem *iopte;
- *pba = addr;
+ /* XXX So what is maxphys for us and how do drivers know it? */
+ if (!len || len > 256 * 1024)
+ return NULL;
+
+ len = PAGE_ALIGN(len);
+ va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
+ if (!va)
+ return NULL;
+
+ addr = ret = sparc_dma_alloc_resource(dev, len);
+ if (!addr)
+ goto out_free_pages;
+ *dma_handle = addr;
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
end = PAGE_ALIGN((addr + len));
@@ -237,27 +260,32 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
flush_cache_all();
flush_tlb_all();
- return 0;
+ return (void *)ret;
+
+out_free_pages:
+ free_pages(va, get_order(len));
+ return NULL;
}
-static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
+static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
{
/* XXX Somebody please fill this in */
}
#endif
-static const struct sparc32_dma_ops iounit_dma_ops = {
- .get_scsi_one = iounit_get_scsi_one,
- .get_scsi_sgl = iounit_get_scsi_sgl,
- .release_scsi_one = iounit_release_scsi_one,
- .release_scsi_sgl = iounit_release_scsi_sgl,
+static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
- .map_dma_area = iounit_map_dma_area,
- .unmap_dma_area = iounit_unmap_dma_area,
+ .alloc = iounit_alloc,
+ .free = iounit_free,
#endif
+ .map_page = iounit_map_page,
+ .unmap_page = iounit_unmap_page,
+ .map_sg = iounit_map_sg,
+ .unmap_sg = iounit_unmap_sg,
};
void __init ld_mmu_iounit(void)
{
- sparc32_dma_ops = &iounit_dma_ops;
+ dma_ops = &iounit_dma_ops;
}
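
The open-coded while (sz--) ... sg = sg_next(sg) loops above are replaced with the standard for_each_sg() iterator, which expresses the same walk more clearly and is the expected form now that these functions implement the generic ->map_sg()/->unmap_sg() interface. The idiom (walk_sgl() is illustrative only):

    #include <linux/scatterlist.h>

    static void walk_sgl(struct scatterlist *sgl, int nents)
    {
        struct scatterlist *sg;
        int i;

        /* canonical walk of a (possibly chained) scatterlist */
        for_each_sg(sgl, sg, nents, i)
            pr_debug("entry %d: %u bytes\n", i, sg->length);
    }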
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 2c5f8a648f8c3..e8d5d73ca40d6 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -13,7 +13,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
-#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -205,59 +205,67 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
return busa0;
}
-static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len)
{
- unsigned long off;
- int npages;
- struct page *page;
- u32 busa;
-
- off = (unsigned long)vaddr & ~PAGE_MASK;
- npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
- page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
- busa = iommu_get_one(dev, page, npages);
- return busa + off;
+ void *vaddr = page_address(page) + offset;
+ unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
+ unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* XXX So what is maxphys for us and how do drivers know it? */
+ if (!len || len > 256 * 1024)
+ return DMA_MAPPING_ERROR;
+ return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
}
-static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
+ struct page *page, unsigned long offset, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
flush_page_for_dma(0);
- return iommu_get_scsi_one(dev, vaddr, len);
+ return __sbus_iommu_map_page(dev, page, offset, len);
}
-static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
+ struct page *page, unsigned long offset, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
- unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+ void *vaddr = page_address(page) + offset;
+ unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
- while(page < ((unsigned long)(vaddr + len))) {
- flush_page_for_dma(page);
- page += PAGE_SIZE;
+ while (p < (unsigned long)vaddr + len) {
+ flush_page_for_dma(p);
+ p += PAGE_SIZE;
}
- return iommu_get_scsi_one(dev, vaddr, len);
+
+ return __sbus_iommu_map_page(dev, page, offset, len);
}
-static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
- int n;
+ struct scatterlist *sg;
+ int i, n;
flush_page_for_dma(0);
- while (sz != 0) {
- --sz;
+
+ for_each_sg(sgl, sg, nents, i) {
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
sg->dma_length = sg->length;
- sg = sg_next(sg);
}
+
+ return nents;
}
-static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
unsigned long page, oldpage = 0;
- int n, i;
-
- while(sz != 0) {
- --sz;
+ struct scatterlist *sg;
+ int i, j, n;
+ for_each_sg(sgl, sg, nents, j) {
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
/*
@@ -277,8 +285,9 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
sg->dma_length = sg->length;
- sg = sg_next(sg);
}
+
+ return nents;
}
static void iommu_release_one(struct device *dev, u32 busa, int npages)
@@ -297,40 +306,52 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
bit_map_clear(&iommu->usemap, ioptex, npages);
}
-static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t len, enum dma_data_direction dir, unsigned long attrs)
{
- unsigned long off;
+ unsigned long off = dma_addr & ~PAGE_MASK;
int npages;
- off = vaddr & ~PAGE_MASK;
npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
- iommu_release_one(dev, vaddr & PAGE_MASK, npages);
+ iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}
-static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
- int n;
-
- while(sz != 0) {
- --sz;
+ struct scatterlist *sg;
+ int i, n;
+ for_each_sg(sgl, sg, nents, i) {
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
sg->dma_address = 0x21212121;
- sg = sg_next(sg);
}
}
#ifdef CONFIG_SBUS
-static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
- unsigned long addr, int len)
+static void *sbus_iommu_alloc(struct device *dev, size_t len,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct iommu_struct *iommu = dev->archdata.iommu;
- unsigned long page, end;
+ unsigned long va, addr, page, end, ret;
iopte_t *iopte = iommu->page_table;
iopte_t *first;
int ioptex;
+ /* XXX So what is maxphys for us and how do drivers know it? */
+ if (!len || len > 256 * 1024)
+ return NULL;
+
+ len = PAGE_ALIGN(len);
+ va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
+ if (va == 0)
+ return NULL;
+
+ addr = ret = sparc_dma_alloc_resource(dev, len);
+ if (!addr)
+ goto out_free_pages;
+
BUG_ON((va & ~PAGE_MASK) != 0);
BUG_ON((addr & ~PAGE_MASK) != 0);
BUG_ON((len & ~PAGE_MASK) != 0);
@@ -385,16 +406,25 @@ static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long
flush_tlb_all();
iommu_invalidate(iommu->regs);
- *pba = iommu->start + (ioptex << PAGE_SHIFT);
- return 0;
+ *dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
+ return (void *)ret;
+
+out_free_pages:
+ free_pages(va, get_order(len));
+ return NULL;
}
-static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
+static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
+ dma_addr_t busa, unsigned long attrs)
{
struct iommu_struct *iommu = dev->archdata.iommu;
iopte_t *iopte = iommu->page_table;
- unsigned long end;
+ struct page *page = virt_to_page(cpu_addr);
int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+ unsigned long end;
+
+ if (!sparc_dma_free_resource(cpu_addr, len))
+ return;
BUG_ON((busa & ~PAGE_MASK) != 0);
BUG_ON((len & ~PAGE_MASK) != 0);
@@ -408,38 +438,40 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
flush_tlb_all();
iommu_invalidate(iommu->regs);
bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+
+ __free_pages(page, get_order(len));
}
#endif
-static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
- .get_scsi_one = iommu_get_scsi_one_gflush,
- .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
- .release_scsi_one = iommu_release_scsi_one,
- .release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
- .map_dma_area = iommu_map_dma_area,
- .unmap_dma_area = iommu_unmap_dma_area,
+ .alloc = sbus_iommu_alloc,
+ .free = sbus_iommu_free,
#endif
+ .map_page = sbus_iommu_map_page_gflush,
+ .unmap_page = sbus_iommu_unmap_page,
+ .map_sg = sbus_iommu_map_sg_gflush,
+ .unmap_sg = sbus_iommu_unmap_sg,
};
-static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
- .get_scsi_one = iommu_get_scsi_one_pflush,
- .get_scsi_sgl = iommu_get_scsi_sgl_pflush,
- .release_scsi_one = iommu_release_scsi_one,
- .release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
- .map_dma_area = iommu_map_dma_area,
- .unmap_dma_area = iommu_unmap_dma_area,
+ .alloc = sbus_iommu_alloc,
+ .free = sbus_iommu_free,
#endif
+ .map_page = sbus_iommu_map_page_pflush,
+ .unmap_page = sbus_iommu_unmap_page,
+ .map_sg = sbus_iommu_map_sg_pflush,
+ .unmap_sg = sbus_iommu_unmap_sg,
};
void __init ld_mmu_iommu(void)
{
if (flush_page_for_dma_global) {
/* flush_page_for_dma flushes everything, no matter which page it is */
- sparc32_dma_ops = &iommu_dma_gflush_ops;
+ dma_ops = &sbus_iommu_dma_gflush_ops;
} else {
- sparc32_dma_ops = &iommu_dma_pflush_ops;
+ dma_ops = &sbus_iommu_dma_pflush_ops;
}
if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index a4c05159dca5c..2681027d7bffb 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -4,7 +4,6 @@ config UNICORE32
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select DMA_DIRECT_OPS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c7094f813183e..57552f2b37eb7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -66,7 +66,6 @@ config X86
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
@@ -90,7 +89,6 @@ config X86
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
select CLOCKSOURCE_WATCHDOG
select DCACHE_WORD_ACCESS
- select DMA_DIRECT_OPS
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
select GENERIC_CLOCKEVENTS
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 3f9d1b4019bbf..e0ff3ac8c1274 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -50,8 +50,6 @@ static unsigned long iommu_pages; /* .. and in pages */
static u32 *iommu_gatt_base; /* Remapping table */
-static dma_addr_t bad_dma_addr;
-
/*
* If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
@@ -74,8 +72,6 @@ static u32 gart_unmapped_entry;
(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
-#define EMERGENCY_PAGES 32 /* = 128KB */
-
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
@@ -155,9 +151,6 @@ static void flush_gart(void)
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
-static int leak_trace;
-static int iommu_leak_pages = 20;
-
static void dump_leak(void)
{
static int dump;
@@ -184,14 +177,6 @@ static void iommu_full(struct device *dev, size_t size, int dir)
*/
dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
-
- if (size > PAGE_SIZE*EMERGENCY_PAGES) {
- if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Memory would be corrupted\n");
- if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic(KERN_ERR
- "PCI-DMA: Random memory would be DMAed\n");
- }
#ifdef CONFIG_IOMMU_LEAK
dump_leak();
#endif
@@ -220,7 +205,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
int i;
if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
- return bad_dma_addr;
+ return DMA_MAPPING_ERROR;
iommu_page = alloc_iommu(dev, npages, align_mask);
if (iommu_page == -1) {
@@ -229,7 +214,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
iommu_full(dev, size, dir);
- return bad_dma_addr;
+ return DMA_MAPPING_ERROR;
}
for (i = 0; i < npages; i++) {
@@ -271,7 +256,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
int npages;
int i;
- if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
+ if (dma_addr == DMA_MAPPING_ERROR ||
dma_addr >= iommu_bus_base + iommu_size)
return;
@@ -315,7 +300,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir, 0);
- if (addr == bad_dma_addr) {
+ if (addr == DMA_MAPPING_ERROR) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir, 0);
nents = 0;
@@ -471,7 +456,7 @@ error:
iommu_full(dev, pages << PAGE_SHIFT, dir);
for_each_sg(sg, s, nents, i)
- s->dma_address = bad_dma_addr;
+ s->dma_address = DMA_MAPPING_ERROR;
return 0;
}
@@ -490,7 +475,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
flush_gart();
- if (unlikely(*dma_addr == bad_dma_addr))
+ if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
goto out_free;
return vaddr;
out_free:
@@ -507,11 +492,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}
-static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == bad_dma_addr);
-}
-
static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -695,7 +675,6 @@ static const struct dma_map_ops gart_dma_ops = {
.unmap_page = gart_unmap_page,
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
- .mapping_error = gart_mapping_error,
.dma_supported = dma_direct_supported,
};
@@ -730,7 +709,6 @@ int __init gart_iommu_init(void)
unsigned long aper_base, aper_size;
unsigned long start_pfn, end_pfn;
unsigned long scratch;
- long i;
if (!amd_nb_has_feature(AMD_NB_GART))
return 0;
@@ -774,29 +752,12 @@ int __init gart_iommu_init(void)
if (!iommu_gart_bitmap)
panic("Cannot allocate iommu bitmap\n");
-#ifdef CONFIG_IOMMU_LEAK
- if (leak_trace) {
- int ret;
-
- ret = dma_debug_resize_entries(iommu_pages);
- if (ret)
- pr_debug("PCI-DMA: Cannot trace all the entries\n");
- }
-#endif
-
- /*
- * Out of IOMMU space handling.
- * Reserve some invalid pages at the beginning of the GART.
- */
- bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
-
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
agp_memory_reserved = iommu_size;
iommu_start = aper_size - iommu_size;
iommu_bus_base = info.aper_base + iommu_start;
- bad_dma_addr = iommu_bus_base;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
/*
@@ -838,8 +799,6 @@ int __init gart_iommu_init(void)
if (!scratch)
panic("Cannot allocate iommu scratch page");
gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
- for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
- iommu_gatt_base[i] = gart_unmapped_entry;
flush_gart();
dma_ops = &gart_dma_ops;
@@ -853,16 +812,6 @@ void __init gart_parse_options(char *p)
{
int arg;
-#ifdef CONFIG_IOMMU_LEAK
- if (!strncmp(p, "leak", 4)) {
- leak_trace = 1;
- p += 4;
- if (*p == '=')
- ++p;
- if (isdigit(*p) && get_option(&p, &arg))
- iommu_leak_pages = arg;
- }
-#endif
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
if (!strncmp(p, "fullflush", 9))
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index bbfc8b1e9104c..c70720f61a34b 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -51,8 +51,6 @@
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
-#define CALGARY_MAPPING_ERROR 0
-
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
@@ -157,8 +155,6 @@ static const unsigned long phb_debug_offsets[] = {
#define PHB_DEBUG_STUFF_OFFSET 0x0020
-#define EMERGENCY_PAGES 32 /* = 128KB */
-
unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
static int translate_empty_slots __read_mostly = 0;
static int calgary_detected __read_mostly = 0;
@@ -255,7 +251,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return CALGARY_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
}
@@ -274,11 +270,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
dma_addr_t ret;
entry = iommu_range_alloc(dev, tbl, npages);
-
- if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
+ if (unlikely(entry == DMA_MAPPING_ERROR)) {
pr_warn("failed to allocate %u pages in iommu %p\n",
npages, tbl);
- return CALGARY_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/* set the return dma address */
@@ -294,12 +289,10 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry;
- unsigned long badend;
unsigned long flags;
/* were we called with bad_dma_address? */
- badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely(dma_addr < badend)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
@@ -383,7 +376,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == CALGARY_MAPPING_ERROR) {
+ if (entry == DMA_MAPPING_ERROR) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -401,7 +394,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = CALGARY_MAPPING_ERROR;
+ sg->dma_address = DMA_MAPPING_ERROR;
sg->dma_length = 0;
}
return 0;
@@ -454,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == CALGARY_MAPPING_ERROR)
+ if (mapping == DMA_MAPPING_ERROR)
goto free;
*dma_handle = mapping;
return ret;
@@ -479,11 +472,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
-static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == CALGARY_MAPPING_ERROR;
-}
-
static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
@@ -491,7 +479,6 @@ static const struct dma_map_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
- .mapping_error = calgary_mapping_error,
.dma_supported = dma_direct_supported,
};
@@ -739,9 +726,6 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
u64 start;
struct iommu_table *tbl = pci_iommu(dev->bus);
- /* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
-
/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
if (is_calgary(dev->device)) {
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index f4562fcec6815..d460998ae8285 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -17,7 +17,7 @@
static bool disable_dac_quirk __read_mostly;
-const struct dma_map_ops *dma_ops = &dma_direct_ops;
+const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
#ifdef CONFIG_IOMMU_DEBUG
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index bd08b9e1c9e2a..5f5302028a9af 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -62,10 +62,8 @@ IOMMU_INIT(pci_swiotlb_detect_4gb,
void __init pci_swiotlb_init(void)
{
- if (swiotlb) {
+ if (swiotlb)
swiotlb_init(0);
- dma_ops = &swiotlb_dma_ops;
- }
}
void __init pci_swiotlb_late_init(void)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 006f373f54aba..385afa2b9e17a 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -381,13 +381,6 @@ void __init mem_encrypt_init(void)
swiotlb_update_mem_attributes();
/*
- * With SEV, DMA operations cannot use encryption, so we need to use
- * SWIOTLB to bounce-buffer DMA operations.
- */
- if (sev_active())
- dma_ops = &swiotlb_dma_ops;
-
- /*
* With SEV, we need to unroll the rep string I/O instructions.
*/
if (sev_active())
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 7a5bafb76d770..3cdafea55ab6b 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -168,7 +168,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
return;
pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
- pdev->dev.dma_ops = &swiotlb_dma_ops;
pdev->dev.archdata.is_sta2x11 = true;
/* We must enable all devices as master, for audio DMA to work */
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d29b7365da8d9..36338e7564a3f 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config XTENSA
def_bool y
- select ARCH_HAS_SG_CHAIN
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
@@ -10,7 +9,7 @@ config XTENSA
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_DIRECT_OPS
+ select DMA_REMAP if MMU
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_SHOW
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1fc138b6bc0a8..9171bff76fc4c 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -160,7 +160,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
flag & __GFP_NOWARN);
if (!page)
- page = alloc_pages(flag, get_order(size));
+ page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e9eda5558c1fc..5efd4219f1127 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1456,6 +1456,11 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
const struct iommu_ops *iommu;
u64 dma_addr = 0, size = 0;
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ set_dma_ops(dev, &dma_dummy_ops);
+ return 0;
+ }
+
iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
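acpi_dma_configure() now handles DEV_DMA_NOT_SUPPORTED itself by installing the shared dummy ops, so the bus code below no longer needs to filter the attribute. Conceptually the dummy ops fail every operation; a rough sketch (not the verbatim kernel/dma/dummy.c source):

static dma_addr_t dummy_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;       /* no mapping is ever possible */
}

static int dummy_supported(struct device *hwdev, u64 mask)
{
        return 0;                       /* no DMA mask works here */
}

const struct dma_map_ops dma_dummy_ops = {
        .map_page       = dummy_map_page,
        .dma_supported  = dummy_supported,
};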
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0fb5f140f1b00..445cbd8266ca4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1137,8 +1137,7 @@ int platform_dma_configure(struct device *dev)
ret = of_dma_configure(dev, dev->of_node, true);
} else if (has_acpi_companion(dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
- if (attr != DEV_DMA_NOT_SUPPORTED)
- ret = acpi_dma_configure(dev, attr);
+ ret = acpi_dma_configure(dev, attr);
}
return ret;
@@ -1178,37 +1177,6 @@ int __init platform_bus_init(void)
return error;
}
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-static u64 dma_default_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-
-u64 dma_get_required_mask(struct device *dev)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->get_required_mask)
- return ops->get_required_mask(dev);
- return dma_default_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
-
static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f05a29ff586e1..9fd5fbe8bebf4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -583,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
dev_priv->map_mode = vmw_dma_map_populate;
- if (dma_ops->sync_single_for_cpu)
+ if (dma_ops && dma_ops->sync_single_for_cpu)
dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl() == 0)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1167ff0416cf7..567221cca13c8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -55,8 +55,6 @@
#include "amd_iommu_types.h"
#include "irq_remapping.h"
-#define AMD_IOMMU_MAPPING_ERROR 0
-
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define LOOP_TIMEOUT 100000
@@ -2186,7 +2184,7 @@ static int amd_iommu_add_device(struct device *dev)
dev_name(dev));
iommu_ignore_device(dev);
- dev->dma_ops = &dma_direct_ops;
+ dev->dma_ops = NULL;
goto out;
}
init_iommu_group(dev);
@@ -2339,7 +2337,7 @@ static dma_addr_t __map_single(struct device *dev,
paddr &= PAGE_MASK;
address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (address == AMD_IOMMU_MAPPING_ERROR)
+ if (!address)
goto out;
prot = dir2prot(direction);
@@ -2376,7 +2374,7 @@ out_unmap:
dma_ops_free_iova(dma_dom, address, pages);
- return AMD_IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/*
@@ -2427,7 +2425,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
else if (IS_ERR(domain))
- return AMD_IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
dma_mask = *dev->dma_mask;
dma_dom = to_dma_ops_domain(domain);
@@ -2504,7 +2502,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (address == AMD_IOMMU_MAPPING_ERROR)
+ if (address == DMA_MAPPING_ERROR)
goto out_err;
prot = dir2prot(direction);
@@ -2627,7 +2625,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
size, DMA_BIDIRECTIONAL, dma_mask);
- if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
+ if (*dma_addr == DMA_MAPPING_ERROR)
goto out_free;
return page_address(page);
@@ -2678,11 +2676,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev);
}
-static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == AMD_IOMMU_MAPPING_ERROR;
-}
-
static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
@@ -2691,7 +2684,6 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
.map_sg = map_sg,
.unmap_sg = unmap_sg,
.dma_supported = amd_iommu_dma_supported,
- .mapping_error = amd_iommu_mapping_error,
};
static int init_reserved_iova_ranges(void)
@@ -2778,17 +2770,6 @@ int __init amd_iommu_init_dma_ops(void)
swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
- /*
- * In case we don't initialize SWIOTLB (actually the common case
- * when AMD IOMMU is enabled and SME is not active), make sure there
- * are global dma_ops set as a fall-back for devices not handled by
- * this driver (for example non-PCI devices). When SME is active,
- * make sure that swiotlb variable remains set so the global dma_ops
- * continue to be SWIOTLB.
- */
- if (!swiotlb)
- dma_ops = &dma_direct_ops;
-
if (amd_iommu_unmap_flush)
pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
else
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b04753b2040..60c7e9e9901e9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -32,8 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
-#define IOMMU_MAPPING_ERROR 0
-
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -523,7 +521,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
{
__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = IOMMU_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
}
/**
@@ -556,7 +554,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
- *handle = IOMMU_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -649,11 +647,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
- return IOMMU_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
return iova + iova_off;
}
@@ -694,7 +692,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
s->offset += s_iova_off;
s->length = s_length;
- sg_dma_address(s) = IOMMU_MAPPING_ERROR;
+ sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
/*
@@ -737,11 +735,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
+ if (sg_dma_address(s) != DMA_MAPPING_ERROR)
s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
- sg_dma_address(s) = IOMMU_MAPPING_ERROR;
+ sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
}
@@ -858,11 +856,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
{
@@ -882,7 +875,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return NULL;
iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
- if (iommu_dma_mapping_error(dev, iova))
+ if (iova == DMA_MAPPING_ERROR)
goto out_free_page;
INIT_LIST_HEAD(&msi_page->list);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 41a4b8808802b..0ad67d65bbce2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
return 0;
}
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int dir,
+ u64 dma_mask)
{
+ phys_addr_t paddr = page_to_phys(page) + offset;
struct dmar_domain *domain;
phys_addr_t start_paddr;
unsigned long iova_pfn;
@@ -3615,7 +3617,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
domain = get_valid_domain_for_dev(dev);
if (!domain)
- return 0;
+ return DMA_MAPPING_ERROR;
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
@@ -3653,7 +3655,7 @@ error:
free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
- return 0;
+ return DMA_MAPPING_ERROR;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
+ return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,10 +3754,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
return NULL;
memset(page_address(page), 0, size);
- *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
- DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (*dma_handle)
+ *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
+ if (*dma_handle != DMA_MAPPING_ERROR)
return page_address(page);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order);
@@ -3865,11 +3865,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return nelems;
}
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return !dma_addr;
-}
-
static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
@@ -3877,7 +3872,6 @@ static const struct dma_map_ops intel_dma_ops = {
.unmap_sg = intel_unmap_sg,
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
- .mapping_error = intel_mapping_error,
.dma_supported = dma_direct_supported,
};
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index c327985c9523b..6479435ac96bf 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -149,7 +149,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
dma_addr_t tmp;
- void *va = kmalloc(size, gfp);
+ void *va = kmalloc(size, gfp | __GFP_ZERO);
if (va) {
tmp = mic_map_single(mdev, va, size);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 701a7d6a74d5c..714aac72df0ec 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -110,8 +110,6 @@
#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
#define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
-#define CCIO_MAPPING_ERROR (~(dma_addr_t)0)
-
struct ioa_registers {
/* Runway Supervisory Set */
int32_t unused1[12];
@@ -740,7 +738,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
- return CCIO_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
BUG_ON(size <= 0);
@@ -1021,11 +1019,6 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
-static int ccio_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == CCIO_MAPPING_ERROR;
-}
-
static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
@@ -1034,7 +1027,6 @@ static const struct dma_map_ops ccio_ops = {
.unmap_page = ccio_unmap_page,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
- .mapping_error = ccio_mapping_error,
};
#ifdef CONFIG_PROC_FS
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index c1e599a429af9..452d306ce5cb8 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -93,8 +93,6 @@
#define DEFAULT_DMA_HINT_REG 0
-#define SBA_MAPPING_ERROR (~(dma_addr_t)0)
-
struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);
@@ -725,7 +723,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
ioc = GET_IOC(dev);
if (!ioc)
- return SBA_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -1080,11 +1078,6 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
}
-static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == SBA_MAPPING_ERROR;
-}
-
static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
@@ -1093,7 +1086,6 @@ static const struct dma_map_ops sba_ops = {
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
- .mapping_error = sba_mapping_error,
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e50b0b5815ffe..3890812cdf87e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -307,39 +307,32 @@ static struct device *to_vmd_dev(struct device *dev)
return &vmd->dev->dev;
}
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
-{
- return get_dma_ops(to_vmd_dev(dev));
-}
-
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
gfp_t flag, unsigned long attrs)
{
- return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
- attrs);
+ return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}
static void vmd_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t addr, unsigned long attrs)
{
- return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
- attrs);
+ return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}
static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t addr, size_t size,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
- size, attrs);
+ return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+ attrs);
}
static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t addr, size_t size,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
- addr, size, attrs);
+ return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+ attrs);
}
static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
@@ -347,66 +340,60 @@ static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
- dir, attrs);
+ return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+ attrs);
}
static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+ dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}
static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+ return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+ dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+ dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}
static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
- dir);
+ dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}
static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+ dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}
static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
- return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
+ dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}
static int vmd_dma_supported(struct device *dev, u64 mask)
{
- return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+ return dma_supported(to_vmd_dev(dev), mask);
}
static u64 vmd_get_required_mask(struct device *dev)
{
- return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+ return dma_get_required_mask(to_vmd_dev(dev));
}
static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
@@ -446,7 +433,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
- ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
add_dma_domain(domain);
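The vmd rework also shows why raw ops dereferences had to go: with the direct mapping selected, get_dma_ops() on the parent device can return NULL, so a call such as vmd_dma_ops(dev)->map_page() would dereference a NULL pointer. Routing through the public helpers lets the core choose between dma-direct and an IOMMU; the idiom, as a hedged standalone sketch:

#include <linux/dma-mapping.h>

/* The child-visible op calls the public DMA API on the parent device
 * instead of dereferencing the parent's (possibly NULL) dma_map_ops. */
static dma_addr_t forward_map_page(struct device *parent, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return dma_map_page_attrs(parent, page, offset, size, dir, attrs);
}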
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bef17c3fca67c..ea55444e6eadd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1600,10 +1600,8 @@ static int pci_dma_configure(struct device *dev)
ret = of_dma_configure(dev, bridge->parent->of_node, true);
} else if (has_acpi_companion(bridge)) {
struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
- enum dev_dma_attr attr = acpi_get_dma_attr(adev);
- if (attr != DEV_DMA_NOT_SUPPORTED)
- ret = acpi_dma_configure(dev, attr);
+ ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
}
pci_put_host_bridge_device(bridge);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2a7f545bd0b57..989cf872b98c6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -53,8 +53,6 @@
* API.
*/
-#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)
-
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -405,8 +403,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs);
- if (map == SWIOTLB_MAP_ERROR)
- return XEN_SWIOTLB_ERROR_CODE;
+ if (map == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -421,7 +419,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
- return XEN_SWIOTLB_ERROR_CODE;
+ return DMA_MAPPING_ERROR;
}
/*
@@ -443,21 +441,8 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
/* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr)) {
+ if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
- return;
- }
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- /*
- * phys_to_virt doesn't work with highmem pages but we could
- * call dma_mark_clean() with a highmem page here. However, we
- * are fine since dma_mark_clean() is null on POWERPC. We can
- * make dma_mark_clean() take a physical address if necessary.
- */
- dma_mark_clean(phys_to_virt(paddr), size);
}
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -495,11 +480,6 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
if (target == SYNC_FOR_DEVICE)
xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- dma_mark_clean(phys_to_virt(paddr), size);
}
void
@@ -574,7 +554,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_phys(sg),
sg->length,
dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
+ if (map == DMA_MAPPING_ERROR) {
dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
@@ -700,11 +680,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}
-static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == XEN_SWIOTLB_ERROR_CODE;
-}
-
const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
@@ -719,5 +694,4 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.dma_supported = xen_swiotlb_dma_supported,
.mmap = xen_swiotlb_dma_mmap,
.get_sgtable = xen_swiotlb_get_sgtable,
- .mapping_error = xen_swiotlb_mapping_error,
};
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792fb..c13f46109e888 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,7 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &dma_direct_ops;
+ return NULL;
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
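Returning NULL here is what gives dma_is_direct(), added below in dma-mapping.h, something to test: an absent ops table now means "use the direct mapping". The lookup works roughly as follows (a paraphrase of get_dma_ops(), not its verbatim source):

static inline const struct dma_map_ops *sketch_get_dma_ops(struct device *dev)
{
        if (dev && dev->dma_ops)
                return dev->dma_ops;            /* per-device override */
        return get_arch_dma_ops(dev ? dev->bus : NULL); /* NULL => dma-direct */
}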
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 30213adbb6b9d..2ad5c363d7d57 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -30,8 +30,6 @@ struct bus_type;
extern void dma_debug_add_bus(struct bus_type *bus);
-extern int dma_debug_resize_entries(u32 num_entries);
-
extern void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len);
@@ -72,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction);
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction);
-
extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
@@ -101,11 +88,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
-static inline int dma_debug_resize_entries(u32 num_entries)
-{
- return 0;
-}
-
static inline void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len)
{
@@ -174,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
{
}
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 9e66bfe369aa0..b7338702592a6 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,8 +5,6 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
-#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
-
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
@@ -50,14 +48,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
-#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
-void dma_mark_clean(void *addr, size_t size);
-#else
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
-
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
@@ -67,11 +57,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
+struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e8ca5e6542773..e760dc5d1fa80 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d327bdd53716f..ba521d5506c90 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -128,13 +128,14 @@ struct dma_map_ops {
enum dma_data_direction dir);
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
};
-extern const struct dma_map_ops dma_direct_ops;
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
+
extern const struct dma_map_ops dma_virt_ops;
+extern const struct dma_map_ops dma_dummy_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -220,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
}
#endif
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+ return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly by drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
@@ -230,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
BUG_ON(!valid_dma_direction(dir));
debug_dma_map_single(dev, ptr, size);
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, attrs);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
+ else
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
debug_dma_map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
dir, addr, true);
@@ -247,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_page(dev, addr, size, dir, attrs);
+ else if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+}
+
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
* It should never return a value < 0.
@@ -264,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int ents;
BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ if (dma_is_direct(ops))
+ ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ else
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
@@ -279,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+ else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
@@ -293,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ else
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr,
size_t size,
@@ -327,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
addr = phys_addr;
- if (ops->map_resource)
+ if (ops && ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -342,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_resource)
+ if (ops && ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
@@ -354,11 +424,20 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ else if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -366,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_device(dev, addr, size, dir);
+ else if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
@@ -406,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
@@ -418,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
@@ -431,16 +495,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->cache_sync)
- ops->cache_sync(dev, vaddr, size, dir);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir);
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -455,107 +511,29 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
+bool dma_in_atomic_pool(void *start, size_t size);
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(void *start, size_t size);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs);
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
-}
-
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
- WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
- if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- /* let the implementation decide on the zone to allocate from: */
- flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if (!arch_dma_alloc_attrs(&dev))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
-
- if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
- return;
- /*
- * On non-coherent platforms which implement DMA-coherent buffers via
- * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
- * this far in IRQ context is a) at risk of a BUG_ON() or trying to
- * sleep on some machines, and b) an indication that the driver is
- * probably misusing the coherent API anyway.
- */
- WARN_ON(irqs_disabled());
-
- if (!ops->free || !cpu_addr)
- return;
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -573,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
debug_dma_mapping_error(dev, dma_addr);
- if (ops->mapping_error)
- return ops->mapping_error(dev, dma_addr);
- return 0;
-}
-
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
- if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
- dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- dma_check_mask(dev, mask);
-
- *dev->dma_mask = mask;
+ if (dma_addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
return 0;
}
-#endif
+
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
static inline u64 dma_get_mask(struct device *dev)
{
@@ -618,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev)
return DMA_BIT_MASK(32);
}
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (!dma_supported(dev, mask))
- return -EIO;
-
- dma_check_mask(dev, mask);
-
- dev->coherent_dma_mask = mask;
- return 0;
-}
-#endif
-
/*
* Set both the DMA mask and the coherent DMA mask to the same thing.
* Note that we don't check the return value from dma_set_coherent_mask()
@@ -676,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
@@ -709,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#endif
+/*
+ * Please always use dma_alloc_coherent instead as it already zeroes the memory!
+ */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle,
- flag | __GFP_ZERO);
- return ret;
+ return dma_alloc_coherent(dev, size, dma_handle, flag);
}
static inline int dma_get_cache_alignment(void)
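The two hunks above change driver-visible behaviour: dma_alloc_coherent() now returns zeroed memory on every architecture, and mapping failures are detected by comparing against the single DMA_MAPPING_ERROR sentinel rather than through a per-ops ->mapping_error hook. A minimal sketch of the resulting driver-side pattern (device, buffer, and label names are illustrative, not part of this patch):

	/* no __GFP_ZERO needed any more; the core zeroes the buffer */
	buf = dma_alloc_coherent(dev, size, &bus_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* streaming map: the error check is now a plain sentinel compare */
	addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		goto err_free_coherent;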
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 9051b055beec8..69b36ed31a99c 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+{
+}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -69,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+void arch_dma_prep_coherent(struct page *page, size_t size);
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
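arch_dma_prep_coherent() is the hook the new generic remapping allocator (kernel/dma/remap.c below) calls to clean the kernel's cacheable linear alias before a page is handed out through a non-cacheable mapping. An architecture's implementation typically reduces to a writeback-plus-invalidate over the range; a rough sketch, in which the cache-maintenance helper is a hypothetical stand-in for the real arch primitive:

	void arch_dma_prep_coherent(struct page *page, size_t size)
	{
		void *addr = page_address(page);

		/* my_arch_wb_inv_dcache_range() is a placeholder name */
		my_arch_wb_inv_dcache_range(addr, size);
	}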
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 093aa57120b0c..b96f0d0b5b8f3 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
* Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
* is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
*/
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS 2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS 2048
#endif
#ifdef CONFIG_SG_POOL
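Note the inverted polarity: scatterlist chaining is now the default, and SG_MAX_SEGMENTS only collapses to SG_CHUNK_SIZE on architectures that explicitly select the new opt-out symbol. An opted-out architecture's Kconfig entry would carry something like this (hypothetical fragment):

	config MYARCH
		bool "My architecture"
		select ARCH_NO_SG_CHAIN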
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index a387b59640a4b..7c007ed7505f9 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -16,8 +16,6 @@ enum swiotlb_force {
SWIOTLB_NO_FORCE, /* swiotlb=noforce */
};
-extern enum swiotlb_force swiotlb_force;
-
/*
* Maximum allowable number of contiguous slabs to map,
* must be a power of 2. What is the appropriate value ?
@@ -46,9 +44,6 @@ enum dma_sync_target {
SYNC_FOR_DEVICE = 1,
};
-/* define the last possible byte of physical address space as a mapping error */
-#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
-
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
@@ -65,56 +60,44 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
-/* Accessory functions. */
-
-extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
-extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_exit(void);
+extern enum swiotlb_force swiotlb_force;
+extern phys_addr_t io_tlb_start, io_tlb_end;
+
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_exit(void) { }
-static inline unsigned int swiotlb_max_segment(void) { return 0; }
-#endif
+#define swiotlb_force SWIOTLB_NO_FORCE
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return false;
+}
+static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
+ dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return false;
+}
+static inline void swiotlb_exit(void)
+{
+}
+static inline unsigned int swiotlb_max_segment(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);
-extern const struct dma_map_ops swiotlb_dma_ops;
-
#endif /* __LINUX_SWIOTLB_H */
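Exporting io_tlb_start/io_tlb_end and stubbing the helpers for !CONFIG_SWIOTLB lets callers test for bounce buffers unconditionally: with SWIOTLB disabled, is_swiotlb_buffer() is constant false and the whole branch compiles away. This is exactly the pattern kernel/dma/direct.c adopts further down, e.g.:

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);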
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 645c7a2ecde8b..ca88b867e7fea 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -35,13 +35,8 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
config ARCH_HAS_DMA_MMAP_PGPROT
bool
-config DMA_DIRECT_OPS
- bool
- depends on HAS_DMA
-
config DMA_NONCOHERENT_CACHE_SYNC
bool
- depends on DMA_DIRECT_OPS
config DMA_VIRT_OPS
bool
@@ -49,5 +44,12 @@ config DMA_VIRT_OPS
config SWIOTLB
bool
- select DMA_DIRECT_OPS
select NEED_DMA_MAP_STATE
+
+config DMA_REMAP
+ depends on MMU
+ bool
+
+config DMA_DIRECT_REMAP
+ bool
+ select DMA_REMAP
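DMA_DIRECT_OPS can go away because the direct mapping is now built unconditionally (see the Makefile change below); only the remapping support remains configurable. An architecture wanting the generic non-coherent allocator would select the new symbol from its Kconfig, roughly as follows (hypothetical fragment; the MMU guard mirrors DMA_REMAP's dependency, since select does not check dependencies):

	config MYARCH
		bool "My architecture"
		select DMA_DIRECT_REMAP if MMU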
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 7d581e4eea4a2..72ff6e46aa866 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -1,10 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_HAS_DMA) += mapping.o
+obj-$(CONFIG_HAS_DMA) += mapping.o direct.o dummy.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
-obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o
obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
-
+obj-$(CONFIG_DMA_REMAP) += remap.o
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 231ca4628062b..164706da2a73f 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -17,6 +17,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "DMA-API: " fmt
+
#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
@@ -41,10 +43,9 @@
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)
-/* allow architectures to override this if absolutely required */
-#ifndef PREALLOC_DMA_DEBUG_ENTRIES
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-#endif
+/* If the pool runs out, add this many new entries at once */
+#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
enum {
dma_debug_single,
@@ -142,6 +143,7 @@ static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *nr_total_entries_dent __read_mostly;
static struct dentry *filter_dent __read_mostly;
/* per-driver filter related state */
@@ -234,7 +236,7 @@ static bool driver_filter(struct device *dev)
error_count += 1; \
if (driver_filter(dev) && \
(show_all_errors || show_num_errors > 0)) { \
- WARN(1, "%s %s: " format, \
+ WARN(1, pr_fmt("%s %s: ") format, \
dev ? dev_driver_string(dev) : "NULL", \
dev ? dev_name(dev) : "NULL", ## arg); \
dump_entry_trace(entry); \
@@ -519,7 +521,7 @@ static void active_cacheline_inc_overlap(phys_addr_t cln)
* prematurely.
*/
WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
- "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+ pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
@@ -614,7 +616,7 @@ void debug_dma_assert_idle(struct page *page)
cln = to_cacheline_number(entry);
err_printk(entry->dev, entry,
- "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+ "cpu touching an active dma mapped cacheline [cln=%pa]\n",
&cln);
}
@@ -634,7 +636,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
rc = active_cacheline_insert(entry);
if (rc == -ENOMEM) {
- pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+ pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
}
@@ -643,6 +645,24 @@ static void add_dma_entry(struct dma_debug_entry *entry)
*/
}
+static int dma_debug_create_entries(gfp_t gfp)
+{
+ struct dma_debug_entry *entry;
+ int i;
+
+ entry = (void *)get_zeroed_page(gfp);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
+ list_add_tail(&entry[i].list, &free_entries);
+
+ num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
+ nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
+
+ return 0;
+}
+
static struct dma_debug_entry *__dma_entry_alloc(void)
{
struct dma_debug_entry *entry;
@@ -658,6 +678,18 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
return entry;
}
+static void __dma_entry_alloc_check_leak(void)
+{
+ u32 tmp = nr_total_entries % nr_prealloc_entries;
+
+ /* Shout each time we tick over some multiple of the initial pool */
+ if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
+ pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
+ nr_total_entries,
+ (nr_total_entries / nr_prealloc_entries));
+ }
+}
+
/* struct dma_entry allocator
*
* The next two functions implement the allocator for
@@ -669,12 +701,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
unsigned long flags;
spin_lock_irqsave(&free_entries_lock, flags);
-
- if (list_empty(&free_entries)) {
- global_disable = true;
- spin_unlock_irqrestore(&free_entries_lock, flags);
- pr_err("DMA-API: debugging out of memory - disabling\n");
- return NULL;
+ if (num_free_entries == 0) {
+ if (dma_debug_create_entries(GFP_ATOMIC)) {
+ global_disable = true;
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+ pr_err("debugging out of memory - disabling\n");
+ return NULL;
+ }
+ __dma_entry_alloc_check_leak();
}
entry = __dma_entry_alloc();
@@ -707,52 +741,6 @@ static void dma_entry_free(struct dma_debug_entry *entry)
spin_unlock_irqrestore(&free_entries_lock, flags);
}
-int dma_debug_resize_entries(u32 num_entries)
-{
- int i, delta, ret = 0;
- unsigned long flags;
- struct dma_debug_entry *entry;
- LIST_HEAD(tmp);
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- if (nr_total_entries < num_entries) {
- delta = num_entries - nr_total_entries;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- for (i = 0; i < delta; i++) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
-
- list_add_tail(&entry->list, &tmp);
- }
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- list_splice(&tmp, &free_entries);
- nr_total_entries += i;
- num_free_entries += i;
- } else {
- delta = nr_total_entries - num_entries;
-
- for (i = 0; i < delta && !list_empty(&free_entries); i++) {
- entry = __dma_entry_alloc();
- kfree(entry);
- }
-
- nr_total_entries -= i;
- }
-
- if (nr_total_entries != num_entries)
- ret = 1;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- return ret;
-}
-
/*
* DMA-API debugging init code
*
@@ -761,36 +749,6 @@ int dma_debug_resize_entries(u32 num_entries)
* 2. Preallocate a given number of dma_debug_entry structs
*/
-static int prealloc_memory(u32 num_entries)
-{
- struct dma_debug_entry *entry, *next_entry;
- int i;
-
- for (i = 0; i < num_entries; ++i) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto out_err;
-
- list_add_tail(&entry->list, &free_entries);
- }
-
- num_free_entries = num_entries;
- min_free_entries = num_entries;
-
- pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
-
- return 0;
-
-out_err:
-
- list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- return -ENOMEM;
-}
-
static ssize_t filter_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -850,7 +808,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
* switched off.
*/
if (current_driver_name[0])
- pr_info("DMA-API: switching off dma-debug driver filter\n");
+ pr_info("switching off dma-debug driver filter\n");
current_driver_name[0] = 0;
current_driver = NULL;
goto out_unlock;
@@ -868,7 +826,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
current_driver_name[i] = 0;
current_driver = NULL;
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
+ pr_info("enable driver filter for driver [%s]\n",
current_driver_name);
out_unlock:
@@ -887,7 +845,7 @@ static int dma_debug_fs_init(void)
{
dma_debug_dent = debugfs_create_dir("dma-api", NULL);
if (!dma_debug_dent) {
- pr_err("DMA-API: can not create debugfs directory\n");
+ pr_err("can not create debugfs directory\n");
return -ENOMEM;
}
@@ -926,6 +884,12 @@ static int dma_debug_fs_init(void)
if (!min_free_entries_dent)
goto out_err;
+ nr_total_entries_dent = debugfs_create_u32("nr_total_entries", 0444,
+ dma_debug_dent,
+ &nr_total_entries);
+ if (!nr_total_entries_dent)
+ goto out_err;
+
filter_dent = debugfs_create_file("driver_filter", 0644,
dma_debug_dent, NULL, &filter_fops);
if (!filter_dent)
@@ -973,7 +937,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
count = device_dma_allocations(dev, &entry);
if (count == 0)
break;
- err_printk(dev, entry, "DMA-API: device driver has pending "
+ err_printk(dev, entry, "device driver has pending "
"DMA allocations while released from device "
"[count=%d]\n"
"One of leaked entries details: "
@@ -1009,7 +973,7 @@ void dma_debug_add_bus(struct bus_type *bus)
static int dma_debug_init(void)
{
- int i;
+ int i, nr_pages;
/* Do not use dma_debug_initialized here, since we really want to be
* called to set dma_debug_initialized
@@ -1023,24 +987,31 @@ static int dma_debug_init(void)
}
if (dma_debug_fs_init() != 0) {
- pr_err("DMA-API: error creating debugfs entries - disabling\n");
+ pr_err("error creating debugfs entries - disabling\n");
global_disable = true;
return 0;
}
- if (prealloc_memory(nr_prealloc_entries) != 0) {
- pr_err("DMA-API: debugging out of memory error - disabled\n");
+ nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
+ for (i = 0; i < nr_pages; ++i)
+ dma_debug_create_entries(GFP_KERNEL);
+ if (num_free_entries >= nr_prealloc_entries) {
+ pr_info("preallocated %d debug entries\n", nr_total_entries);
+ } else if (num_free_entries > 0) {
+ pr_warn("%d debug entries requested but only %d allocated\n",
+ nr_prealloc_entries, nr_total_entries);
+ } else {
+ pr_err("debugging out of memory error - disabled\n");
global_disable = true;
return 0;
}
-
- nr_total_entries = num_free_entries;
+ min_free_entries = num_free_entries;
dma_debug_initialized = true;
- pr_info("DMA-API: debugging enabled by kernel config\n");
+ pr_info("debugging enabled by kernel config\n");
return 0;
}
core_initcall(dma_debug_init);
@@ -1051,7 +1022,7 @@ static __init int dma_debug_cmdline(char *str)
return -EINVAL;
if (strncmp(str, "off", 3) == 0) {
- pr_info("DMA-API: debugging disabled on kernel command line\n");
+ pr_info("debugging disabled on kernel command line\n");
global_disable = true;
}
@@ -1085,11 +1056,11 @@ static void check_unmap(struct dma_debug_entry *ref)
if (dma_mapping_error(ref->dev, ref->dev_addr)) {
err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free an "
+ "device driver tries to free an "
"invalid DMA memory address\n");
} else {
err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free DMA "
+ "device driver tries to free DMA "
"memory it has not allocated [device "
"address=0x%016llx] [size=%llu bytes]\n",
ref->dev_addr, ref->size);
@@ -1098,7 +1069,7 @@ static void check_unmap(struct dma_debug_entry *ref)
}
if (ref->size != entry->size) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
+ err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different size "
"[device address=0x%016llx] [map size=%llu bytes] "
"[unmap size=%llu bytes]\n",
@@ -1106,7 +1077,7 @@ static void check_unmap(struct dma_debug_entry *ref)
}
if (ref->type != entry->type) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
+ err_printk(ref->dev, entry, "device driver frees "
"DMA memory with wrong function "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped as %s] [unmapped as %s]\n",
@@ -1114,7 +1085,7 @@ static void check_unmap(struct dma_debug_entry *ref)
type2name[entry->type], type2name[ref->type]);
} else if ((entry->type == dma_debug_coherent) &&
(phys_addr(ref) != phys_addr(entry))) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
+ err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different CPU address "
"[device address=0x%016llx] [size=%llu bytes] "
"[cpu alloc address=0x%016llx] "
@@ -1126,7 +1097,7 @@ static void check_unmap(struct dma_debug_entry *ref)
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
+ err_printk(ref->dev, entry, "device driver frees "
"DMA sg list with different entry count "
"[map count=%d] [unmap count=%d]\n",
entry->sg_call_ents, ref->sg_call_ents);
@@ -1137,7 +1108,7 @@ static void check_unmap(struct dma_debug_entry *ref)
* DMA API don't handle this properly, so check for it here
*/
if (ref->direction != entry->direction) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
+ err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different direction "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [unmapped with %s]\n",
@@ -1153,7 +1124,7 @@ static void check_unmap(struct dma_debug_entry *ref)
*/
if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
err_printk(ref->dev, entry,
- "DMA-API: device driver failed to check map error"
+ "device driver failed to check map error"
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped as %s]",
ref->dev_addr, ref->size,
@@ -1178,7 +1149,7 @@ static void check_for_stack(struct device *dev,
return;
addr = page_address(page) + offset;
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
+ err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
} else {
/* Stack is vmalloced. */
int i;
@@ -1188,7 +1159,7 @@ static void check_for_stack(struct device *dev,
continue;
addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
+ err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
break;
}
}
@@ -1208,7 +1179,7 @@ static void check_for_illegal_area(struct device *dev, void *addr, unsigned long
{
if (overlap(addr, len, _stext, _etext) ||
overlap(addr, len, __start_rodata, __end_rodata))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+ err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
@@ -1224,7 +1195,7 @@ static void check_sync(struct device *dev,
entry = bucket_find_contain(&bucket, ref, &flags);
if (!entry) {
- err_printk(dev, NULL, "DMA-API: device driver tries "
+ err_printk(dev, NULL, "device driver tries "
"to sync DMA memory it has not allocated "
"[device address=0x%016llx] [size=%llu bytes]\n",
(unsigned long long)ref->dev_addr, ref->size);
@@ -1232,7 +1203,7 @@ static void check_sync(struct device *dev,
}
if (ref->size > entry->size) {
- err_printk(dev, entry, "DMA-API: device driver syncs"
+ err_printk(dev, entry, "device driver syncs"
" DMA memory outside allocated range "
"[device address=0x%016llx] "
"[allocation size=%llu bytes] "
@@ -1245,7 +1216,7 @@ static void check_sync(struct device *dev,
goto out;
if (ref->direction != entry->direction) {
- err_printk(dev, entry, "DMA-API: device driver syncs "
+ err_printk(dev, entry, "device driver syncs "
"DMA memory with different direction "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
@@ -1256,7 +1227,7 @@ static void check_sync(struct device *dev,
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
!(ref->direction == DMA_TO_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
+ err_printk(dev, entry, "device driver syncs "
"device read-only DMA memory for cpu "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
@@ -1266,7 +1237,7 @@ static void check_sync(struct device *dev,
if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
!(ref->direction == DMA_FROM_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
+ err_printk(dev, entry, "device driver syncs "
"device write-only DMA memory to device "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
@@ -1276,7 +1247,7 @@ static void check_sync(struct device *dev,
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver syncs "
+ err_printk(ref->dev, entry, "device driver syncs "
"DMA sg list with different entry count "
"[map count=%d] [sync count=%d]\n",
entry->sg_call_ents, ref->sg_call_ents);
@@ -1297,7 +1268,7 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
* whoever generated the list forgot to check them.
*/
if (sg->length > max_seg)
- err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
+ err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
sg->length, max_seg);
/*
* In some cases this could potentially be the DMA API
@@ -1307,7 +1278,7 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
start = sg_dma_address(sg);
end = start + sg_dma_len(sg) - 1;
if ((start ^ end) & ~boundary)
- err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
+ err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
start, end, boundary);
#endif
}
@@ -1319,11 +1290,11 @@ void debug_dma_map_single(struct device *dev, const void *addr,
return;
if (!virt_addr_valid(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
+ err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
addr, len);
if (is_vmalloc_addr(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
+ err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);
@@ -1662,48 +1633,6 @@ void debug_dma_sync_single_for_device(struct device *dev,
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
-
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
{
@@ -1780,7 +1709,7 @@ static int __init dma_debug_driver_setup(char *str)
}
if (current_driver_name[0])
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
+ pr_info("enable driver filter for driver [%s]\n",
current_driver_name);
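A worked example of the new dma-debug pool accounting: entries are now carved out of whole pages, DMA_DEBUG_DYNAMIC_ENTRIES per get_zeroed_page() call, and dma_debug_init() rounds the nr_prealloc_entries default (1 << 16 = 65536) up to whole pages. __dma_entry_alloc_check_leak() tests nr_total_entries % nr_prealloc_entries against that per-page step, so the "pool grown" message fires roughly once per 100% of the initial pool size: with the default, expect it near 131072 total entries (131072 % 65536 == 0, printed as "200%"), near 196608 ("300%"), and so on, which makes a slow map-without-unmap leak visible in the log.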
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 375c77e8d52fa..355d16acee6dd 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -13,6 +13,7 @@
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
+#include <linux/swiotlb.h>
/*
* Most architectures use ZONE_DMA for the first 16 Megabytes, but
@@ -30,27 +31,16 @@ static inline bool force_dma_unencrypted(void)
return sev_active();
}
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
- const char *caller)
+static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
- if (!dev->dma_mask) {
- dev_err(dev,
- "%s: call on device without dma_mask\n",
- caller);
- return false;
- }
-
- if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
- dev_err(dev,
- "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
- caller, &dma_addr, size,
- *dev->dma_mask, dev->bus_dma_mask);
- }
- return false;
+ if (!dev->dma_mask) {
+ dev_err_once(dev, "DMA map on device without dma_mask\n");
+ } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
+ dev_err_once(dev,
+ "overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
}
- return true;
+ WARN_ON_ONCE(1);
}
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
@@ -103,14 +93,13 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}
-void *dma_direct_alloc_pages(struct device *dev, size_t size,
+struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
int page_order = get_order(size);
struct page *page = NULL;
u64 phys_mask;
- void *ret;
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
@@ -150,11 +139,34 @@ again:
}
}
+ return page;
+}
+
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ struct page *page;
+ void *ret;
+
+ page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
if (!page)
return NULL;
+
+ if (PageHighMem(page)) {
+ /*
+ * Depending on the cma= arguments and per-arch setup
+ * dma_alloc_from_contiguous could return highmem pages.
+ * Without remapping there is no way to return them here,
+ * so log an error and fail.
+ */
+ dev_info(dev, "Rejecting highmem page from CMA.\n");
+ __dma_direct_free_pages(dev, size, page);
+ return NULL;
+ }
+
ret = page_address(page);
if (force_dma_unencrypted()) {
- set_memory_decrypted((unsigned long)ret, 1 << page_order);
+ set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
*dma_handle = __phys_to_dma(dev, page_to_phys(page));
} else {
*dma_handle = phys_to_dma(dev, page_to_phys(page));
@@ -163,20 +175,22 @@ again:
return ret;
}
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
+void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
+}
+
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned int page_order = get_order(size);
if (force_dma_unencrypted())
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
- if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
- free_pages((unsigned long)cpu_addr, page_order);
+ __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}
void *dma_direct_alloc(struct device *dev, size_t size,
@@ -196,67 +210,111 @@ void dma_direct_free(struct device *dev, size_t size,
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
-static void dma_direct_sync_single_for_device(struct device *dev,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- if (dev_is_dma_coherent(dev))
- return;
- arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(dev, paddr, size, dir);
}
+EXPORT_SYMBOL(dma_direct_sync_single_for_device);
-static void dma_direct_sync_sg_for_device(struct device *dev,
+void dma_direct_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
- return;
+ for_each_sg(sgl, sg, nents, i) {
+ if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+ dir, SYNC_FOR_DEVICE);
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ dir);
+ }
}
+EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
+#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_direct_sync_single_for_cpu(struct device *dev,
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- if (dev_is_dma_coherent(dev))
- return;
- arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
- arch_sync_dma_for_cpu_all(dev);
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (!dev_is_dma_coherent(dev)) {
+ arch_sync_dma_for_cpu(dev, paddr, size, dir);
+ arch_sync_dma_for_cpu_all(dev);
+ }
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
+EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);
-static void dma_direct_sync_sg_for_cpu(struct device *dev,
+void dma_direct_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
- return;
+ for_each_sg(sgl, sg, nents, i) {
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+
+ if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
+ SYNC_FOR_CPU);
+ }
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
- arch_sync_dma_for_cpu_all(dev);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu_all(dev);
}
+EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
-static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+ if (unlikely(is_swiotlb_buffer(phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
+EXPORT_SYMBOL(dma_direct_unmap_page);
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
+ attrs);
}
+EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
+static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
+ size_t size)
+{
+ return swiotlb_force != SWIOTLB_FORCE &&
+ (!dev || dma_capable(dev, dma_addr, size));
+}
+
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -264,13 +322,17 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma_addr = phys_to_dma(dev, phys);
- if (!check_addr(dev, dma_addr, size, __func__))
- return DIRECT_MAPPING_ERROR;
+ if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
+ !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
+ report_addr(dev, dma_addr, size);
+ return DMA_MAPPING_ERROR;
+ }
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, phys, size, dir);
return dma_addr;
}
+EXPORT_SYMBOL(dma_direct_map_page);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
@@ -279,18 +341,20 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
- if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
- return 0;
+ sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
+ sg->offset, sg->length, dir, attrs);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ goto out_unmap;
sg_dma_len(sg) = sg->length;
}
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
return nents;
+
+out_unmap:
+ dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return 0;
}
+EXPORT_SYMBOL(dma_direct_map_sg);
/*
* Because 32-bit DMA masks are so common we expect every architecture to be
@@ -316,31 +380,3 @@ int dma_direct_supported(struct device *dev, u64 mask)
*/
return mask >= __phys_to_dma(dev, min_mask);
}
-
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc,
- .free = dma_direct_free,
- .map_page = dma_direct_map_page,
- .map_sg = dma_direct_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
- .sync_single_for_device = dma_direct_sync_single_for_device,
- .sync_sg_for_device = dma_direct_sync_sg_for_device,
-#endif
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
- .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
- .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
- .unmap_page = dma_direct_unmap_page,
- .unmap_sg = dma_direct_unmap_sg,
-#endif
- .get_required_mask = dma_direct_get_required_mask,
- .dma_supported = dma_direct_supported,
- .mapping_error = dma_direct_mapping_error,
- .cache_sync = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_direct_ops);
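With swiotlb merged in, the streaming fast path of dma-direct involves no indirect call at all. A condensed paraphrase of dma_direct_map_page() above, not literal code (surrounding declarations assumed):

	dma_addr = phys_to_dma(dev, phys);
	if (dma_direct_possible(dev, dma_addr, size)) {
		/* fast path: the device can address the buffer directly */
	} else if (!swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);	/* warns once */
		return DMA_MAPPING_ERROR;
	}
	/* ...then arch_sync_dma_for_device() unless coherent or skipped */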
diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
new file mode 100644
index 0000000000000..05607642c888d
--- /dev/null
+++ b/kernel/dma/dummy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dummy DMA ops that always fail.
+ */
+#include <linux/dma-mapping.h>
+
+static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ return -ENXIO;
+}
+
+static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+
+static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return 0;
+}
+
+static int dma_dummy_supported(struct device *hwdev, u64 mask)
+{
+ return 0;
+}
+
+const struct dma_map_ops dma_dummy_ops = {
+ .mmap = dma_dummy_mmap,
+ .map_page = dma_dummy_map_page,
+ .map_sg = dma_dummy_map_sg,
+ .dma_supported = dma_dummy_supported,
+};
+EXPORT_SYMBOL(dma_dummy_ops);
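With dma_dummy_ops installed on a device that must not perform DMA, every path fails gracefully instead of dereferencing a NULL ops pointer: map_page returns DMA_MAPPING_ERROR, map_sg returns 0, and mask negotiation is refused outright. From a driver's point of view (sketch):

	/* dma_dummy_supported() returns 0, so this fails with -EIO */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;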
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 58dec7a92b7b5..d7c34d2d1ba58 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -5,8 +5,9 @@
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
*/
-
+#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
+#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -223,7 +224,20 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
return ret;
}
-EXPORT_SYMBOL(dma_common_get_sgtable);
+
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!dma_is_direct(ops) && ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+}
+EXPORT_SYMBOL(dma_get_sgtable_attrs);
/*
* Create userspace mapping for the DMA-coherent memory.
@@ -261,88 +275,179 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
-EXPORT_SYMBOL(dma_common_mmap);
-#ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
- size_t size, unsigned long vm_flags, pgprot_t prot,
- const void *caller)
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @dma_addr: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
+ * space. The coherent DMA buffer must not be freed by the driver until the
+ * user space mapping has been released.
+ */
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
- struct vm_struct *area;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
- area = get_vm_area_caller(size, vm_flags, caller);
- if (!area)
- return NULL;
+ if (!dma_is_direct(ops) && ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+EXPORT_SYMBOL(dma_mmap_attrs);
- if (map_vm_area(area, prot, pages)) {
- vunmap(area->addr);
- return NULL;
+#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
+static u64 dma_default_get_required_mask(struct device *dev)
+{
+ u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+ u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+ u64 mask;
+
+ if (!high_totalram) {
+ /* convert to mask just covering totalram */
+ low_totalram = (1 << (fls(low_totalram) - 1));
+ low_totalram += low_totalram - 1;
+ mask = low_totalram;
+ } else {
+ high_totalram = (1 << (fls(high_totalram) - 1));
+ high_totalram += high_totalram - 1;
+ mask = (((u64)high_totalram) << 32) + 0xffffffff;
}
+ return mask;
+}
- return area;
+u64 dma_get_required_mask(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_is_direct(ops))
+ return dma_direct_get_required_mask(dev);
+ if (ops->get_required_mask)
+ return ops->get_required_mask(dev);
+ return dma_default_get_required_mask(dev);
}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+#endif
-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller)
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev) (true)
+#endif
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs)
{
- struct vm_struct *area;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
- area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
- if (!area)
+ if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ /* let the implementation decide on the zone to allocate from: */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+ if (!arch_dma_alloc_attrs(&dev))
return NULL;
- area->pages = pages;
+ if (dma_is_direct(ops))
+ cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+ else if (ops->alloc)
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ else
+ return NULL;
- return area->addr;
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
}
+EXPORT_SYMBOL(dma_alloc_attrs);
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
- pgprot_t prot, const void *caller)
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- int i;
- struct page **pages;
- struct vm_struct *area;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
- pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
- if (!pages)
- return NULL;
+ if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
+ return;
+ /*
+ * On non-coherent platforms which implement DMA-coherent buffers via
+ * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+ * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+ * sleep on some machines, and b) an indication that the driver is
+ * probably misusing the coherent API anyway.
+ */
+ WARN_ON(irqs_disabled());
+
+ if (!cpu_addr)
+ return;
- for (i = 0; i < (size >> PAGE_SHIFT); i++)
- pages[i] = nth_page(page, i);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ if (dma_is_direct(ops))
+ dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+ else if (ops->free)
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+EXPORT_SYMBOL(dma_free_attrs);
- area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+static inline void dma_check_mask(struct device *dev, u64 mask)
+{
+ if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
+ dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+}
- kfree(pages);
+int dma_supported(struct device *dev, u64 mask)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
- if (!area)
- return NULL;
- return area->addr;
+ if (dma_is_direct(ops))
+ return dma_direct_supported(dev, mask);
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
}
+EXPORT_SYMBOL(dma_supported);
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+#ifndef HAVE_ARCH_DMA_SET_MASK
+int dma_set_mask(struct device *dev, u64 mask)
{
- struct vm_struct *area = find_vm_area(cpu_addr);
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
- if (!area || (area->flags & vm_flags) != vm_flags) {
- WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
- return;
- }
+ dma_check_mask(dev, mask);
+ *dev->dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+#endif
+
+#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (!dma_supported(dev, mask))
+ return -EIO;
- unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
- vunmap(cpu_addr);
+ dma_check_mask(dev, mask);
+ dev->coherent_dma_mask = mask;
+ return 0;
}
+EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (dma_is_direct(ops))
+ arch_dma_cache_sync(dev, vaddr, size, dir);
+ else if (ops->cache_sync)
+ ops->cache_sync(dev, vaddr, size, dir);
+}
+EXPORT_SYMBOL(dma_cache_sync);
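As a sanity check of the dma_default_get_required_mask() arithmetic above: on a machine with 4 GiB of RAM and 4 KiB pages, max_pfn is 1 << 20, high_totalram is zero, low_totalram rounds up to 0xffffffff, and the fallback returns DMA_BIT_MASK(32); with 8 GiB the high word comes into play and it returns 0x1ffffffff, i.e. DMA_BIT_MASK(33).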
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
new file mode 100644
index 0000000000000..18cc09fc27b92
--- /dev/null
+++ b/kernel/dma/remap.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (c) 2014 The Linux Foundation
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+static struct vm_struct *__dma_common_pages_remap(struct page **pages,
+ size_t size, unsigned long vm_flags, pgprot_t prot,
+ const void *caller)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area_caller(size, vm_flags, caller);
+ if (!area)
+ return NULL;
+
+ if (map_vm_area(area, prot, pages)) {
+ vunmap(area->addr);
+ return NULL;
+ }
+
+ return area;
+}
+
+/*
+ * Remaps an array of PAGE_SIZE pages into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller)
+{
+ struct vm_struct *area;
+
+ area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+ if (!area)
+ return NULL;
+
+ area->pages = pages;
+
+ return area->addr;
+}
+
+/*
+ * Remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller)
+{
+ int i;
+ struct page **pages;
+ struct vm_struct *area;
+
+ pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ pages[i] = nth_page(page, i);
+
+ area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+ kfree(pages);
+
+ if (!area)
+ return NULL;
+ return area->addr;
+}
+
+/*
+ * Unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || (area->flags & vm_flags) != vm_flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
+ vunmap(cpu_addr);
+}
+
+#ifdef CONFIG_DMA_DIRECT_REMAP
+static struct gen_pool *atomic_pool __ro_after_init;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+ atomic_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
+{
+ unsigned int pool_size_order = get_order(atomic_pool_size);
+ unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+ struct page *page;
+ void *addr;
+ int ret;
+
+ if (dev_get_cma_area(NULL))
+ page = dma_alloc_from_contiguous(NULL, nr_pages,
+ pool_size_order, false);
+ else
+ page = alloc_pages(gfp, pool_size_order);
+ if (!page)
+ goto out;
+
+ arch_dma_prep_coherent(page, atomic_pool_size);
+
+ atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!atomic_pool)
+ goto free_page;
+
+ addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
+ prot, __builtin_return_address(0));
+ if (!addr)
+ goto destroy_genpool;
+
+ ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+ page_to_phys(page), atomic_pool_size, -1);
+ if (ret)
+ goto remove_mapping;
+ gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
+
+ pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+ atomic_pool_size / 1024);
+ return 0;
+
+remove_mapping:
+ dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+ gen_pool_destroy(atomic_pool);
+ atomic_pool = NULL;
+free_page:
+ if (!dma_release_from_contiguous(NULL, page, nr_pages))
+ __free_pages(page, pool_size_order);
+out:
+ pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+ atomic_pool_size / 1024);
+ return -ENOMEM;
+}
+
+bool dma_in_atomic_pool(void *start, size_t size)
+{
+ return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+{
+ unsigned long val;
+ void *ptr = NULL;
+
+ if (!atomic_pool) {
+ WARN(1, "coherent pool not initialised!\n");
+ return NULL;
+ }
+
+ val = gen_pool_alloc(atomic_pool, size);
+ if (val) {
+ phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+ *ret_page = pfn_to_page(__phys_to_pfn(phys));
+ ptr = (void *)val;
+ memset(ptr, 0, size);
+ }
+
+ return ptr;
+}
+
+bool dma_free_from_pool(void *start, size_t size)
+{
+ if (!dma_in_atomic_pool(start, size))
+ return false;
+ gen_pool_free(atomic_pool, (unsigned long)start, size);
+ return true;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs)
+{
+ struct page *page = NULL;
+ void *ret;
+
+ size = PAGE_ALIGN(size);
+
+ if (!gfpflags_allow_blocking(flags) &&
+ !(attrs & DMA_ATTR_NO_KERNEL_MAPPING)) {
+ ret = dma_alloc_from_pool(size, &page, flags);
+ if (!ret)
+ return NULL;
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return ret;
+ }
+
+ page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
+ if (!page)
+ return NULL;
+
+ /* remove any dirty cache lines on the kernel alias */
+ arch_dma_prep_coherent(page, size);
+
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return page; /* opaque cookie */
+
+ /* create a coherent mapping */
+ ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+ __builtin_return_address(0));
+ if (!ret) {
+ __dma_direct_free_pages(dev, size, page);
+ return ret;
+ }
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ memset(ret, 0, size);
+
+ return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ /* vaddr is a struct page cookie, not a kernel address */
+ __dma_direct_free_pages(dev, size, vaddr);
+ } else if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
+ phys_addr_t phys = dma_to_phys(dev, dma_handle);
+ struct page *page = pfn_to_page(__phys_to_pfn(phys));
+
+ vunmap(vaddr);
+ __dma_direct_free_pages(dev, size, page);
+ }
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return __phys_to_pfn(dma_to_phys(dev, dma_addr));
+}
+#endif /* CONFIG_DMA_DIRECT_REMAP */
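The atomic pool exists because the remap path ends in the vmalloc machinery and therefore cannot run in atomic context; arch_dma_alloc() above serves non-blocking requests straight out of the pre-remapped pool instead. The pool defaults to 256 KiB and can be resized on the kernel command line via the early param handled above, e.g.:

	coherent_pool=2M

memparse() accepts the usual K/M/G suffixes.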
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 045930e32c0e9..d6361776dc5ce 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -21,7 +21,6 @@
#include <linux/cache.h>
#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -65,7 +64,7 @@ enum swiotlb_force swiotlb_force;
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
-static phys_addr_t io_tlb_start, io_tlb_end;
+phys_addr_t io_tlb_start, io_tlb_end;
/*
* The number of IO TLB blocks (in groups of 64) between io_tlb_start and
@@ -383,11 +382,6 @@ void __init swiotlb_exit(void)
max_segment = 0;
}
-static int is_swiotlb_buffer(phys_addr_t paddr)
-{
- return paddr >= io_tlb_start && paddr < io_tlb_end;
-}
-
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
@@ -526,7 +520,7 @@ not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
- return SWIOTLB_MAP_ERROR;
+ return DMA_MAPPING_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -623,237 +617,36 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
}
}
-static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+/*
+ * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * to the device copy the data into it as well.
+ */
+bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t dma_addr;
+ trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
dev_warn_ratelimited(dev,
"Cannot do DMA to address %pa\n", phys);
- return DIRECT_MAPPING_ERROR;
+ return false;
}
/* Oh well, have to allocate and map a bounce buffer. */
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
*phys, size, dir, attrs);
- if (*phys == SWIOTLB_MAP_ERROR)
- return DIRECT_MAPPING_ERROR;
+ if (*phys == DMA_MAPPING_ERROR)
+ return false;
/* Ensure that the address returned is DMA'ble */
- dma_addr = __phys_to_dma(dev, *phys);
- if (unlikely(!dma_capable(dev, dma_addr, size))) {
+ *dma_addr = __phys_to_dma(dev, *phys);
+ if (unlikely(!dma_capable(dev, *dma_addr, size))) {
swiotlb_tbl_unmap_single(dev, *phys, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
- return DIRECT_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode. The
- * physical address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
- */
-dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = phys_to_dma(dev, phys);
-
- BUG_ON(dir == DMA_NONE);
- /*
- * If the address happens to be in the device's DMA window,
- * we can safely return the device addr and not worry about bounce
- * buffering it.
- */
- if (!dma_capable(dev, dev_addr, size) ||
- swiotlb_force == SWIOTLB_FORCE) {
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
- }
-
- if (!dev_is_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
- dev_addr != DIRECT_MAPPING_ERROR)
- arch_sync_dma_for_device(dev, phys, size, dir);
-
- return dev_addr;
-}
-
-/*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_page call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (!dev_is_dma_coherent(hwdev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
-
- if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
- return;
+ return false;
}
- if (dir != DMA_FROM_DEVICE)
- return;
-
- /*
- * phys_to_virt doesn't work with a highmem page, but we could
- * call dma_mark_clean() with a highmem page here. However, we
- * are fine since dma_mark_clean() is a no-op on POWERPC. We can
- * make dma_mark_clean() take a physical address if necessary.
- */
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to tear down the DMA mapping, you must
- * call this function before doing so. At the next point you give the DMA
- * address back to the card, you must first perform a
- * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
- */
-static void
-swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
- arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
-
- if (is_swiotlb_buffer(paddr))
- swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-
- if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
- arch_sync_dma_for_device(hwdev, paddr, size, dir);
-
- if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_page
- * interface. Here the scatter-gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * Device ownership issues as mentioned above for swiotlb_map_page are the
- * same here.
- */
-int
-swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = swiotlb_map_page(dev, sg_page(sg), sg->offset,
- sg->length, dir, attrs);
- if (sg->dma_address == DIRECT_MAPPING_ERROR)
- goto out_error;
- sg_dma_len(sg) = sg->length;
- }
-
- return nelems;
-
-out_error:
- swiotlb_unmap_sg_attrs(dev, sgl, i, dir,
- attrs | DMA_ATTR_SKIP_CPU_SYNC);
- sg_dma_len(sgl) = 0;
- return 0;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i)
- swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg), dir,
- attrs);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i)
- swiotlb_sync_single(hwdev, sg->dma_address,
- sg_dma_len(sg), dir, target);
-}
-
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+ return true;
}
/*
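swiotlb_map() now reports success with a bool and hands the DMA address back
through *dma_addr, so the dma-direct code can invoke it only on the slow
path. A rough sketch of a caller along the lines of dma_direct_map_page(),
written against the swiotlb_map() signature above (the actual dma-direct
code in this series may differ in detail):

	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE) ||
	    !dma_capable(dev, dma_addr, size)) {
		/* bounce buffer; swiotlb_map() rewrites phys and dma_addr */
		if (!swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs))
			return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;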
@@ -867,19 +660,3 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
-
-const struct dma_map_ops swiotlb_dma_ops = {
- .mapping_error = dma_direct_mapping_error,
- .alloc = dma_direct_alloc,
- .free = dma_direct_free,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .dma_supported = dma_direct_supported,
-};
-EXPORT_SYMBOL(swiotlb_dma_ops);
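Dropping swiotlb_dma_ops is possible because the direct mapping no longer
needs a dma_map_ops instance at all: a NULL ops pointer now selects
dma-direct, and the generic wrappers branch on that instead of making a
retpoline-expensive indirect call. Roughly, modeled on the inline wrappers
this series adds (a sketch, not the verbatim header):

	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	if (dma_is_direct(ops))		/* likely(!ops) */
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);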
diff --git a/kernel/dma/virt.c b/kernel/dma/virt.c
index 631ddec4b60a8..ebe128833af7b 100644
--- a/kernel/dma/virt.c
+++ b/kernel/dma/virt.c
@@ -13,7 +13,7 @@ static void *dma_virt_alloc(struct device *dev, size_t size,
{
void *ret;
- ret = (void *)__get_free_pages(gfp, get_order(size));
+ ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
if (ret)
*dma_handle = (uintptr_t)ret;
return ret;
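With dma_virt_ops zeroing its allocations too, every backend now returns
zeroed memory from dma_alloc_*, which is what allows dma_zalloc_coherent()
to be deprecated. An illustrative caller-side conversion (the driver
variables are placeholders):

	/* before: the explicitly zeroing wrapper */
	buf = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);

	/* after: dma_alloc_coherent() already returns zeroed memory */
	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;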
diff --git a/lib/Kconfig b/lib/Kconfig
index 7dbbcfe9cd903..79bc2eef9c14c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -577,7 +577,7 @@ config SG_POOL
# sg chaining option
#
-config ARCH_HAS_SG_CHAIN
+config ARCH_NO_SG_CHAIN
def_bool n
config ARCH_HAS_PMEM_API
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c6096a717048..9ba349e775ef0 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -271,7 +271,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (nents == 0)
return -EINVAL;
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
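With the polarity inverted, scatterlist chaining is the default and only the
few architectures that cannot cope select ARCH_NO_SG_CHAIN in their Kconfig;
everywhere else, tables larger than SG_MAX_SINGLE_ALLOC entries are chained
transparently. A minimal sketch of a user relying on that (nents is a
placeholder):

	struct sg_table table;
	int ret;

	/*
	 * nents may exceed SG_MAX_SINGLE_ALLOC here: sg_alloc_table()
	 * links further scatterlist pages through chain entries unless
	 * the architecture selects ARCH_NO_SG_CHAIN, in which case the
	 * WARN_ON_ONCE() above fires and -EINVAL is returned.
	 */
	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... map and use table.sgl ... */

	sg_free_table(&table);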