author		Ahmad Fatoum <ahmad@a3f.at>		2021-04-16 08:24:33 +0200
committer	Sascha Hauer <s.hauer@pengutronix.de>	2021-05-03 14:06:30 +0200
commit		ce65ca49fda3a849f80ddb7d8cafcf90ed765e2c (patch)
tree		e569d8a0d856fab1a465a56748e00c27bfabb000
parent		02b7eaf6ab6b7c5c5f83b8742f3e43991e4cff1d (diff)
x86: add DMA support
Both the interconnect and PCI are cache coherent on x86, so we
shouldn't need any special CPU barriers for DMA. Indeed, Linux defines
neither ARCH_HAS_SYNC_DMA_FOR_CPU nor ARCH_HAS_SYNC_DMA_FOR_DEVICE on
x86. It thus seems that the only reordering we need to take care of is
compiler-induced reordering.

The Linux memory model, which barebox adheres to as well, demands that
all accesses to shared data be volatile. Volatile accesses are already
guaranteed not to be reordered against each other, so we don't even
need an explicit barrier(); this is already the case on other
architectures that run with the MMU disabled.

Cc: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Ahmad Fatoum <ahmad@a3f.at>
Link: https://lore.barebox.org/20210416062436.332665-2-ahmad@a3f.at
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
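To make the ordering argument concrete, consider this hypothetical
driver fragment (not part of this patch: kick_dma(), the descriptor
layout and the doorbell register are invented for illustration;
WRITE_ONCE() and writel() are the usual volatile accessors):

    #include <common.h>
    #include <io.h>                 /* writel(): volatile MMIO store */
    #include <linux/compiler.h>     /* WRITE_ONCE(): volatile memory store */

    struct dma_desc {               /* hypothetical in-memory descriptor */
            u32 addr;
            u32 len;
    };

    static void kick_dma(struct dma_desc *desc, void __iomem *doorbell,
                         dma_addr_t buf, u32 len)
    {
            /*
             * Shared data is written with volatile accesses, as the
             * Linux memory model demands. The compiler must not reorder
             * volatile accesses against each other, ...
             */
            WRITE_ONCE(desc->addr, (u32)buf);
            WRITE_ONCE(desc->len, len);

            /*
             * ... so by the time this volatile doorbell write reaches
             * the device, the descriptor is complete. With cache-
             * coherent DMA on x86, no barrier() or cache maintenance
             * is needed in between.
             */
            writel(1, doorbell);
    }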
-rw-r--r--	arch/x86/Kconfig		 1
-rw-r--r--	arch/x86/include/asm/dma.h	36
2 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 311c3d1a8e..bcb44b23f0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -4,6 +4,7 @@
 config X86
 	bool
 	select HAS_KALLSYMS
+	select HAS_DMA
 	select GENERIC_FIND_NEXT_BIT
 	default y
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index 3dab2b688d..8a3b044f3a 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -4,6 +4,40 @@
 #ifndef __ASM_DMA_H
 #define __ASM_DMA_H
-/* empty */
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <xfuncs.h>
+#include <malloc.h>
+
+/*
+ * x86 is cache coherent, so we need not do anything special here
+ */
+
+static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
+{
+	void *ret = xmemalign(4096, size);
+	if (dma_handle)
+		*dma_handle = (dma_addr_t)ret;
+
+	memset(ret, 0, size);
+
+	return ret;
+}
+
+static inline void dma_free_coherent(void *mem, dma_addr_t dma_handle,
+				     size_t size)
+{
+	free(mem);
+}
+
+static inline void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
+					   enum dma_data_direction dir)
+{
+}
+
+static inline void dma_sync_single_for_device(dma_addr_t address, size_t size,
+					      enum dma_data_direction dir)
+{
+}
 #endif /* __ASM_DMA_H */
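For reference, a sketch of how a driver might consume this API (the
function and buffer size are invented for illustration; SZ_4K and the
DMA_* direction flags are the usual barebox definitions):

    #include <common.h>
    #include <dma.h>                /* pulls in the asm/dma.h above */
    #include <linux/sizes.h>

    static int example_dma_init(void)
    {
            dma_addr_t dma;
            void *buf;

            /* 4 KiB-aligned, zeroed buffer; xmemalign() panics on
             * allocation failure instead of returning NULL */
            buf = dma_alloc_coherent(SZ_4K, &dma);

            /* CPU fills the buffer, then hands it to the device... */
            memset(buf, 0xff, SZ_4K);
            dma_sync_single_for_device(dma, SZ_4K, DMA_TO_DEVICE);

            /* ...and reclaims it later. Both syncs compile to nothing
             * on x86, but keep the driver portable to non-coherent
             * architectures. */
            dma_sync_single_for_cpu(dma, SZ_4K, DMA_FROM_DEVICE);

            dma_free_coherent(buf, dma, SZ_4K);
            return 0;
    }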