author		Lucas Stach <dev@lynxeye.de>	2015-03-05 22:49:55 +0100
committer	Sascha Hauer <s.hauer@pengutronix.de>	2015-03-06 07:51:47 +0100
commit	e05f9586b302d0dba5e9c98d849596cad1716a6f (patch)
tree	e4f8aa0b3e0dd5c3899d09e6d46746a37baf4635 /arch
parent	6c583d0e327deecaea026cf47576fbe42274bd8c (diff)
ARM: change dma_alloc/free_coherent to match other architectures
As a lot of drivers currently rely on the 1:1 virt->phys mapping on ARM, we define DMA_ADDRESS_BROKEN to mark them. In order to use them on other architectures with a different mapping they need proper fixing.

Signed-off-by: Lucas Stach <dev@lynxeye.de>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
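For a driver author the change is easiest to see at a call site. The sketch below contrasts the old and new prototypes and shows the DMA_ADDRESS_BROKEN marker introduced further down; the names desc, desc_dma, DESC_SIZE and BUF_SIZE are purely illustrative and not part of this patch.

	/* Before this patch only the virtual address was returned:
	 *	void *desc = dma_alloc_coherent(DESC_SIZE);
	 */

	/* After this patch the DMA address comes back through the second
	 * argument and has to be handed back when freeing. */
	dma_addr_t desc_dma;
	void *desc = dma_alloc_coherent(DESC_SIZE, &desc_dma);
	dma_free_coherent(desc, desc_dma, DESC_SIZE);

	/* A driver that still relies on the ARM 1:1 virt->phys mapping
	 * passes DMA_ADDRESS_BROKEN (defined to NULL below) instead of a
	 * real handle, marking it as needing a proper fix on architectures
	 * with a different mapping. */
	void *buf = dma_alloc_coherent(BUF_SIZE, DMA_ADDRESS_BROKEN);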
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/cpu/mmu.c	6
-rw-r--r--	arch/arm/include/asm/mmu.h	17
2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index aaf66d492a..c480e07ba7 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -379,12 +379,14 @@ static int mmu_init(void)
 }
 mmu_initcall(mmu_init);
-void *dma_alloc_coherent(size_t size)
+void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
 {
 	void *ret;
 	size = PAGE_ALIGN(size);
 	ret = xmemalign(PAGE_SIZE, size);
+	if (dma_handle)
+		*dma_handle = (dma_addr_t)ret;
 	dma_inv_range((unsigned long)ret, (unsigned long)ret + size);
@@ -403,7 +405,7 @@ void *phys_to_virt(unsigned long phys)
 	return (void *)phys;
 }
-void dma_free_coherent(void *mem, size_t size)
+void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size)
 {
 	size = PAGE_ALIGN(size);
 	remap_range(mem, size, pte_flags_cached);
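The handle filled in above is just the virtual address cast to dma_addr_t, which is only correct because barebox keeps a 1:1 virt->phys mapping on ARM (phys_to_virt() in the same hunk likewise returns the address unchanged). A minimal sketch of that assumption, with virt_to_phys() named here only as the assumed counterpart helper:

	/* What this patch does, valid under the ARM identity mapping: */
	*dma_handle = (dma_addr_t)ret;

	/* What an architecture with a real translation would need instead
	 * (assumed helper, not part of this patch): */
	*dma_handle = (dma_addr_t)virt_to_phys(ret);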
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index c6e425f02d..f01c00389d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -11,6 +11,8 @@
 #define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
 #define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
+#define DMA_ADDRESS_BROKEN NULL
+
 struct arm_memory;
 static inline void mmu_enable(void)
@@ -33,8 +35,8 @@ static inline void *dma_alloc(size_t size)
 }
 #ifdef CONFIG_MMU
-void *dma_alloc_coherent(size_t size);
-void dma_free_coherent(void *mem, size_t size);
+void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle);
+void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size);
 void dma_clean_range(unsigned long, unsigned long);
 void dma_flush_range(unsigned long, unsigned long);
@@ -47,12 +49,17 @@ uint32_t mmu_get_pte_cached_flags(void);
 uint32_t mmu_get_pte_uncached_flags(void);
 #else
-static inline void *dma_alloc_coherent(size_t size)
+static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
 {
-	return xmemalign(4096, size);
+	void *ret = xmemalign(4096, size);
+	if (dma_handle)
+		*dma_handle = (dma_addr_t)ret;
+
+	return ret;
 }
-static inline void dma_free_coherent(void *mem, size_t size)
+static inline void dma_free_coherent(void *mem, dma_addr_t dma_handle,
+				size_t size)
 {
 	free(mem);
 }
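With CONFIG_MMU disabled, the inline fallback above fills the handle the same way, so a caller can be written once against the new prototype. A short, hypothetical caller that actually needs the DMA address to program hardware could look like the following; base and RING_BASE_REG are made-up names, not part of this patch.

	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(4096, &ring_dma);

	/* Hand the bus/DMA address to the (hypothetical) controller,
	 * never the CPU-visible pointer. */
	writel(ring_dma, base + RING_BASE_REG);

	/* ... use the ring ... */

	dma_free_coherent(ring, ring_dma, 4096);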