author    Sascha Hauer <s.hauer@pengutronix.de>  2009-02-20 17:44:46 +0100
committer Sascha Hauer <s.hauer@pengutronix.de>  2009-08-19 10:51:30 +0200
commit    e2c8e8a1809aa14b60af88901ed3df85348e2987 (patch)
tree      2f16dc4043d0b3b8220370346662266a96f9e2aa /arch/arm/cpu
parent    49ff3691b435c04d938ae6b75eaf18dac93817cc (diff)
Add MMU support

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/Makefile |   3
-rw-r--r--  arch/arm/cpu/cache.S  |  70
-rw-r--r--  arch/arm/cpu/cpu.c    |   5
-rw-r--r--  arch/arm/cpu/mmu.c    | 135
4 files changed, 213 insertions, 0 deletions
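
The mmu.c comment below spells out the contract: mmu_init() builds a flat,
uncached 1:1 mapping, board code then remaps its SDRAM cached, and
mmu_enable() switches the MMU on. A minimal sketch of that sequence,
assuming 64 MiB of SDRAM at 0x80000000; the base address, size, the
PMD_SECT_WB flag name and the initcall level are illustrative, not part of
this patch:

	/* hypothetical board code, not part of this commit */
	static int board_mmu_init(void)
	{
		mmu_init();

		/* remap the SDRAM write-back cached on top of the flat mapping */
		arm_create_section(0x80000000, 0x80000000, 64,
				PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
				PMD_SECT_WB | PMD_TYPE_SECT);

		mmu_enable();

		return 0;
	}
	postcore_initcall(board_mmu_init);
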
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 69833e948e..3b6a6d7cc6 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -10,3 +10,6 @@ obj-$(CONFIG_ARMCORTEXA8) += start-arm.o
obj-$(CONFIG_ARCH_IMX31) += start-arm.o
obj-$(CONFIG_ARCH_IMX35) += start-arm.o
obj-$(CONFIG_CMD_ARM_CPUINFO) += cpuinfo.o
+obj-$(CONFIG_MMU) += mmu.o
+obj-$(CONFIG_MMU) += cache.o
+
diff --git a/arch/arm/cpu/cache.S b/arch/arm/cpu/cache.S
new file mode 100644
index 0000000000..f91500aec1
--- /dev/null
+++ b/arch/arm/cpu/cache.S
@@ -0,0 +1,70 @@
+#ifndef ENTRY
+#define ENTRY(name) \
+ .globl name; \
+ name:
+#endif
+
+#define CACHE_DLINESIZE 32	/* data cache line size in bytes */
+
+/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * (same as v4wb)
+ */
+ENTRY(dma_inv_range)
+	tst	r0, #CACHE_DLINESIZE - 1
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r0, c7, c10, 1	@ clean D entry
+	tst	r1, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r1, c7, c10, 1	@ clean D entry
+1:	mcr	p15, 0, r0, c7, c6, 1	@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4	@ drain WB
+	mov	pc, lr
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * (same as v4wb)
+ */
+ENTRY(dma_clean_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1	@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4	@ drain WB
+	mov	pc, lr
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(dma_flush_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c14, 1	@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4	@ drain WB
+	mov	pc, lr
+
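
The three routines follow the Linux ARM cache conventions: invalidate before
a device writes a buffer by DMA, clean before a device reads one, and
dma_flush_range() does both for bidirectional buffers. A sketch of a C
caller; the prototypes are transcribed by hand from the assembly (r0 =
start, r1 = end), and start_dma_from_device()/start_dma_to_device() are
made-up device hooks:

	void dma_inv_range(unsigned long start, unsigned long end);
	void dma_clean_range(unsigned long start, unsigned long end);

	static void dma_receive(void *buf, int len)
	{
		unsigned long start = (unsigned long)buf;

		/* discard stale lines so the CPU sees the DMA'd data */
		dma_inv_range(start, start + len);
		start_dma_from_device(buf, len);	/* made-up device hook */
	}

	static void dma_send(void *buf, int len)
	{
		unsigned long start = (unsigned long)buf;

		/* push dirty lines to memory so the device sees current data */
		dma_clean_range(start, start + len);
		start_dma_to_device(buf, len);		/* made-up device hook */
	}
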
diff --git a/arch/arm/cpu/cpu.c b/arch/arm/cpu/cpu.c
index 1995557954..265a2b0b03 100644
--- a/arch/arm/cpu/cpu.c
+++ b/arch/arm/cpu/cpu.c
@@ -27,6 +27,7 @@
#include <common.h>
#include <command.h>
+#include <asm/mmu.h>
/**
* Read special processor register
@@ -143,6 +144,10 @@ int cleanup_before_linux (void)
shutdown_uboot();
+#ifdef CONFIG_MMU
+ mmu_disable();
+#endif
+
/* flush I/D-cache */
i = 0;
asm ("mcr p15, 0, %0, c7, c7, 0": :"r" (i));
diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
new file mode 100644
index 0000000000..298b897ab2
--- /dev/null
+++ b/arch/arm/cpu/mmu.c
@@ -0,0 +1,135 @@
+#include <common.h>
+#include <init.h>
+#include <asm/mmu.h>
+
+/* first-level translation table: 4096 entries, each mapping 1 MiB */
+static unsigned long *ttb;
+
+void arm_create_section(unsigned long virt, unsigned long phys, int size_m,
+		unsigned int flags)
+{
+	int i;
+
+	phys >>= 20;
+	virt >>= 20;
+
+	for (i = size_m; i > 0; i--, virt++, phys++)
+		ttb[virt] = (phys << 20) | flags;
+
+	asm volatile (
+		"mov r0, #0;"
+		"mcr p15, 0, r0, c7, c6, 0;" /* invalidate d-cache */
+		"mcr p15, 0, r0, c8, c7, 0;" /* invalidate i+d-TLBs */
+		:
+		:
+		: "r0", "memory" /* clobber list */
+	);
+}
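+
+/*
+ * Worked example (illustrative): with the usual ARM section-descriptor
+ * values PMD_SECT_AP_WRITE = 1 << 10, PMD_SECT_AP_READ = 1 << 11 and
+ * PMD_TYPE_SECT = 2, arm_create_section(0x80000000, 0x80000000, 1,
+ * PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT) writes
+ * ttb[0x800] = 0x80000c02: section base 0x80000000, read/write access,
+ * descriptor type "section".
+ */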
+
+/*
+ * Prepare the MMU for use and create a flat 1:1 mapping. Board
+ * code is responsible for remapping its SDRAM cached.
+ */
+void mmu_init(void)
+{
+	int i;
+
+	/*
+	 * Allocate 32 KiB so that a naturally aligned 16 KiB translation
+	 * table fits inside it; the hardware requires the first-level
+	 * table to be aligned to a 16 KiB boundary.
+	 */
+	ttb = xzalloc(0x8000);
+	ttb = (void *)(((unsigned long)ttb + 0x4000) & ~0x3fff);
+
+	/* Set the translation table base register */
+	asm volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r"(ttb));
+
+	/* Set the Domain Access Control Register: domain 0 = manager */
+	i = 0x3;
+	asm volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r"(i));
+
+	/* create a flat, uncached 1:1 mapping of the whole address space */
+	arm_create_section(0, 0, 4096,
+			PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT);
+}
+
+/*
+ * Enable the MMU. Must be called after mmu_init()
+ */
+void mmu_enable(void)
+{
+	asm volatile (
+		"mrc p15, 0, r1, c1, c0, 0;"
+		"orr r1, r1, #0x0007;" /* enable MMU, alignment check, D-cache */
+		"mcr p15, 0, r1, c1, c0, 0"
+		:
+		:
+		: "r1" /* clobber list */
+	);
+}
+
+/*
+ * Clean and invalidate the caches, then disable the MMU
+ */
+void mmu_disable(void)
+{
+	asm volatile (
+		"nop; "
+		"nop; "
+		"nop; "
+		"nop; "
+		"nop; "
+		"nop; "
+		/* test, clean and invalidate the D-cache line by line */
+		"1:	mrc p15, 0, r15, c7, c14, 3;"
+		"	bne 1b;"
+		"	mov r0, #0x0;"
+		"	mcr p15, 0, r0, c7, c10, 4;" /* drain the write buffer */
+		"	mrc p15, 0, r1, c1, c0, 0;"
+		"	bic r1, r1, #0x0007;" /* disable MMU, alignment check, D-cache */
+		"	mcr p15, 0, r1, c1, c0, 0;"
+		"	mcr p15, 0, r0, c7, c6, 0;" /* invalidate d-cache */
+		"	mcr p15, 0, r0, c8, c7, 0;" /* invalidate i+d-TLBs */
+		:
+		:
+		: "r0", "r1" /* clobber list */
+	);
+}
+
+/*
+ * For boards which need coherent memory for DMA. The idea is
+ * simple: set up an uncached section containing your SDRAM and
+ * call setup_dma_coherent() with the offset between the cached
+ * and the uncached section. dma_alloc_coherent() then works
+ * using normal malloc but returns the corresponding pointer
+ * into the uncached area.
+ */
+static unsigned long dma_coherent_offset;
+
+void setup_dma_coherent(unsigned long offset)
+{
+	dma_coherent_offset = offset;
+}
+
+void *dma_alloc_coherent(size_t size)
+{
+	void *mem;
+
+	mem = malloc(size);
+	if (mem)
+		return mem + dma_coherent_offset;
+
+	return NULL;
+}
+
+unsigned long dma_to_phys(void *virt)
+{
+	return (unsigned long)virt - dma_coherent_offset;
+}
+
+void *phys_to_dma(unsigned long phys)
+{
+	return (void *)(phys + dma_coherent_offset);
+}
+
+void dma_free_coherent(void *mem)
+{
+	free(mem - dma_coherent_offset);
+}
+
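
Putting the coherent-DMA pieces together: the board maps its SDRAM a second
time, uncached, at a free virtual window and registers the distance between
the two views. A sketch, assuming 64 MiB of SDRAM cached at 0x80000000 with
an uncached alias at 0x90000000 (both addresses are illustrative):

	/* hypothetical board code, not part of this commit */
	static int board_dma_init(void)
	{
		/* second, uncached view of the same 64 MiB of SDRAM */
		arm_create_section(0x90000000, 0x80000000, 64,
				PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT);

		/* offset from the cached view to the uncached alias */
		setup_dma_coherent(0x90000000 - 0x80000000);

		return 0;
	}

	/* a driver then gets an uncached pointer and a device address: */
	void *desc = dma_alloc_coherent(512);
	unsigned long bus = dma_to_phys(desc);

Note that dma_to_phys() only yields a true physical address when the cached
view is an identity mapping of the SDRAM, as in the flat mapping that
mmu_init() sets up.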