author     Sascha Hauer <s.hauer@pengutronix.de>    2013-04-04 14:20:42 +0200
committer  Sascha Hauer <s.hauer@pengutronix.de>    2013-04-04 14:20:42 +0200
commit     dd9f6d08a2736bf8ddc0004779c563a4c3a77cef (patch)
tree       9e829ce3e8a29e76b6e8f1459b2636768d170a71 /arch/arm/cpu
parent     2143dfda8b32b8d1cda16982efeca57ca97b3996 (diff)
parent     a81ec0225f5a100341c20b4329c8b1d81ab025c4 (diff)
Merge branch 'for-next/relocate'

Conflicts:
	arch/arm/lib/barebox.lds.S
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/Makefile      |  3
-rw-r--r--  arch/arm/cpu/cache.c       | 33
-rw-r--r--  arch/arm/cpu/common.c      | 66
-rw-r--r--  arch/arm/cpu/cpu.c         | 34
-rw-r--r--  arch/arm/cpu/exceptions.S  | 55
-rw-r--r--  arch/arm/cpu/mmu.c         |  2
-rw-r--r--  arch/arm/cpu/setupc.S      | 62
-rw-r--r--  arch/arm/cpu/start-pbl.c   | 18
-rw-r--r--  arch/arm/cpu/start.c       |  5

9 files changed, 235 insertions, 43 deletions
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 525a1806fe..c442b35797 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -22,3 +22,6 @@ pbl-$(CONFIG_CPU_32v7) += cache-armv7.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
pbl-y += start-pbl.o setupc.o
+
+obj-y += common.o
+pbl-y += common.o
diff --git a/arch/arm/cpu/cache.c b/arch/arm/cpu/cache.c
index 1254609bb6..95c8338250 100644
--- a/arch/arm/cpu/cache.c
+++ b/arch/arm/cpu/cache.c
@@ -101,3 +101,36 @@ int arm_set_cache_functions(void)
return 0;
}
+
+/*
+ * Early function to flush the caches. This is for use when the
+ * C environment is not yet fully initialized.
+ */
+void arm_early_mmu_cache_flush(void)
+{
+ switch (arm_early_get_cpu_architecture()) {
+#ifdef CONFIG_CPU_32v4T
+ case CPU_ARCH_ARMv4T:
+ v4_mmu_cache_flush();
+ return;
+#endif
+#ifdef CONFIG_CPU_32v5
+ case CPU_ARCH_ARMv5:
+ case CPU_ARCH_ARMv5T:
+ case CPU_ARCH_ARMv5TE:
+ case CPU_ARCH_ARMv5TEJ:
+ v5_mmu_cache_flush();
+ return;
+#endif
+#ifdef CONFIG_CPU_32v6
+ case CPU_ARCH_ARMv6:
+ v6_mmu_cache_flush();
+ return;
+#endif
+#ifdef CONFIG_CPU_32v7
+ case CPU_ARCH_ARMv7:
+ v7_mmu_cache_flush();
+ return;
+#endif
+ }
+}
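
In the callers added by this series, the early data cache flush is always paired with an instruction cache flush once code has been copied or patched in memory. A minimal sketch of that pattern, illustration only, using the names that appear in this patch (the helper itself and the extern prototypes are assumptions):

extern void arm_early_mmu_cache_flush(void);	/* added by this patch */
extern void flush_icache(void);			/* assumed prototype of the existing helper */

/* Hypothetical helper, illustration only: make freshly copied or patched
 * code visible to the instruction fetch path. */
static inline void sync_caches_after_code_copy(void)
{
	arm_early_mmu_cache_flush();	/* write the new code back to memory */
	flush_icache();			/* drop any stale cached instructions */
}
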
diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
new file mode 100644
index 0000000000..40f784f02a
--- /dev/null
+++ b/arch/arm/cpu/common.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <common.h>
+#include <init.h>
+#include <sizes.h>
+#include <asm/barebox-arm.h>
+#include <asm/barebox-arm-head.h>
+#include <asm-generic/memory_layout.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+
+/*
+ * relocate binary to the currently running address
+ */
+void relocate_to_current_adr(void)
+{
+ uint32_t offset;
+ uint32_t *dstart, *dend, *dynsym, *dynend;
+
+ /* Get offset between linked address and runtime address */
+ offset = get_runtime_offset();
+
+ dstart = (void *)(ld_var(__rel_dyn_start) - offset);
+ dend = (void *)(ld_var(__rel_dyn_end) - offset);
+
+ dynsym = (void *)(ld_var(__dynsym_start) - offset);
+ dynend = (void *)(ld_var(__dynsym_end) - offset);
+
+ while (dstart < dend) {
+ uint32_t *fixup = (uint32_t *)(*dstart - offset);
+ uint32_t type = *(dstart + 1);
+
+ if ((type & 0xff) == 0x17) {
+ *fixup = *fixup - offset;
+ } else {
+ int index = type >> 8;
+ uint32_t r = dynsym[index * 4 + 1];
+
+ *fixup = *fixup + r - offset;
+ }
+
+ *dstart -= offset;
+ dstart += 2;
+ }
+
+ memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
+
+ arm_early_mmu_cache_flush();
+ flush_icache();
+}
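
The two-word records walked above are Elf32_Rel entries from .rel.dyn: word 0 is r_offset, word 1 is r_info, and type 0x17 is R_ARM_RELATIVE. The symbol case indexes 16-byte Elf32_Sym records, whose second word is st_value. A rough single-entry equivalent using the standard <elf.h> names, as an illustration only and not part of the patch:

#include <stdint.h>
#include <elf.h>

/* Illustration: one iteration of the fixup loop above, restated with ELF types. */
static void apply_one_fixup(Elf32_Rel *rel, Elf32_Sym *dynsym, uint32_t offset)
{
	/* r_offset still holds the link-time address of the word to patch */
	uint32_t *fixup = (uint32_t *)(uintptr_t)(rel->r_offset - offset);

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_RELATIVE)
		*fixup -= offset;	/* shift the stored pointer by the load offset */
	else
		*fixup += dynsym[ELF32_R_SYM(rel->r_info)].st_value - offset;

	rel->r_offset -= offset;	/* the entry itself is rewritten in place, too */
}
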
diff --git a/arch/arm/cpu/cpu.c b/arch/arm/cpu/cpu.c
index 5f697d7916..f2e698d430 100644
--- a/arch/arm/cpu/cpu.c
+++ b/arch/arm/cpu/cpu.c
@@ -148,45 +148,13 @@ postcore_initcall(execute_init);
#endif
#ifdef ARM_MULTIARCH
-static int __get_cpu_architecture(void)
-{
- int cpu_arch;
-
- if ((read_cpuid_id() & 0x0008f000) == 0) {
- cpu_arch = CPU_ARCH_UNKNOWN;
- } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
- cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
- } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
- cpu_arch = (read_cpuid_id() >> 16) & 7;
- if (cpu_arch)
- cpu_arch += CPU_ARCH_ARMv3;
- } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
- unsigned int mmfr0;
-
- /* Revised CPUID format. Read the Memory Model Feature
- * Register 0 and check for VMSAv7 or PMSAv7 */
- asm("mrc p15, 0, %0, c0, c1, 4"
- : "=r" (mmfr0));
- if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
- (mmfr0 & 0x000000f0) >= 0x00000030)
- cpu_arch = CPU_ARCH_ARMv7;
- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
- (mmfr0 & 0x000000f0) == 0x00000020)
- cpu_arch = CPU_ARCH_ARMv6;
- else
- cpu_arch = CPU_ARCH_UNKNOWN;
- } else
- cpu_arch = CPU_ARCH_UNKNOWN;
-
- return cpu_arch;
-}
int __cpu_architecture;
int __pure cpu_architecture(void)
{
if(__cpu_architecture == CPU_ARCH_UNKNOWN)
- __cpu_architecture = __get_cpu_architecture();
+ __cpu_architecture = arm_early_get_cpu_architecture();
return __cpu_architecture;
}
diff --git a/arch/arm/cpu/exceptions.S b/arch/arm/cpu/exceptions.S
index 4185bd1d30..167c8d1fe2 100644
--- a/arch/arm/cpu/exceptions.S
+++ b/arch/arm/cpu/exceptions.S
@@ -1,4 +1,5 @@
#include <config.h>
+#include <linux/linkage.h>
#include <asm-generic/memory_layout.h>
/*
@@ -137,16 +138,58 @@ fiq:
bad_save_user_regs
bl do_fiq
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_ARM_EXCEPTIONS)
+/*
+ * With relocatable binary support the runtime exception vectors do not match
+ * the addresses in the binary. We have to fix them up during runtime
+ */
+ENTRY(arm_fixup_vectors)
+ ldr r0, =undefined_instruction
+ ldr r1, =_undefined_instruction
+ str r0, [r1]
+ ldr r0, =software_interrupt
+ ldr r1, =_software_interrupt
+ str r0, [r1]
+ ldr r0, =prefetch_abort
+ ldr r1, =_prefetch_abort
+ str r0, [r1]
+ ldr r0, =data_abort
+ ldr r1, =_data_abort
+ str r0, [r1]
+ ldr r0, =irq
+ ldr r1, =_irq
+ str r0, [r1]
+ ldr r0, =fiq
+ ldr r1, =_fiq
+ str r0, [r1]
+ bx lr
+ENDPROC(arm_fixup_vectors)
+#endif
+
.section .text_exceptions
+.globl extable
+extable:
1: b 1b /* barebox_arm_reset_vector */
#ifdef CONFIG_ARM_EXCEPTIONS
- ldr pc, =undefined_instruction /* undefined instruction */
- ldr pc, =software_interrupt /* software interrupt (SWI) */
- ldr pc, =prefetch_abort /* prefetch abort */
- ldr pc, =data_abort /* data abort */
+ ldr pc, _undefined_instruction /* undefined instruction */
+ ldr pc, _software_interrupt /* software interrupt (SWI) */
+ ldr pc, _prefetch_abort /* prefetch abort */
+ ldr pc, _data_abort /* data abort */
1: b 1b /* (reserved) */
- ldr pc, =irq /* irq (interrupt) */
- ldr pc, =fiq /* fiq (fast interrupt) */
+ ldr pc, _irq /* irq (interrupt) */
+ ldr pc, _fiq /* fiq (fast interrupt) */
+.globl _undefined_instruction
+_undefined_instruction: .word undefined_instruction
+.globl _software_interrupt
+_software_interrupt: .word software_interrupt
+.globl _prefetch_abort
+_prefetch_abort: .word prefetch_abort
+.globl _data_abort
+_data_abort: .word data_abort
+.globl _irq
+_irq: .word irq
+.globl _fiq
+_fiq: .word fiq
#else
1: b 1b /* undefined instruction */
1: b 1b /* software interrupt (SWI) */
diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index 219f50a478..34fe5c3515 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -247,6 +247,8 @@ static void vectors_init(void)
exc = arm_create_pte(0x0);
}
+ arm_fixup_vectors();
+
vectors = xmemalign(PAGE_SIZE, PAGE_SIZE);
memset(vectors, 0, PAGE_SIZE);
memcpy(vectors, __exceptions_start, __exceptions_stop - __exceptions_start);
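
The exception page is copied to its final location by vectors_init(), so the literal values that ldr pc, =label would have embedded at link time are wrong after relocation; each vector therefore loads through a word that travels with the page and that arm_fixup_vectors() fills with the relocated handler address beforehand. A conceptual C model of one such slot, illustration only (the slot variable here is hypothetical; the real slots are the _undefined_instruction/_data_abort/... words in exceptions.S):

typedef void (*exception_handler_t)(void);

extern void data_abort(void);		/* handler, at its relocated address */
exception_handler_t data_abort_slot;	/* stands in for the _data_abort word */

/* What one store in arm_fixup_vectors() amounts to. */
void fixup_data_abort_vector(void)
{
	data_abort_slot = data_abort;
}
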
diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc.S
index 9a8d54c224..7fd5d012f0 100644
--- a/arch/arm/cpu/setupc.S
+++ b/arch/arm/cpu/setupc.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/sections.h>
.section .text.setupc
@@ -26,9 +27,70 @@ ENTRY(setup_c)
ldr r2, =__bss_stop
sub r2, r2, r0
bl memset /* clear bss */
+#ifdef CONFIG_MMU
+ bl arm_early_mmu_cache_flush
+#endif
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* flush icache */
add lr, r5, r4 /* adjust return address to new location */
pop {r4, r5}
mov pc, lr
ENDPROC(setup_c)
+
+#ifdef CONFIG_RELOCATABLE
+/*
+ * void relocate_to_adr(unsigned long targetadr)
+ *
+ * Copy binary to targetadr, relocate code, clear bss and continue
+ * executing at new address.
+ */
+.section .text.relocate_to_adr
+ENTRY(relocate_to_adr)
+ /* r0: target address */
+ push {r3, r4, r5, r6, r7, r8}
+ mov r7, lr
+
+ mov r6, r0
+
+ bl get_runtime_offset
+
+ mov r5, r0
+
+ ld_var _text, r0, r4
+ mov r8, r0
+
+ sub r1, r0, r5 /* r1: from address */
+
+ cmp r1, r6 /* already at correct address? */
+ beq 1f /* yes, skip copy to new address */
+
+ ld_var __bss_start, r2, r4
+
+ sub r2, r2, r0 /* r2: size */
+ mov r0, r6 /* r0: target */
+
+ add r7, r7, r0 /* adjust return address */
+ sub r7, r7, r1 /* lr += offset */
+
+ bl memcpy /* copy binary */
+
+#ifdef CONFIG_MMU
+ bl arm_early_mmu_cache_flush
+#endif
+ mov r0,#0
+ mcr p15, 0, r0, c7, c5, 0 /* flush icache */
+
+ ldr r0,=1f
+ sub r0, r0, r8
+ add r0, r0, r6
+ mov pc, r0 /* jump to relocated address */
+1:
+ bl relocate_to_current_adr /* relocate binary */
+
+ mov lr, r7
+
+ pop {r3, r4, r5, r6, r7, r8}
+ mov pc, lr
+
+ENDPROC(relocate_to_adr)
+#endif
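
For readability, here is the control flow of relocate_to_adr() restated as a C-style sketch, illustration only, reusing the includes and names that common.c in this patch uses; the real routine must stay in assembly because it jumps from the old copy of the image into the new one and rebases its saved return address:

#include <common.h>
#include <asm/barebox-arm.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Hypothetical C restatement of relocate_to_adr() above. */
void relocate_to_adr_sketch(unsigned long target)
{
	unsigned long offset = get_runtime_offset();	/* link address minus runtime address */
	unsigned long link_text = ld_var(_text);
	unsigned long from = link_text - offset;	/* where the image runs right now */
	unsigned long size = ld_var(__bss_start) - link_text;	/* text + data, bss excluded */

	if (from != target) {
		memcpy((void *)target, (void *)from, size);	/* move the image */
		arm_early_mmu_cache_flush();			/* write the copy back to memory */
		/* flush the icache and continue at the matching point in the new copy */
	}

	relocate_to_current_adr();	/* apply the .rel.dyn fixups for the new address */
}
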
diff --git a/arch/arm/cpu/start-pbl.c b/arch/arm/cpu/start-pbl.c
index 91bc8fe9ff..6f03c4a7df 100644
--- a/arch/arm/cpu/start-pbl.c
+++ b/arch/arm/cpu/start-pbl.c
@@ -55,9 +55,13 @@ static noinline __noreturn void __barebox_arm_entry(uint32_t membase,
uint32_t pg_start, pg_end, pg_len;
void __noreturn (*barebox)(uint32_t, uint32_t, uint32_t);
uint32_t endmem = membase + memsize;
+ unsigned long barebox_base;
endmem -= STACK_SIZE; /* stack */
+ if (IS_ENABLED(CONFIG_PBL_RELOCATABLE))
+ relocate_to_current_adr();
+
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
@@ -65,8 +69,13 @@ static noinline __noreturn void __barebox_arm_entry(uint32_t membase,
pg_end = (uint32_t)&input_data_end - offset;
pg_len = pg_end - pg_start;
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ barebox_base = arm_barebox_image_place(membase + memsize);
+ else
+ barebox_base = TEXT_BASE;
+
if (offset && (IS_ENABLED(CONFIG_PBL_FORCE_PIGGYDATA_COPY) ||
- region_overlap(pg_start, pg_len, TEXT_BASE, pg_len * 4))) {
+ region_overlap(pg_start, pg_len, barebox_base, pg_len * 4))) {
/*
* copy piggydata binary to its link address
*/
@@ -86,14 +95,15 @@ static noinline __noreturn void __barebox_arm_entry(uint32_t membase,
free_mem_ptr = endmem;
free_mem_end_ptr = free_mem_ptr + SZ_128K;
- pbl_barebox_uncompress((void*)TEXT_BASE, (void *)pg_start, pg_len);
+ pbl_barebox_uncompress((void*)barebox_base, (void *)pg_start, pg_len);
+ arm_early_mmu_cache_flush();
flush_icache();
if (IS_ENABLED(CONFIG_THUMB2_BAREBOX))
- barebox = (void *)(TEXT_BASE + 1);
+ barebox = (void *)(barebox_base + 1);
else
- barebox = (void *)TEXT_BASE;
+ barebox = (void *)barebox_base;
barebox(membase, memsize, boarddata);
}
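
The overlap check above protects the compressed piggydata from being overwritten by the decompressed image, whose footprint is estimated as pg_len * 4; only in that case is the payload first copied to its link address, out of the way. The check is assumed to be the usual half-open interval test, sketched here for illustration and not taken from the barebox source:

#include <stdint.h>

/* Assumed semantics of region_overlap(): do [starta, starta + lena) and
 * [startb, startb + lenb) share any byte? */
static int regions_overlap_sketch(uint32_t starta, uint32_t lena,
				  uint32_t startb, uint32_t lenb)
{
	if (starta + lena <= startb)
		return 0;
	if (startb + lenb <= starta)
		return 0;
	return 1;
}
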
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index 943fa46281..580c1fed22 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -46,6 +46,11 @@ static noinline __noreturn void __start(uint32_t membase, uint32_t memsize,
unsigned long endmem = membase + memsize;
unsigned long malloc_start, malloc_end;
+ if (IS_ENABLED(CONFIG_RELOCATABLE)) {
+ unsigned long barebox_base = arm_barebox_image_place(endmem);
+ relocate_to_adr(barebox_base);
+ }
+
setup_c();
barebox_boarddata = boarddata;