author     Ahmad Fatoum <a.fatoum@pengutronix.de>    2024-03-04 19:59:41 +0100
committer  Sascha Hauer <s.hauer@pengutronix.de>     2024-03-05 16:28:05 +0100
commit     0bfc6ab8bc6599720b9aab04ee1b138b96f7e6d2 (patch)
tree       9d79a55314e17fd99c720f5e1eaf9bc299aa3813 /arch/arm/cpu/setupc_64.S
parent     62a782bb02f53c6eadad31d5031a073184b7accb (diff)
ARM64: cpu: setupc: rewrite to be fully PIC
The code resulting from building the barebox ARM64 assembly contains
relocations that could just as well have been position-independent.
Let's make them truly position-independent by turning

	ldr x0, =label

into

	adr_l x0, label

adr_l is position-independent by virtue of being implemented using adrp,
so it is usable as-is before relocation and requires no manual addition
of get_runtime_offset().

With these changes, the only relocation still necessary for the ARM64
generic DT 2nd stage is the one needed by get_runtime_offset() to find
out whether barebox has been relocated. This is one step towards mapping
the barebox PBL text section W^X, which precludes relocation entries
being emitted for code.

With this change applied, there is still a data relocation entry in the
assembly code for get_runtime_offset(), but that doesn't bother us,
because it lives in the data section.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20240304190038.3486881-57-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
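[Editor's note: adr_l is not an ARMv8 instruction but an assembler macro; the patch pulls it in via asm/assembler.h. A sketch of the usual adrp/:lo12: pairing behind it is shown below — the exact barebox definition may differ in detail:]

	/*
	 * Sketch of a typical adr_l macro (illustrative, not copied from
	 * barebox): adrp loads the symbol's 4 KiB page PC-relatively and
	 * the add fills in the low 12 bits, so the full address is formed
	 * without a literal-pool load and without a relocation entry.
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm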
Diffstat (limited to 'arch/arm/cpu/setupc_64.S')
-rw-r--r--  arch/arm/cpu/setupc_64.S  |  22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/arm/cpu/setupc_64.S b/arch/arm/cpu/setupc_64.S
index f38f893be9..2138c2a600 100644
--- a/arch/arm/cpu/setupc_64.S
+++ b/arch/arm/cpu/setupc_64.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/sections.h>
.section .text.setupc
@@ -10,9 +11,9 @@
*/
ENTRY(setup_c)
mov x15, x30
- ldr x0, =__bss_start
+ adr_l x0, __bss_start
mov x1, #0
- ldr x2, =__bss_stop
+ adr_l x2, __bss_stop
sub x2, x2, x0
bl __memset /* clear bss */
mov x30, x15
@@ -29,12 +30,12 @@ ENDPROC(setup_c)
/* x0: target address */
#ifdef __PBL__
ENTRY(relocate_to_adr_full)
- ldr x2, =__image_end
+ adr_l x2, __image_end
b 1f
#endif
ENTRY(relocate_to_adr)
- ldr x2, =__bss_start
+ adr_l x2, __bss_start
b 1f
1:
@@ -45,18 +46,13 @@ ENTRY(relocate_to_adr)
mov x21, x0
- bl get_runtime_offset
- mov x5, x0
-
- ldr x0, =_text
- mov x20, x0
-
- add x1, x0, x5 /* x1: from address */
+ adr_l x1, _text
+ mov x20, x1
cmp x1, x21 /* already at correct address? */
beq 1f /* yes, skip copy to new address */
- sub x2, x2, x0 /* x2: size */
+ sub x2, x2, x1 /* x2: size */
mov x0, x21 /* x0: target */
/* adjust return address */
@@ -70,7 +66,7 @@ ENTRY(relocate_to_adr)
mov x0,#0
ic ivau, x0 /* flush icache */
- ldr x0,=1f
+ adr_l x0, 1f
sub x0, x0, x20
add x0, x0, x21
br x0 /* jump to relocated address */
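[Editor's note: in this last hunk, adr_l yields the address of the local label 1 in the currently running copy, and the sub/add pair rebases it into the copy at the new target before branching. A worked example with made-up addresses (purely illustrative, not taken from the patch):]

	/* assume the running copy sits at 0x40000000 (x20 = _text) and the
	 * relocation target held in x21 is 0x80000000 */
	adr_l	x0, 1f		/* x0 = 0x40001234, label in the running copy  */
	sub	x0, x0, x20	/* x0 = 0x00001234, offset of 1f in the image   */
	add	x0, x0, x21	/* x0 = 0x80001234, label in the copied image   */
	br	x0		/* continue executing from the relocated copy   */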