summary | refs | log | tree | commit | diff | stats
path: root/arch/arm/lib32
diff options
context:
space:
mode:
author    Sascha Hauer <s.hauer@pengutronix.de>    2020-09-17 14:05:38 +0200
committer Sascha Hauer <s.hauer@pengutronix.de>    2020-09-22 20:40:12 +0200
commit    932ef7a02e2fff7fd7d6ee1cb3558593987c5b3e (patch)
tree      41f8e03d75641587c706663b69d22b50211ca445 /arch/arm/lib32
parent    6cd9d2d600f3764a4c51d1a735e36397d91334f3 (diff)
download  barebox-932ef7a02e2fff7fd7d6ee1cb3558593987c5b3e.tar.gz
          barebox-932ef7a02e2fff7fd7d6ee1cb3558593987c5b3e.tar.xz
ARM: Add KASan support
This adds KASan support to the ARM architecture. What we are doing is:

* Add __no_sanitize_address attribute to various lowlevel functions which
  do not run in a proper C environment
* Add non-instrumented variants of memset/memcpy (prefixed with '__')
* Make original memcpy/memset weak symbols so strong definitions in
  lib/kasan/common.c can replace them
* Use non-instrumented memcpy in early functions
* Call kasan_init()

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'arch/arm/lib32')
-rw-r--r--  arch/arm/lib32/barebox.lds.S  |  4
-rw-r--r--  arch/arm/lib32/memcpy.S       |  3
-rw-r--r--  arch/arm/lib32/memset.S       |  4
3 files changed, 9 insertions, 2 deletions
diff --git a/arch/arm/lib32/barebox.lds.S b/arch/arm/lib32/barebox.lds.S
index ed279279a2..54d9b3e381 100644
--- a/arch/arm/lib32/barebox.lds.S
+++ b/arch/arm/lib32/barebox.lds.S
@@ -77,7 +77,9 @@ SECTIONS
_sdata = .;
. = ALIGN(4);
- .data : { *(.data*) }
+ .data : { *(.data*)
+ CONSTRUCTORS
+ }
.barebox_imd : { BAREBOX_IMD }
diff --git a/arch/arm/lib32/memcpy.S b/arch/arm/lib32/memcpy.S
index 5123691ca9..0fcdaa88e6 100644
--- a/arch/arm/lib32/memcpy.S
+++ b/arch/arm/lib32/memcpy.S
@@ -56,9 +56,12 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+.weak memcpy
ENTRY(memcpy)
+ENTRY(__memcpy)
#include "copy_template.S"
+ENDPROC(__memcpy)
ENDPROC(memcpy)
diff --git a/arch/arm/lib32/memset.S b/arch/arm/lib32/memset.S
index c4d2672038..6079dd89f6 100644
--- a/arch/arm/lib32/memset.S
+++ b/arch/arm/lib32/memset.S
@@ -15,6 +15,8 @@
.text
.align 5
+.weak memset
+ENTRY(__memset)
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
@@ -121,4 +123,4 @@ ENTRY(memset)
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)
-
+ENDPROC(__memset)