-rw-r--r--  Makefile                               6
-rw-r--r--  arch/arm/Kconfig                       2
-rw-r--r--  arch/arm/cpu/Makefile                  2
-rw-r--r--  arch/arm/cpu/common.c                  2
-rw-r--r--  arch/arm/cpu/setupc.S                  6
-rw-r--r--  arch/arm/cpu/start.c                   7
-rw-r--r--  arch/arm/include/asm/string.h          3
-rw-r--r--  arch/arm/lib32/barebox.lds.S           4
-rw-r--r--  arch/arm/lib32/memcpy.S                3
-rw-r--r--  arch/arm/lib32/memset.S                4
-rw-r--r--  arch/arm/lib64/string.c                26
-rw-r--r--  arch/sandbox/Kconfig                   4
-rw-r--r--  arch/sandbox/Makefile                  2
-rw-r--r--  arch/sandbox/os/Makefile               4
-rw-r--r--  arch/sandbox/os/common.c               2
-rw-r--r--  common/Kconfig                         10
-rw-r--r--  common/Makefile                        1
-rw-r--r--  common/kallsyms.c                      4
-rw-r--r--  common/startup.c                       15
-rw-r--r--  common/tlsf.c                          29
-rw-r--r--  include/asm-generic/barebox.lds.h      12
-rw-r--r--  include/asm-generic/sections.h         3
-rw-r--r--  include/linux/kasan.h                  89
-rw-r--r--  include/linux/kernel.h                 2
-rw-r--r--  include/printk.h                       20
-rw-r--r--  include/string.h                       4
-rw-r--r--  lib/Kconfig                            5
-rw-r--r--  lib/Makefile                           1
-rw-r--r--  lib/hexdump.c                          212
-rw-r--r--  lib/kasan/Kconfig                      16
-rw-r--r--  lib/kasan/Makefile                     14
-rw-r--r--  lib/kasan/common.c                     108
-rw-r--r--  lib/kasan/generic.c                    315
-rw-r--r--  lib/kasan/generic_report.c             150
-rw-r--r--  lib/kasan/kasan.h                      164
-rw-r--r--  lib/kasan/report.c                     199
-rw-r--r--  lib/string.c                           32
-rw-r--r--  pbl/string.c                           7
-rw-r--r--  scripts/Makefile.kasan                 17
-rw-r--r--  scripts/Makefile.lib                   10
40 files changed, 1462 insertions(+), 54 deletions(-)
diff --git a/Makefile b/Makefile
index 73a4a576ff..bf3266e977 100644
--- a/Makefile
+++ b/Makefile
@@ -448,6 +448,7 @@ export LDFLAGS_barebox
export LDFLAGS_pbl
export CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
# Files to ignore in find ... statements
@@ -636,7 +637,10 @@ KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
# change __FILE__ to the relative path from the srctree
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
-include $(srctree)/scripts/Makefile.ubsan
+include-y +=scripts/Makefile.ubsan
+include-$(CONFIG_KASAN) += scripts/Makefile.kasan
+
+include $(addprefix $(srctree)/, $(include-y))
# KBUILD_IMAGE: Default barebox image to build
# Depending on the architecture, this can be either compressed or not.
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 95fd8ecfe7..ea6d459dfe 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,8 @@ config ARM
select HAS_CACHE
select HAVE_CONFIGURABLE_TEXT_BASE if !RELOCATABLE
select HAVE_IMAGE_COMPRESSION
+ select HAVE_ARCH_KASAN
+ select ARM_OPTIMZED_STRING_FUNCTIONS if KASAN
default y
config ARM_LINUX
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index f7f9c30415..e7a6e3e6fb 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -9,6 +9,7 @@ AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
obj-y += start.o entry.o entry_ll$(S64).o
+KASAN_SANITIZE_start.o := n
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT) += board-dt-2nd.o
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT_AARCH64) += board-dt-2nd-aarch64.o
@@ -51,3 +52,4 @@ pbl-y += entry.o entry_ll$(S64).o
pbl-y += uncompress.o
obj-pbl-y += common.o sections.o
+KASAN_SANITIZE_common.o := n
diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 33f148fc0e..8cfcc8f6ce 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -120,7 +120,7 @@ void relocate_to_current_adr(void)
dstart += sizeof(*rel);
}
- memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
+ __memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
#else
#error "Architecture not specified"
#endif
diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc.S
index 8ae7c89a2c..55aa105b21 100644
--- a/arch/arm/cpu/setupc.S
+++ b/arch/arm/cpu/setupc.S
@@ -21,12 +21,12 @@ ENTRY(setup_c)
ldr r2,=__bss_start
sub r2, r2, r0
add r1, r0, r4
- bl memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
+ bl __memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
1: ldr r0, =__bss_start
mov r1, #0
ldr r2, =__bss_stop
sub r2, r2, r0
- bl memset /* clear bss */
+ bl __memset /* clear bss */
bl sync_caches_for_execution
sub lr, r5, r4 /* adjust return address to new location */
pop {r4, r5}
@@ -67,7 +67,7 @@ ENTRY(relocate_to_adr)
sub r7, r7, r1 /* sub address where we are actually running */
add r7, r7, r0 /* add address where we are going to run */
- bl memcpy /* copy binary */
+ bl __memcpy /* copy binary */
bl sync_caches_for_execution
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index aeca459cb1..f48f5beea8 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -15,6 +15,7 @@
#include <asm/unaligned.h>
#include <asm/cache.h>
#include <asm/mmu.h>
+#include <linux/kasan.h>
#include <memory.h>
#include <uncompress.h>
#include <malloc.h>
@@ -135,7 +136,7 @@ static int barebox_memory_areas_init(void)
}
device_initcall(barebox_memory_areas_init);
-__noreturn void barebox_non_pbl_start(unsigned long membase,
+__noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membase,
unsigned long memsize, void *boarddata)
{
unsigned long endmem = membase + memsize;
@@ -233,6 +234,8 @@ __noreturn void barebox_non_pbl_start(unsigned long membase,
pr_debug("initializing malloc pool at 0x%08lx (size 0x%08lx)\n",
malloc_start, malloc_end - malloc_start);
+ kasan_init(membase, memsize, malloc_start - (memsize >> KASAN_SHADOW_SCALE_SHIFT));
+
mem_malloc_init((void *)malloc_start, (void *)malloc_end - 1);
if (IS_ENABLED(CONFIG_BOOTM_OPTEE))
@@ -259,7 +262,7 @@ void start(unsigned long membase, unsigned long memsize, void *boarddata);
* First function in the uncompressed image. We get here from
* the pbl. The stack already has been set up by the pbl.
*/
-void NAKED __section(.text_entry) start(unsigned long membase,
+void NAKED __no_sanitize_address __section(.text_entry) start(unsigned long membase,
unsigned long memsize, void *boarddata)
{
barebox_non_pbl_start(membase, memsize, boarddata);
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 435647abda..cd79f63402 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -10,4 +10,7 @@ extern void *memset(void *, int, __kernel_size_t);
#endif
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memset(void *, int, __kernel_size_t);
+
#endif
diff --git a/arch/arm/lib32/barebox.lds.S b/arch/arm/lib32/barebox.lds.S
index ed279279a2..54d9b3e381 100644
--- a/arch/arm/lib32/barebox.lds.S
+++ b/arch/arm/lib32/barebox.lds.S
@@ -77,7 +77,9 @@ SECTIONS
_sdata = .;
. = ALIGN(4);
- .data : { *(.data*) }
+ .data : { *(.data*)
+ CONSTRUCTORS
+ }
.barebox_imd : { BAREBOX_IMD }
diff --git a/arch/arm/lib32/memcpy.S b/arch/arm/lib32/memcpy.S
index 5123691ca9..0fcdaa88e6 100644
--- a/arch/arm/lib32/memcpy.S
+++ b/arch/arm/lib32/memcpy.S
@@ -56,9 +56,12 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+.weak memcpy
ENTRY(memcpy)
+ENTRY(__memcpy)
#include "copy_template.S"
+ENDPROC(__memcpy)
ENDPROC(memcpy)
diff --git a/arch/arm/lib32/memset.S b/arch/arm/lib32/memset.S
index c4d2672038..6079dd89f6 100644
--- a/arch/arm/lib32/memset.S
+++ b/arch/arm/lib32/memset.S
@@ -15,6 +15,8 @@
.text
.align 5
+.weak memset
+ENTRY(__memset)
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
@@ -121,4 +123,4 @@ ENTRY(memset)
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)
-
+ENDPROC(__memset)
diff --git a/arch/arm/lib64/string.c b/arch/arm/lib64/string.c
index cb26331527..a2cf09e58e 100644
--- a/arch/arm/lib64/string.c
+++ b/arch/arm/lib64/string.c
@@ -5,18 +5,34 @@
void *__arch_memset(void *dst, int c, __kernel_size_t size);
void *__arch_memcpy(void * dest, const void *src, size_t count);
-void *memset(void *dst, int c, __kernel_size_t size)
+static void *_memset(void *dst, int c, __kernel_size_t size)
{
if (likely(get_cr() & CR_M))
return __arch_memset(dst, c, size);
- return __default_memset(dst, c, size);
+ return __nokasan_default_memset(dst, c, size);
}
-void *memcpy(void * dest, const void *src, size_t count)
+void __weak *memset(void *dst, int c, __kernel_size_t size)
+{
+ return _memset(dst, c, size);
+}
+
+void *__memset(void *dst, int c, __kernel_size_t size)
+ __alias(_memset);
+
+static void *_memcpy(void * dest, const void *src, size_t count)
{
if (likely(get_cr() & CR_M))
return __arch_memcpy(dest, src, count);
- return __default_memcpy(dest, src, count);
-}
\ No newline at end of file
+ return __nokasan_default_memcpy(dest, src, count);
+}
+
+void __weak *memcpy(void * dest, const void *src, size_t count)
+{
+ return _memcpy(dest, src, count);
+}
+
+void *__memcpy(void * dest, const void *src, size_t count)
+ __alias(_memcpy);
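
The split above is one pattern repeated across this series: a shared static implementation, a weak memcpy()/memset() that the KASAN interceptors in lib/kasan/common.c can override, and an always-uninstrumented __memcpy()/__memset() alias for the KASAN runtime itself and for early code that runs before the shadow is initialized. Below is a minimal sketch of that arrangement, using the hypothetical names _foo/foo/__foo and plain GCC attribute spellings; none of these names are part of the patch.

#include <stddef.h>

#define __weak		__attribute__((weak))
#define __alias(sym)	__attribute__((alias(#sym)))

/* shared implementation */
static void *_foo(void *dst, int c, size_t n)
{
	char *p = dst;

	while (n--)
		*p++ = c;
	return dst;
}

/* overridable entry point: a KASAN build provides a strong, checking foo() */
void __weak *foo(void *dst, int c, size_t n)
{
	return _foo(dst, c, n);
}

/* alias that always resolves to the local implementation, bypassing any interceptor */
void *__foo(void *dst, int c, size_t n) __alias(_foo);
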
diff --git a/arch/sandbox/Kconfig b/arch/sandbox/Kconfig
index 40e04919d2..81f7a96bd6 100644
--- a/arch/sandbox/Kconfig
+++ b/arch/sandbox/Kconfig
@@ -5,7 +5,7 @@ config SANDBOX
select OFTREE
select GPIOLIB
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select HAVE_ARCH_KASAN
+ select HAVE_ARCH_ASAN
select HAS_DMA
default y
@@ -22,7 +22,7 @@ config SANDBOX_UNWIND
bool
default y
select ARCH_HAS_STACK_DUMP
- depends on UBSAN || KASAN
+ depends on UBSAN || ASAN
config PHYS_ADDR_T_64BIT
bool
diff --git a/arch/sandbox/Makefile b/arch/sandbox/Makefile
index 27021222dc..ce1fe3b672 100644
--- a/arch/sandbox/Makefile
+++ b/arch/sandbox/Makefile
@@ -44,7 +44,7 @@ ifeq ($(CONFIG_GPIO_LIBFTDI1),y)
FTDI1_LIBS := $(shell pkg-config libftdi1 --libs)
endif
-ifeq ($(CONFIG_KASAN),y)
+ifeq ($(CONFIG_ASAN),y)
KBUILD_CPPFLAGS += -fsanitize=address
SANITIZER_LIBS += -fsanitize=address
endif
diff --git a/arch/sandbox/os/Makefile b/arch/sandbox/os/Makefile
index b2bd768bcb..15d688bfdd 100644
--- a/arch/sandbox/os/Makefile
+++ b/arch/sandbox/os/Makefile
@@ -8,10 +8,6 @@ KBUILD_CPPFLAGS += -DCONFIG_MALLOC_SIZE=$(CONFIG_MALLOC_SIZE)
KBUILD_CFLAGS := -Wall
-ifeq ($(CONFIG_KASAN),y)
-KBUILD_CPPFLAGS += -DCONFIG_KASAN=1
-endif
-
NOSTDINC_FLAGS :=
ifeq ($(CONFIG_SANDBOX_LINUX_I386),y)
diff --git a/arch/sandbox/os/common.c b/arch/sandbox/os/common.c
index 69fadb3b47..9fb5faf41d 100644
--- a/arch/sandbox/os/common.c
+++ b/arch/sandbox/os/common.c
@@ -347,7 +347,7 @@ int main(int argc, char *argv[])
int fdno = 0, envno = 0, option_index = 0;
char *aux;
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_ASAN
__sanitizer_set_death_callback(cookmode);
#endif
diff --git a/common/Kconfig b/common/Kconfig
index b350f5c355..3626eb2f29 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -1375,11 +1375,11 @@ config PBL_BREAK
source "lib/Kconfig.ubsan"
-config KASAN
- bool "KASAN: runtime memory debugger"
- depends on HAVE_ARCH_KASAN
+config ASAN
+ bool "ASAN: runtime memory debugger"
+ depends on HAVE_ARCH_ASAN
help
- Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
+ Enables ASAN (AddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
config COMPILE_TEST
@@ -1404,5 +1404,5 @@ config DDR_SPD
bool
select CRC_ITU_T
-config HAVE_ARCH_KASAN
+config HAVE_ARCH_ASAN
bool
diff --git a/common/Makefile b/common/Makefile
index ad5146a301..faf0415ef3 100644
--- a/common/Makefile
+++ b/common/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_GREGORIAN_CALENDER) += date.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_MALLOC_DLMALLOC) += dlmalloc.o
obj-$(CONFIG_MALLOC_TLSF) += tlsf_malloc.o tlsf.o calloc.o
+KASAN_SANITIZE_tlsf.o := n
obj-$(CONFIG_MALLOC_DUMMY) += dummy_malloc.o calloc.o
obj-$(CONFIG_MEMINFO) += meminfo.o
obj-$(CONFIG_MENU) += menu.o
diff --git a/common/kallsyms.c b/common/kallsyms.c
index e15dec5dfc..2c16ab2884 100644
--- a/common/kallsyms.c
+++ b/common/kallsyms.c
@@ -15,8 +15,8 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext))
- return 1;
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ return 1;
return 0;
}
diff --git a/common/startup.c b/common/startup.c
index 1c58e41288..6cb0588ae6 100644
--- a/common/startup.c
+++ b/common/startup.c
@@ -366,6 +366,19 @@ static int run_init(void)
return 0;
}
+typedef void (*ctor_fn_t)(void);
+
+/* Call all constructor functions linked into the kernel. */
+static void do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
+
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
+#endif
+}
+
int (*barebox_main)(void);
void __noreturn start_barebox(void)
@@ -376,6 +389,8 @@ void __noreturn start_barebox(void)
if (!IS_ENABLED(CONFIG_SHELL_NONE) && IS_ENABLED(CONFIG_COMMAND_SUPPORT))
barebox_main = run_init;
+ do_ctors();
+
for (initcall = __barebox_initcalls_start;
initcall < __barebox_initcalls_end; initcall++) {
pr_debug("initcall-> %pS\n", *initcall);
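
do_ctors() is needed because compiling with -fsanitize=kernel-address makes the compiler emit one constructor per instrumented object file; the linker-script change in this series collects them between __ctors_start and __ctors_end, and each constructor hands that file's globals to the runtime via __asan_register_globals(), which unpoisons the variables and poisons their redzones. A hand-written stand-in for such a constructor follows, with the made-up names tu_globals and asan_ctor; the real ones are compiler-generated.

#include "kasan.h"	/* assumed: lib/kasan/kasan.h from this series */

/* In reality the compiler emits one filled-in descriptor per global
 * variable of the translation unit. */
static struct kasan_global tu_globals[1];

static void __attribute__((constructor)) asan_ctor(void)
{
	__asan_register_globals(tu_globals, 1);
}
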
diff --git a/common/tlsf.c b/common/tlsf.c
index 86cc684ab6..4247a9d3c7 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -3,9 +3,14 @@
#include <string.h>
#include <tlsf.h>
#include "tlsfbits.h"
+#include <linux/kasan.h>
#define CHAR_BIT 8
+#ifndef CONFIG_KASAN
+#define __memcpy memcpy
+#endif
+
/*
** Constants.
*/
@@ -529,7 +534,7 @@ static void block_trim_free(control_t* control, block_header_t* block, size_t si
}
/* Trim any trailing block space off the end of a used block, return to pool. */
-static void block_trim_used(control_t* control, block_header_t* block, size_t size)
+static void block_trim_used(control_t* control, block_header_t* block, size_t size, size_t used)
{
tlsf_assert(!block_is_free(block) && "block must be used");
if (block_can_split(block, size))
@@ -541,6 +546,10 @@ static void block_trim_used(control_t* control, block_header_t* block, size_t si
remaining_block = block_merge_next(control, remaining_block);
block_insert(control, remaining_block);
}
+
+ kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+ KASAN_KMALLOC_REDZONE);
+ kasan_unpoison_shadow(block_to_ptr(block), used);
}
static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
@@ -589,7 +598,8 @@ static block_header_t* block_locate_free(control_t* control, size_t size)
return block;
}
-static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
+static void* block_prepare_used(control_t* control, block_header_t* block,
+ size_t size, size_t used)
{
void* p = 0;
if (block)
@@ -598,6 +608,10 @@ static void* block_prepare_used(control_t* control, block_header_t* block, size_
block_trim_free(control, block, size);
block_mark_as_used(block);
p = block_to_ptr(block);
+
+ kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+ KASAN_KMALLOC_REDZONE);
+ kasan_unpoison_shadow(p, used);
}
return p;
}
@@ -907,6 +921,7 @@ tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
{
tlsf_t tlsf = tlsf_create(mem);
tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
+ kasan_poison_shadow(mem, bytes, KASAN_TAG_INVALID);
return tlsf;
}
@@ -926,7 +941,8 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
block_header_t* block = block_locate_free(control, adjust);
- return block_prepare_used(control, block, adjust);
+
+ return block_prepare_used(control, block, adjust, size);
}
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
@@ -983,7 +999,7 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
}
}
- return block_prepare_used(control, block, adjust);
+ return block_prepare_used(control, block, adjust, size);
}
void tlsf_free(tlsf_t tlsf, void* ptr)
@@ -994,6 +1010,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
control_t* control = tlsf_cast(control_t*, tlsf);
block_header_t* block = block_from_ptr(ptr);
tlsf_assert(!block_is_free(block) && "block already marked as free");
+ kasan_poison_shadow(ptr, block_size(block), 0xff);
block_mark_as_free(block);
block = block_merge_prev(control, block);
block = block_merge_next(control, block);
@@ -1050,7 +1067,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
if (p)
{
const size_t minsize = tlsf_min(cursize, size);
- memcpy(p, ptr, minsize);
+ __memcpy(p, ptr, minsize);
tlsf_free(tlsf, ptr);
}
}
@@ -1064,7 +1081,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
}
/* Trim the resulting block and return the original pointer. */
- block_trim_used(control, block, adjust);
+ block_trim_used(control, block, adjust, size);
p = ptr;
}
}
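
Passing the caller's requested size through as `used` means block_prepare_used() and block_trim_used() unpoison only the bytes that were actually asked for, while the block header and the alignment slack behind the allocation stay poisoned as KASAN_KMALLOC_REDZONE. A hypothetical snippet of the off-by-one this makes visible, using the tlsf.h API shown above; the pool setup is illustrative only.

#include <tlsf.h>

static char pool_mem[64 << 10] __attribute__((aligned(8)));

void kasan_tlsf_demo(void)
{
	tlsf_t pool = tlsf_create_with_pool(pool_mem, sizeof(pool_mem));
	char *p = tlsf_malloc(pool, 10);

	/* Only the 10 requested bytes were unpoisoned; the slack behind
	 * them still carries KASAN_KMALLOC_REDZONE, so this read is
	 * reported as slab-out-of-bounds. */
	volatile char c = p[10];
	(void)c;

	tlsf_free(pool, p);
}
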
diff --git a/include/asm-generic/barebox.lds.h b/include/asm-generic/barebox.lds.h
index 138e9405a1..6971e2c1d2 100644
--- a/include/asm-generic/barebox.lds.h
+++ b/include/asm-generic/barebox.lds.h
@@ -113,12 +113,24 @@
KEEP(*(.rsa_keys.rodata.*)); \
__rsa_keys_end = .; \
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS() . = ALIGN(8); \
+ __ctors_start = .; \
+ KEEP(*(.ctors)) \
+ KEEP(*(SORT(.init_array.*))) \
+ KEEP(*(.init_array)) \
+ __ctors_end = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
#define RO_DATA_SECTION \
BAREBOX_INITCALLS \
BAREBOX_EXITCALLS \
BAREBOX_CMDS \
BAREBOX_RATP_CMDS \
BAREBOX_SYMS \
+ KERNEL_CTORS() \
BAREBOX_MAGICVARS \
BAREBOX_CLK_TABLE \
BAREBOX_DTB \
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index f584cad48d..870bff21f6 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -13,6 +13,9 @@ extern void *_barebox_image_size;
extern void *_barebox_bare_init_size;
extern void *_barebox_pbl_size;
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
#define barebox_image_size (__image_end - __image_start)
#define barebox_bare_init_size (unsigned int)&_barebox_bare_init_size
#define barebox_pbl_size (__piggydata_start - __image_start)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 0000000000..7c184cd0e2
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/types.h>
+
+/*
+ * On 64bit architectures tlsf aligns all allocations to a 64bit
+ * boundary, otherwise they are only 32bit aligned.
+ */
+#ifdef CONFIG_64BIT
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#else
+#define KASAN_SHADOW_SCALE_SHIFT 2
+#endif
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#define KASAN_FREE_PAGE 0xFF /* page was freed */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_KMALLOC_FREETRACK 0xFA /* object was freed and has free track set */
+
+#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
+#define KASAN_VMALLOC_INVALID 0xF8 /* unallocated space in vmapped page */
+
+/*
+ * Stack redzone shadow values
+ * (Those are compiler's ABI, don't change them)
+ */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+
+/*
+ * alloca redzone shadow values
+ */
+#define KASAN_ALLOCA_LEFT 0xCA
+#define KASAN_ALLOCA_RIGHT 0xCB
+
+#ifdef CONFIG_KASAN
+
+extern unsigned long kasan_shadow_start;
+extern unsigned long kasan_shadow_base;
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ unsigned long a = (unsigned long)addr;
+
+ a -= kasan_shadow_start;
+ a >>= KASAN_SHADOW_SCALE_SHIFT;
+ a += kasan_shadow_base;
+
+ return (void *)a;
+}
+
+void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base);
+
+/* Enable reporting bugs after kasan_disable_current() */
+extern void kasan_enable_current(void);
+
+/* Disable reporting bugs for current task */
+extern void kasan_disable_current(void);
+
+void kasan_poison_shadow(const void *address, size_t size, u8 value);
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_poison_shadow(const void *address, size_t size, u8 value) {}
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* LINUX_KASAN_H */
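
kasan_init() is called from arch/arm/cpu/start.c with membase as kasan_shadow_start and a region carved out just below the malloc area as kasan_shadow_base, so the mapping above reduces to shadow = shadow_base + ((addr - membase) >> KASAN_SHADOW_SCALE_SHIFT). A worked example of that arithmetic in ordinary hosted C, with made-up addresses (0x40000000 membase, 0x5f000000 shadow base, 64-bit shift of 3, i.e. one shadow byte per 8 bytes of memory):

#include <stdio.h>

int main(void)
{
	unsigned long shadow_start = 0x40000000UL;	/* assumed membase */
	unsigned long shadow_base  = 0x5f000000UL;	/* assumed shadow area */
	unsigned long addr         = 0x40001000UL;

	unsigned long shadow = ((addr - shadow_start) >> 3) + shadow_base;

	/* prints 0x5f000200: 0x1000 bytes past membase -> 0x200 shadow bytes */
	printf("shadow byte for %#lx is at %#lx\n", addr, shadow);
	return 0;
}
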
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 23c23a73f5..787571a5a0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -229,6 +229,8 @@ extern long long simple_strtoll(const char *,char **,unsigned int);
} \
)
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
diff --git a/include/printk.h b/include/printk.h
index 6a027175ab..9941ddb12c 100644
--- a/include/printk.h
+++ b/include/printk.h
@@ -115,14 +115,6 @@ int __pr_memory_display(int level, const void *addr, loff_t offs, unsigned nbyte
(offs), (nbytes), (size), (swab), pr_fmt("")) : 0; \
})
-#define DUMP_PREFIX_OFFSET 0
-static inline void print_hex_dump(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii)
-{
- memory_display(buf, 0, len, groupsize, 0);
-}
-
struct log_entry {
struct list_head list;
char *msg;
@@ -156,4 +148,16 @@ struct va_format {
va_list *va;
};
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen,
+ bool ascii);
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
#endif
diff --git a/include/string.h b/include/string.h
index 120a613d46..727bc51934 100644
--- a/include/string.h
+++ b/include/string.h
@@ -7,6 +7,10 @@
int strtobool(const char *str, int *val);
void *__default_memset(void *, int, __kernel_size_t);
+void *__nokasan_default_memset(void *, int, __kernel_size_t);
+
void *__default_memcpy(void * dest,const void *src,size_t count);
+void *__nokasan_default_memcpy(void * dest,const void *src,size_t count);
+
#endif /* __STRING_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index b4a8079700..6d909c1ac8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -60,6 +60,9 @@ config REED_SOLOMON
config BASE64
bool "include base64 encode/decode support"
+config CONSTRUCTORS
+ bool
+
config GENERIC_FIND_NEXT_BIT
def_bool n
@@ -151,6 +154,8 @@ source "lib/logo/Kconfig"
source "lib/bootstrap/Kconfig"
+source "lib/kasan/Kconfig"
+
config PRINTF_UUID
bool
diff --git a/lib/Makefile b/lib/Makefile
index 73399a1bf1..ba6af6f2ab 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,7 @@ obj-y += string.o
obj-y += strtox.o
obj-y += kstrtox.o
obj-y += vsprintf.o
+obj-$(CONFIG_KASAN) += kasan/
pbl-$(CONFIG_PBL_CONSOLE) += vsprintf.o
obj-y += div64.o
pbl-y += div64.o
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3b1d5e6736..93f345e881 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -1,15 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/hexdump.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <common.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <linux/ctype.h>
+#include <linux/log2.h>
+#include <printk.h>
+#include <asm/unaligned.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
@@ -40,7 +39,7 @@ EXPORT_SYMBOL(hex_to_bin);
* @src: ascii hexadecimal string
* @count: result length
*
- * Return 0 on success, -1 in case of bad input.
+ * Return 0 on success, -EINVAL in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
@@ -49,7 +48,7 @@ int hex2bin(u8 *dst, const char *src, size_t count)
int lo = hex_to_bin(*src++);
if ((hi < 0) || (lo < 0))
- return -1;
+ return -EINVAL;
*dst++ = (hi << 4) | lo;
}
@@ -72,3 +71,200 @@ char *bin2hex(char *dst, const void *src, size_t count)
return dst;
}
EXPORT_SYMBOL(bin2hex);
+
+/**
+ * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @linebuf: where to put the converted data
+ * @linebuflen: total size of @linebuf, including space for terminating NUL
+ * @ascii: include ASCII after the hex output
+ *
+ * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ *
+ * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
+ * to a hex + ASCII dump at the supplied memory location.
+ * The converted output is always NUL-terminated.
+ *
+ * E.g.:
+ * hex_dump_to_buffer(frame->data, frame->len, 16, 1,
+ * linebuf, sizeof(linebuf), true);
+ *
+ * example output buffer:
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ *
+ * Return:
+ * The amount of bytes placed in the buffer without terminating NUL. If the
+ * output was truncated, then the return value is the number of bytes
+ * (excluding the terminating NUL) which would have been written to the final
+ * string if enough space had been available.
+ */
+int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ int ngroups;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+ int ret;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if (!is_power_of_2(groupsize) || groupsize > 8)
+ groupsize = 1;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ ngroups = len / groupsize;
+ ascii_column = rowsize * 2 + rowsize / groupsize + 1;
+
+ if (!linebuflen)
+ goto overflow1;
+
+ if (!len)
+ goto nil;
+
+ if (groupsize == 8) {
+ const u64 *ptr8 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ get_unaligned(ptr8 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 4) {
+ const u32 *ptr4 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%8.8x", j ? " " : "",
+ get_unaligned(ptr4 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 2) {
+ const u16 *ptr2 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%4.4x", j ? " " : "",
+ get_unaligned(ptr2 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else {
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc_hi(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = hex_asc_lo(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < ascii_column) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+ }
+nil:
+ linebuf[lx] = '\0';
+ return lx;
+overflow2:
+ linebuf[lx++] = '\0';
+overflow1:
+ return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
+}
+EXPORT_SYMBOL(hex_dump_to_buffer);
+
+/**
+ * print_hex_dump - print a text hex dump to syslog for a binary blob of data
+ * @level: kernel log level (e.g. KERN_DEBUG)
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
+ * to the kernel log at the specified kernel log level, with an optional
+ * leading prefix.
+ *
+ * print_hex_dump() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ * print_hex_dump() iterates over the entire input @buf, breaking it into
+ * "line size" chunks to format and print.
+ *
+ * E.g.:
+ * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
+ * 16, 1, frame->data, frame->len, true);
+ *
+ * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
+ * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
+ * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
+ */
+void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%p: %s\n",
+ level, prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(print_hex_dump);
diff --git a/lib/kasan/Kconfig b/lib/kasan/Kconfig
new file mode 100644
index 0000000000..7a18cf95be
--- /dev/null
+++ b/lib/kasan/Kconfig
@@ -0,0 +1,16 @@
+source "scripts/Kconfig.include"
+
+config HAVE_ARCH_KASAN
+ bool
+
+config CC_HAS_KASAN_GENERIC
+ def_bool $(cc-option, -fsanitize=kernel-address)
+
+config KASAN
+ bool "KASAN: runtime memory debugger"
+ depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC)
+ depends on MALLOC_TLSF
+ select CONSTRUCTORS
+ help
+ Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
+ designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/kasan/Makefile b/lib/kasan/Makefile
new file mode 100644
index 0000000000..31e9d890d5
--- /dev/null
+++ b/lib/kasan/Makefile
@@ -0,0 +1,14 @@
+
+obj-y += generic_report.o generic.o report.o common.o test_kasan.o
+KASAN_SANITIZE_generic_report.o := n
+KASAN_SANITIZE_generic.o := n
+KASAN_SANITIZE_report.o := n
+KASAN_SANITIZE_common.o := n
+
+CC_FLAGS_KASAN_RUNTIME := $(call cc-option, -fno-conserve-stack)
+CC_FLAGS_KASAN_RUNTIME += -fno-stack-protector
+
+CFLAGS_generic_report.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME)
diff --git a/lib/kasan/common.c b/lib/kasan/common.c
new file mode 100644
index 0000000000..1ebf66a7b8
--- /dev/null
+++ b/lib/kasan/common.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common generic and tag-based KASAN code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+
+#include "kasan.h"
+
+int kasan_depth;
+
+void kasan_enable_current(void)
+{
+ kasan_depth++;
+}
+
+void kasan_disable_current(void)
+{
+ kasan_depth--;
+}
+
+#undef memset
+void *memset(void *addr, int c, size_t len)
+{
+ if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
+ return NULL;
+
+ return __memset(addr, c, len);
+}
+
+#ifdef __HAVE_ARCH_MEMMOVE
+#undef memmove
+void *memmove(void *dest, const void *src, size_t len)
+{
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memmove(dest, src, len);
+}
+#endif
+
+#undef memcpy
+void *memcpy(void *dest, const void *src, size_t len)
+{
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memcpy(dest, src, len);
+}
+
+/*
+ * Poisons the shadow memory for 'size' bytes starting from 'addr'.
+ * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ */
+void kasan_poison_shadow(const void *address, size_t size, u8 value)
+{
+ void *shadow_start, *shadow_end;
+
+ /*
+ * Perform shadow offset calculation based on untagged address, as
+ * some of the callers (e.g. kasan_poison_object_data) pass tagged
+ * addresses to this function.
+ */
+ address = reset_tag(address);
+
+ shadow_start = kasan_mem_to_shadow(address);
+ shadow_end = kasan_mem_to_shadow(address + size);
+
+ __memset(shadow_start, value, shadow_end - shadow_start);
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size)
+{
+ u8 tag = get_tag(address);
+
+ /*
+ * Perform shadow offset calculation based on untagged address, as
+ * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+ * addresses to this function.
+ */
+ address = reset_tag(address);
+
+ kasan_poison_shadow(address, size, tag);
+
+ if (size & KASAN_SHADOW_MASK) {
+ u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ *shadow = tag;
+ else
+ *shadow = size & KASAN_SHADOW_MASK;
+ }
+}
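
These interceptors take over the weak memcpy()/memset() entry points: every call is first validated against the shadow with check_memory_region() and only then forwarded to the uninstrumented __memcpy()/__memset(). A hypothetical example of what that catches, assuming the usual barebox headers and the TLSF-backed, KASAN-aware malloc()/free() from this series:

#include <common.h>
#include <malloc.h>

void kasan_memcpy_demo(void)
{
	char src[16] = "0123456789abcde";
	char *dst = malloc(8);

	/* 16 bytes into an 8-byte allocation: the destination check in the
	 * memcpy() interceptor fails, a slab-out-of-bounds write is
	 * reported, and nothing is copied. */
	memcpy(dst, src, 16);

	free(dst);
}
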
diff --git a/lib/kasan/generic.c b/lib/kasan/generic.c
new file mode 100644
index 0000000000..b33a6c1a6c
--- /dev/null
+++ b/lib/kasan/generic.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains core generic KASAN code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <common.h>
+
+#include "kasan.h"
+
+unsigned long kasan_shadow_start;
+unsigned long kasan_shadow_base;
+
+/*
+ * All functions below are always inlined so the compiler can
+ * perform better optimizations in each of __asan_loadX/__asan_storeX
+ * depending on memory access size X.
+ */
+
+static __always_inline bool memory_is_poisoned_1(unsigned long addr)
+{
+ s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(shadow_value)) {
+ s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ return unlikely(last_accessible_byte >= shadow_value);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+ unsigned long size)
+{
+ u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
+
+ /*
+ * Access crosses 8(shadow size)-byte boundary. Such access maps
+ * into 2 shadow bytes, so we need to check them both.
+ */
+ if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+ return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
+
+ return memory_is_poisoned_1(addr + size - 1);
+}
+
+static __always_inline bool memory_is_poisoned_16(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ /* Unaligned 16-bytes access maps into 3 shadow bytes. */
+ if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ return *shadow_addr || memory_is_poisoned_1(addr + 15);
+
+ return *shadow_addr;
+}
+
+static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
+ size_t size)
+{
+ while (size) {
+ if (unlikely(*start))
+ return (unsigned long)start;
+ start++;
+ size--;
+ }
+
+ return 0;
+}
+
+static __always_inline unsigned long memory_is_nonzero(const void *start,
+ const void *end)
+{
+ unsigned int words;
+ unsigned long ret;
+ unsigned int prefix = (unsigned long)start % 8;
+
+ if (end - start <= 16)
+ return bytes_is_nonzero(start, end - start);
+
+ if (prefix) {
+ prefix = 8 - prefix;
+ ret = bytes_is_nonzero(start, prefix);
+ if (unlikely(ret))
+ return ret;
+ start += prefix;
+ }
+
+ words = (end - start) / 8;
+ while (words) {
+ if (unlikely(*(u64 *)start))
+ return bytes_is_nonzero(start, 8);
+ start += 8;
+ words--;
+ }
+
+ return bytes_is_nonzero(start, (end - start) % 8);
+}
+
+static __always_inline bool memory_is_poisoned_n(unsigned long addr,
+ size_t size)
+{
+ unsigned long ret;
+
+ ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
+ kasan_mem_to_shadow((void *)addr + size - 1) + 1);
+
+ if (unlikely(ret)) {
+ unsigned long last_byte = addr + size - 1;
+ s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
+
+ if (unlikely(ret != (unsigned long)last_shadow ||
+ ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
+{
+ if (__builtin_constant_p(size)) {
+ switch (size) {
+ case 1:
+ return memory_is_poisoned_1(addr);
+ case 2:
+ case 4:
+ case 8:
+ return memory_is_poisoned_2_4_8(addr, size);
+ case 16:
+ return memory_is_poisoned_16(addr);
+ default:
+ BUILD_BUG();
+ }
+ }
+
+ return memory_is_poisoned_n(addr, size);
+}
+
+static bool kasan_initialized;
+
+static __always_inline bool check_memory_region_inline(unsigned long addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ if (!kasan_initialized)
+ return true;
+
+ if (addr < kasan_shadow_start)
+ return true;
+
+ if (unlikely(size == 0))
+ return true;
+
+ if (unlikely(addr + size < addr))
+ return !kasan_report(addr, size, write, ret_ip);
+
+ if (addr < kasan_shadow_base)
+ return true;
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return true;
+
+ return !kasan_report(addr, size, write, ret_ip);
+}
+
+void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base)
+{
+ kasan_shadow_start = membase;
+ kasan_shadow_base = shadow_base;
+
+ kasan_unpoison_shadow((void *)membase, memsize);
+ kasan_initialized = true;
+}
+
+bool __no_sanitize_address check_memory_region(unsigned long addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ return check_memory_region_inline(addr, size, write, ret_ip);
+}
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
+#define DEFINE_ASAN_LOAD_STORE(size) \
+ void __no_sanitize_address __asan_load##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, false, _RET_IP_);\
+ } \
+ EXPORT_SYMBOL(__asan_load##size); \
+ __alias(__asan_load##size) \
+ void __no_sanitize_address __asan_load##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_load##size##_noabort); \
+ void __asan_store##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, true, _RET_IP_); \
+ } \
+ EXPORT_SYMBOL(__asan_store##size); \
+ __alias(__asan_store##size) \
+ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
+DEFINE_ASAN_LOAD_STORE(1);
+DEFINE_ASAN_LOAD_STORE(2);
+DEFINE_ASAN_LOAD_STORE(4);
+DEFINE_ASAN_LOAD_STORE(8);
+DEFINE_ASAN_LOAD_STORE(16);
+
+void __asan_loadN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_loadN);
+
+__alias(__asan_loadN)
+void __asan_loadN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_loadN_noabort);
+
+void __asan_storeN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_storeN);
+
+__alias(__asan_storeN)
+void __asan_storeN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_storeN_noabort);
+
+/* to shut up compiler complaints */
+void __asan_handle_no_return(void) {}
+EXPORT_SYMBOL(__asan_handle_no_return);
+
+/* Emitted by compiler to poison alloca()ed objects. */
+void __asan_alloca_poison(unsigned long addr, size_t size)
+{
+ size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
+ rounded_up_size;
+ size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+
+ const void *left_redzone = (const void *)(addr -
+ KASAN_ALLOCA_REDZONE_SIZE);
+ const void *right_redzone = (const void *)(addr + rounded_up_size);
+
+ WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
+
+ kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
+ size - rounded_down_size);
+ kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_LEFT);
+ kasan_poison_shadow(right_redzone,
+ padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_RIGHT);
+}
+EXPORT_SYMBOL(__asan_alloca_poison);
+
+/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
+{
+ if (unlikely(!stack_top || stack_top > stack_bottom))
+ return;
+
+ kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+}
+EXPORT_SYMBOL(__asan_allocas_unpoison);
+
+/* Emitted by the compiler to [un]poison local variables. */
+#define DEFINE_ASAN_SET_SHADOW(byte) \
+ void __asan_set_shadow_##byte(const void *addr, size_t size) \
+ { \
+ __memset((void *)addr, 0x##byte, size); \
+ } \
+ EXPORT_SYMBOL(__asan_set_shadow_##byte)
+
+DEFINE_ASAN_SET_SHADOW(00);
+DEFINE_ASAN_SET_SHADOW(f1);
+DEFINE_ASAN_SET_SHADOW(f2);
+DEFINE_ASAN_SET_SHADOW(f3);
+DEFINE_ASAN_SET_SHADOW(f5);
+DEFINE_ASAN_SET_SHADOW(f8);
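
The partial-granule encoding that memory_is_poisoned_1() checks works like this: a shadow byte of 0 means the whole granule (8 bytes with a scale shift of 3) is addressable, a value of 1..7 means only that many leading bytes are, and a negative value marks the granule fully poisoned. A worked example in ordinary hosted C for an assumed 13-byte object whose two shadow bytes are {0x00, 0x05}:

#include <stdio.h>

int main(void)
{
	signed char shadow[2] = { 0x00, 0x05 };	/* assumed shadow of a 13-byte object */
	unsigned long offsets[] = { 7, 12, 13 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long off = offsets[i];
		signed char s = shadow[off >> 3];
		/* same test as memory_is_poisoned_1(): poisoned when the
		 * shadow byte is non-zero and the in-granule offset does
		 * not stay below it */
		int poisoned = s && (signed char)(off & 7) >= s;

		printf("byte %lu: %s\n", off, poisoned ? "poisoned" : "ok");
	}
	return 0;
}
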
diff --git a/lib/kasan/generic_report.c b/lib/kasan/generic_report.c
new file mode 100644
index 0000000000..1cc5829e8d
--- /dev/null
+++ b/lib/kasan/generic_report.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains generic KASAN specific error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/bitops.h>
+
+#include <asm/sections.h>
+
+#include "kasan.h"
+
+void *find_first_bad_addr(void *addr, size_t size)
+{
+ void *p = addr;
+
+ while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
+ p += KASAN_SHADOW_SCALE_SIZE;
+ return p;
+}
+
+static const char *get_shadow_bug_type(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown-crash";
+ u8 *shadow_addr;
+
+ shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
+
+ /*
+ * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+ * at the next shadow byte to determine the type of the bad access.
+ */
+ if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+ shadow_addr++;
+
+ switch (*shadow_addr) {
+ case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+ /*
+ * In theory it's still possible to see these shadow values
+ * due to a data race in the kernel code.
+ */
+ bug_type = "out-of-bounds";
+ break;
+ case KASAN_PAGE_REDZONE:
+ case KASAN_KMALLOC_REDZONE:
+ bug_type = "slab-out-of-bounds";
+ break;
+ case KASAN_GLOBAL_REDZONE:
+ bug_type = "global-out-of-bounds";
+ break;
+ case KASAN_STACK_LEFT:
+ case KASAN_STACK_MID:
+ case KASAN_STACK_RIGHT:
+ case KASAN_STACK_PARTIAL:
+ bug_type = "stack-out-of-bounds";
+ break;
+ case KASAN_FREE_PAGE:
+ case KASAN_KMALLOC_FREE:
+ case KASAN_KMALLOC_FREETRACK:
+ bug_type = "use-after-free";
+ break;
+ case KASAN_ALLOCA_LEFT:
+ case KASAN_ALLOCA_RIGHT:
+ bug_type = "alloca-out-of-bounds";
+ break;
+ case KASAN_VMALLOC_INVALID:
+ bug_type = "vmalloc-out-of-bounds";
+ break;
+ }
+
+ return bug_type;
+}
+
+static const char *get_wild_bug_type(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown-crash";
+
+ if ((unsigned long)info->access_addr < PAGE_SIZE)
+ bug_type = "null-ptr-deref";
+ else
+ bug_type = "wild-memory-access";
+
+ return bug_type;
+}
+
+const char *get_bug_type(struct kasan_access_info *info)
+{
+ /*
+ * If access_size is a negative number, then it has reason to be
+ * defined as out-of-bounds bug type.
+ *
+ * Casting negative numbers to size_t would indeed turn up as
+ * a large size_t and its value will be larger than ULONG_MAX/2,
+ * so that this can qualify as out-of-bounds.
+ */
+ if (info->access_addr + info->access_size < info->access_addr)
+ return "out-of-bounds";
+
+ if (addr_has_shadow(info->access_addr))
+ return get_shadow_bug_type(info);
+ return get_wild_bug_type(info);
+}
+
+#define DEFINE_ASAN_REPORT_LOAD(size) \
+void __asan_report_load##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, false, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_load##size##_noabort)
+
+#define DEFINE_ASAN_REPORT_STORE(size) \
+void __asan_report_store##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, true, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_store##size##_noabort)
+
+DEFINE_ASAN_REPORT_LOAD(1);
+DEFINE_ASAN_REPORT_LOAD(2);
+DEFINE_ASAN_REPORT_LOAD(4);
+DEFINE_ASAN_REPORT_LOAD(8);
+DEFINE_ASAN_REPORT_LOAD(16);
+DEFINE_ASAN_REPORT_STORE(1);
+DEFINE_ASAN_REPORT_STORE(2);
+DEFINE_ASAN_REPORT_STORE(4);
+DEFINE_ASAN_REPORT_STORE(8);
+DEFINE_ASAN_REPORT_STORE(16);
+
+void __asan_report_load_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_load_n_noabort);
+
+void __asan_report_store_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_store_n_noabort);
diff --git a/lib/kasan/kasan.h b/lib/kasan/kasan.h
new file mode 100644
index 0000000000..e17f49dbec
--- /dev/null
+++ b/lib/kasan/kasan.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MM_KASAN_KASAN_H
+#define __MM_KASAN_KASAN_H
+
+#include <linux/kasan.h>
+#include <linux/linkage.h>
+
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_ALLOCA_REDZONE_SIZE 32
+
+/*
+ * Stack frame marker (compiler ABI).
+ */
+#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3
+
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
+
+struct kasan_access_info {
+ const void *access_addr;
+ const void *first_bad_addr;
+ size_t access_size;
+ bool is_write;
+ unsigned long ip;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+ const void *beg; /* Address of the beginning of the global variable. */
+ size_t size; /* Size of the global variable. */
+ size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
+ const void *name;
+ const void *module_name; /* Name of the module where the global variable is declared. */
+ unsigned long has_dynamic_init; /* This needed for C++ */
+#if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+#endif
+#if KASAN_ABI_VERSION >= 5
+ char *odr_indicator;
+#endif
+};
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long sa = (unsigned long)shadow_addr;
+
+ sa -= kasan_shadow_base;
+ sa <<= KASAN_SHADOW_SCALE_SHIFT;
+ sa += kasan_shadow_start;
+
+ return (void *)sa;
+}
+
+static inline bool addr_has_shadow(const void *addr)
+{
+ return (addr >= (void *)kasan_shadow_start);
+}
+
+/**
+ * check_memory_region - Check memory region, and report if invalid access.
+ * @addr: the accessed address
+ * @size: the accessed size
+ * @write: true if access is a write access
+ * @ret_ip: return address
+ * @return: true if access was valid, false if invalid
+ */
+bool check_memory_region(unsigned long addr, size_t size, bool write,
+ unsigned long ret_ip);
+
+void *find_first_bad_addr(void *addr, size_t size);
+const char *get_bug_type(struct kasan_access_info *info);
+
+bool kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+void kasan_report_invalid_free(void *object, unsigned long ip);
+
+#ifndef arch_kasan_set_tag
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+ return addr;
+}
+#endif
+#ifndef arch_kasan_reset_tag
+#define arch_kasan_reset_tag(addr) ((void *)(addr))
+#endif
+#ifndef arch_kasan_get_tag
+#define arch_kasan_get_tag(addr) 0
+#endif
+
+#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
+#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
+#define get_tag(addr) arch_kasan_get_tag(addr)
+
+/*
+ * Exported functions for interfaces called from assembly or from generated
+ * code. Declarations here to avoid warning about missing declarations.
+ */
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
+void __asan_register_globals(struct kasan_global *globals, size_t size);
+void __asan_unregister_globals(struct kasan_global *globals, size_t size);
+void __asan_handle_no_return(void);
+void __asan_alloca_poison(unsigned long addr, size_t size);
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
+
+void __asan_load1(unsigned long addr);
+void __asan_store1(unsigned long addr);
+void __asan_load2(unsigned long addr);
+void __asan_store2(unsigned long addr);
+void __asan_load4(unsigned long addr);
+void __asan_store4(unsigned long addr);
+void __asan_load8(unsigned long addr);
+void __asan_store8(unsigned long addr);
+void __asan_load16(unsigned long addr);
+void __asan_store16(unsigned long addr);
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+
+void __asan_load1_noabort(unsigned long addr);
+void __asan_store1_noabort(unsigned long addr);
+void __asan_load2_noabort(unsigned long addr);
+void __asan_store2_noabort(unsigned long addr);
+void __asan_load4_noabort(unsigned long addr);
+void __asan_store4_noabort(unsigned long addr);
+void __asan_load8_noabort(unsigned long addr);
+void __asan_store8_noabort(unsigned long addr);
+void __asan_load16_noabort(unsigned long addr);
+void __asan_store16_noabort(unsigned long addr);
+void __asan_loadN_noabort(unsigned long addr, size_t size);
+void __asan_storeN_noabort(unsigned long addr, size_t size);
+
+void __asan_report_load1_noabort(unsigned long addr);
+void __asan_report_store1_noabort(unsigned long addr);
+void __asan_report_load2_noabort(unsigned long addr);
+void __asan_report_store2_noabort(unsigned long addr);
+void __asan_report_load4_noabort(unsigned long addr);
+void __asan_report_store4_noabort(unsigned long addr);
+void __asan_report_load8_noabort(unsigned long addr);
+void __asan_report_store8_noabort(unsigned long addr);
+void __asan_report_load16_noabort(unsigned long addr);
+void __asan_report_store16_noabort(unsigned long addr);
+void __asan_report_load_n_noabort(unsigned long addr, size_t size);
+void __asan_report_store_n_noabort(unsigned long addr, size_t size);
+
+void __asan_set_shadow_00(const void *addr, size_t size);
+void __asan_set_shadow_f1(const void *addr, size_t size);
+void __asan_set_shadow_f2(const void *addr, size_t size);
+void __asan_set_shadow_f3(const void *addr, size_t size);
+void __asan_set_shadow_f5(const void *addr, size_t size);
+void __asan_set_shadow_f8(const void *addr, size_t size);
+
+extern int kasan_depth;
+
+#endif
diff --git a/lib/kasan/report.c b/lib/kasan/report.c
new file mode 100644
index 0000000000..b7b2d032ee
--- /dev/null
+++ b/lib/kasan/report.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common generic and tag-based KASAN error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <printk.h>
+#include <asm-generic/sections.h>
+
+#include "kasan.h"
+
+/* Shadow layout customization. */
+#define SHADOW_BYTES_PER_BLOCK 1
+#define SHADOW_BLOCKS_PER_ROW 16
+#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
+#define SHADOW_ROWS_AROUND_ADDR 2
+
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED 0
+#define KASAN_BIT_MULTI_SHOT 1
+
+bool kasan_save_enable_multi_shot(void)
+{
+ return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+ if (!enabled)
+ clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static void print_error_description(struct kasan_access_info *info)
+{
+ pr_err("BUG: KASAN: %s in %pS\n",
+ get_bug_type(info), (void *)info->ip);
+ pr_err("%s of size %zu at addr %px\n",
+ info->is_write ? "Write" : "Read", info->access_size,
+ info->access_addr);
+}
+
+static void start_report(unsigned long *flags)
+{
+ /*
+ * Make sure we don't end up in loop.
+ */
+ kasan_disable_current();
+ pr_err("==================================================================\n");
+}
+
+static void end_report(unsigned long *flags)
+{
+ pr_err("==================================================================\n");
+ kasan_enable_current();
+}
+
+static inline bool kernel_or_module_addr(const void *addr)
+{
+ if (addr >= (void *)_stext && addr < (void *)_end)
+ return true;
+ return false;
+}
+
+static void print_address_description(void *addr, u8 tag)
+{
+ dump_stack();
+ pr_err("\n");
+
+ if (kernel_or_module_addr(addr)) {
+ pr_err("The buggy address belongs to the variable:\n");
+ pr_err(" %pS\n", addr);
+ }
+}
+
+static bool row_is_guilty(const void *row, const void *guilty)
+{
+ return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
+}
+
+static int shadow_pointer_offset(const void *row, const void *shadow)
+{
+ /* The length of ">ff00ff00ff00ff00: " is
+ * 3 + (BITS_PER_LONG/8)*2 chars.
+ */
+ return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
+ (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
+}
+
+static void print_shadow_for_address(const void *addr)
+{
+ int i;
+ const void *shadow = kasan_mem_to_shadow(addr);
+ const void *shadow_row;
+
+ shadow_row = (void *)round_down((unsigned long)shadow,
+ SHADOW_BYTES_PER_ROW)
+ - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
+
+ pr_err("Memory state around the buggy address:\n");
+
+ for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
+ const void *kaddr = kasan_shadow_to_mem(shadow_row);
+ char buffer[4 + (BITS_PER_LONG/8)*2];
+ char shadow_buf[SHADOW_BYTES_PER_ROW];
+
+ snprintf(buffer, sizeof(buffer),
+ (i == 0) ? ">%px: " : " %px: ", kaddr);
+ /*
+ * We should not pass a shadow pointer to generic
+ * function, because generic functions may try to
+ * access kasan mapping for the passed address.
+ */
+ memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
+ print_hex_dump(KERN_ERR, buffer,
+ DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
+ shadow_buf, SHADOW_BYTES_PER_ROW, 0);
+
+ if (row_is_guilty(shadow_row, shadow))
+ printf("%*c\n",
+ shadow_pointer_offset(shadow_row, shadow),
+ '^');
+
+ shadow_row += SHADOW_BYTES_PER_ROW;
+ }
+}
+
+static bool report_enabled(void)
+{
+ if (kasan_depth)
+ return false;
+ if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ return true;
+ return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
+
+static void __kasan_report(unsigned long addr, size_t size, bool is_write,
+ unsigned long ip)
+{
+ struct kasan_access_info info;
+ void *tagged_addr;
+ void *untagged_addr;
+ unsigned long flags;
+
+ tagged_addr = (void *)addr;
+ untagged_addr = reset_tag(tagged_addr);
+
+ info.access_addr = tagged_addr;
+ if (addr_has_shadow(untagged_addr))
+ info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
+ else
+ info.first_bad_addr = untagged_addr;
+ info.access_size = size;
+ info.is_write = is_write;
+ info.ip = ip;
+
+ start_report(&flags);
+
+ print_error_description(&info);
+ pr_err("\n");
+
+ if (addr_has_shadow(untagged_addr)) {
+ print_address_description(untagged_addr, get_tag(tagged_addr));
+ pr_err("\n");
+ print_shadow_for_address(info.first_bad_addr);
+ } else {
+ dump_stack();
+ }
+
+ end_report(&flags);
+}
+
+bool kasan_report(unsigned long addr, size_t size, bool is_write,
+ unsigned long ip)
+{
+ bool ret = false;
+
+ if (likely(report_enabled())) {
+ __kasan_report(addr, size, is_write, ip);
+ ret = true;
+ }
+
+ return ret;
+}
diff --git a/lib/string.c b/lib/string.c
index 7548fd3581..c0bf372657 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -491,7 +491,7 @@ char *strswab(const char *s)
*
* Do not use memset() to access IO space, use memset_io() instead.
*/
-void *__default_memset(void * s,int c,size_t count)
+void *__default_memset(void * s, int c, size_t count)
{
char *xs = (char *) s;
@@ -502,8 +502,20 @@ void *__default_memset(void * s,int c,size_t count)
}
EXPORT_SYMBOL(__default_memset);
+void __no_sanitize_address *__nokasan_default_memset(void * s, int c, size_t count)
+{
+ char *xs = (char *) s;
+
+ while (count--)
+ *xs++ = c;
+
+ return s;
+}
+EXPORT_SYMBOL(__nokasan_default_memset);
+
#ifndef __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t count) __alias(__default_memset);
+void *__memset(void *s, int c, size_t count) __alias(__default_memset);
#endif
/**
@@ -515,7 +527,7 @@ void *memset(void *s, int c, size_t count) __alias(__default_memset);
* You should not use this function to access IO space, use memcpy_toio()
* or memcpy_fromio() instead.
*/
-void *__default_memcpy(void * dest,const void *src,size_t count)
+void *__default_memcpy(void * dest,const void *src, size_t count)
{
char *tmp = (char *) dest, *s = (char *) src;
@@ -524,11 +536,25 @@ void *__default_memcpy(void * dest,const void *src,size_t count)
return dest;
}
-EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__default_memcpy);
+
+void __no_sanitize_address *__nokasan_default_memcpy(void * dest,
+ const void *src, size_t count)
+{
+ char *tmp = (char *) dest, *s = (char *) src;
+
+ while (count--)
+ *tmp++ = *s++;
+
+ return dest;
+}
+EXPORT_SYMBOL(__nokasan_default_memcpy);
#ifndef __HAVE_ARCH_MEMCPY
void *memcpy(void * dest, const void *src, size_t count)
__alias(__default_memcpy);
+void *__memcpy(void * dest, const void *src, size_t count)
+ __alias(__default_memcpy);
#endif
diff --git a/pbl/string.c b/pbl/string.c
index 46bf0b32b3..e6c0997ebc 100644
--- a/pbl/string.c
+++ b/pbl/string.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/compiler.h>
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
@@ -41,6 +42,9 @@ void *memcpy(void *__dest, __const void *__src, size_t __n)
return __dest;
}
+void *__memcpy(void *__dest, __const void *__src, size_t __n)
+ __alias(memcpy);
+
void *memmove(void *__dest, __const void *__src, size_t count)
{
unsigned char *d = __dest;
@@ -120,6 +124,9 @@ void *memset(void *s, int c, size_t count)
return s;
}
+void *__memset(void *s, int c, size_t count)
+ __alias(memset);
+
/**
* strnlen - Find the length of a length-limited string
* @s: The string to be sized
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
new file mode 100644
index 0000000000..83f6aa543d
--- /dev/null
+++ b/scripts/Makefile.kasan
@@ -0,0 +1,17 @@
+ # SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KASAN
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+endif
+
+CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
+CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) \
+ $(call cc-param,asan-globals=1) \
+ $(call cc-param,asan-instrument-allocas=1)
+
+ifndef CONFIG_CPU_64
+CFLAGS_KASAN += $(call cc-param,asan-stack=1)
+endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 337430cd00..ab7d9f2bdf 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -127,6 +127,16 @@ _c_flags = $(KBUILD_CFLAGS) $(ccflags-y) $(CFLAGS_$(target-stem).o)
_a_flags = $(KBUILD_AFLAGS) $(asflags-y) $(AFLAGS_$(target-stem).o)
_cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(target-stem).lds)
+#
+# Enable address sanitizer flags for kernel except some files or directories
+# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
+#
+ifeq ($(CONFIG_KASAN),y)
+_c_flags += $(if $(part-of-pbl),, $(if $(patsubst n%,, \
+ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
+ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)))
+endif
+
ifeq ($(CONFIG_UBSAN),y)
_CFLAGS_UBSAN = $(eval _CFLAGS_UBSAN := $(CFLAGS_UBSAN))$(_CFLAGS_UBSAN)
_c_flags += $(if $(patsubst n%,, \