From 6cd9d2d600f3764a4c51d1a735e36397d91334f3 Mon Sep 17 00:00:00 2001
From: Sascha Hauer <s.hauer@pengutronix.de>
Date: Thu, 17 Sep 2020 09:39:17 +0200
Subject: Add KASan support

KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It
provides a fast and comprehensive solution for finding use-after-free
and out-of-bounds bugs.

This adds support for KASan to barebox. It is basically a stripped-down
version taken from the Linux kernel as of v5.9-rc1.

The initial Linux commit 0b24becc810d ("kasan: add kernel address
sanitizer infrastructure") describes what KASan does:

| KASAN uses compile-time instrumentation for checking every memory access,
| therefore GCC > v4.9.2 required. v4.9.2 almost works, but has issues with
| putting symbol aliases into the wrong section, which breaks kasan
| instrumentation of globals.
|
| Basic idea:
|
| The main idea of KASAN is to use shadow memory to record whether each byte
| of memory is safe to access or not, and use compiler's instrumentation to
| check the shadow memory on each memory access.
|
| Address sanitizer uses 1/8 of the memory addressable in kernel for shadow
| memory and uses direct mapping with a scale and offset to translate a
| memory address to its corresponding shadow address.
|
| For every 8 bytes there is one corresponding byte of shadow memory.
| The following encoding used for each shadow byte: 0 means that all 8 bytes
| of the corresponding memory region are valid for access; k (1 <= k <= 7)
| means that the first k bytes are valid for access, and other (8 - k) bytes
| are not; Any negative value indicates that the entire 8-bytes are
| inaccessible. Different negative values used to distinguish between
| different kinds of inaccessible memory (redzones, freed memory) (see
| mm/kasan/kasan.h).
|
| To be able to detect accesses to bad memory we need a special compiler.
| Such compiler inserts a specific function calls (__asan_load*(addr),
| __asan_store*(addr)) before each memory access of size 1, 2, 4, 8 or 16.
|
| These functions check whether memory region is valid to access or not by
| checking corresponding shadow memory. If access is not valid an error
| printed.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 common/tlsf.c | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

(limited to 'common/tlsf.c')
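To make the quoted encoding concrete, here is a minimal sketch of the test
that the compiler-inserted __asan_load1()/__asan_store1() hooks boil down
to. It is illustrative only: shadow_offset, mem_to_shadow() and
access_ok_1() are invented names for this example, not code from the
barebox or Linux sources.

#include <stdbool.h>
#include <stdint.h>

#define SHADOW_SCALE_SHIFT 3	/* one shadow byte covers 8 bytes of memory */

extern uintptr_t shadow_offset;	/* placeholder; the real offset is configuration-dependent */

/* Direct mapping with a scale and offset, as described above. */
static signed char *mem_to_shadow(const void *addr)
{
	return (signed char *)(((uintptr_t)addr >> SHADOW_SCALE_SHIFT) +
			       shadow_offset);
}

/* Would a 1-byte access at addr be valid? */
static bool access_ok_1(const void *addr)
{
	signed char s = *mem_to_shadow(addr);

	if (s == 0)
		return true;	/* all 8 bytes of this granule are accessible */
	if (s < 0)
		return false;	/* redzone, freed memory, ... */

	/* 1 <= s <= 7: only the first s bytes of the granule are valid */
	return ((uintptr_t)addr & 7) < (uintptr_t)s;
}

Wider accesses (2, 4, 8 or 16 bytes) apply the same test to every shadow
byte they touch; the real runtime prints a report on failure instead of
returning a flag.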
diff --git a/common/tlsf.c b/common/tlsf.c
index 86cc684ab6..4247a9d3c7 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -3,9 +3,14 @@
 #include 
 #include 
 #include "tlsfbits.h"
+#include <linux/kasan.h>
 
 #define CHAR_BIT 8
 
+#ifndef CONFIG_KASAN
+#define __memcpy memcpy
+#endif
+
 /*
 ** Constants.
 */
@@ -529,7 +534,7 @@ static void block_trim_free(control_t* control, block_header_t* block, size_t si
 }
 
 /* Trim any trailing block space off the end of a used block, return to pool. */
-static void block_trim_used(control_t* control, block_header_t* block, size_t size)
+static void block_trim_used(control_t* control, block_header_t* block, size_t size, size_t used)
 {
 	tlsf_assert(!block_is_free(block) && "block must be used");
 	if (block_can_split(block, size))
@@ -541,6 +546,10 @@ static void block_trim_used(control_t* control, block_header_t* block, size_t si
 		remaining_block = block_merge_next(control, remaining_block);
 		block_insert(control, remaining_block);
 	}
+
+	kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+			    KASAN_KMALLOC_REDZONE);
+	kasan_unpoison_shadow(block_to_ptr(block), used);
 }
 
 static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
@@ -589,7 +598,8 @@ static block_header_t* block_locate_free(control_t* control, size_t size)
 	return block;
 }
 
-static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
+static void* block_prepare_used(control_t* control, block_header_t* block,
+				size_t size, size_t used)
 {
 	void* p = 0;
 	if (block)
@@ -598,6 +608,10 @@ static void* block_prepare_used(control_t* control, block_header_t* block, size_
 		block_trim_free(control, block, size);
 		block_mark_as_used(block);
 		p = block_to_ptr(block);
+
+		kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+				    KASAN_KMALLOC_REDZONE);
+		kasan_unpoison_shadow(p, used);
 	}
 	return p;
 }
@@ -907,6 +921,7 @@ tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
 {
 	tlsf_t tlsf = tlsf_create(mem);
 	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
+	kasan_poison_shadow(mem, bytes, KASAN_TAG_INVALID);
 	return tlsf;
 }
 
@@ -926,7 +941,8 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
 	control_t* control = tlsf_cast(control_t*, tlsf);
 	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
 	block_header_t* block = block_locate_free(control, adjust);
-	return block_prepare_used(control, block, adjust);
+
+	return block_prepare_used(control, block, adjust, size);
 }
 
 void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
@@ -983,7 +999,7 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
 		}
 	}
 
-	return block_prepare_used(control, block, adjust);
+	return block_prepare_used(control, block, adjust, size);
 }
 
 void tlsf_free(tlsf_t tlsf, void* ptr)
@@ -994,6 +1010,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
 		control_t* control = tlsf_cast(control_t*, tlsf);
 		block_header_t* block = block_from_ptr(ptr);
 		tlsf_assert(!block_is_free(block) && "block already marked as free");
+		kasan_poison_shadow(ptr, block_size(block), 0xff);
 		block_mark_as_free(block);
 		block = block_merge_prev(control, block);
 		block = block_merge_next(control, block);
@@ -1050,7 +1067,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
 		if (p)
 		{
 			const size_t minsize = tlsf_min(cursize, size);
-			memcpy(p, ptr, minsize);
+			__memcpy(p, ptr, minsize);
 			tlsf_free(tlsf, ptr);
 		}
 	}
@@ -1064,7 +1081,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
 		}
 
 		/* Trim the resulting block and return the original pointer. */
-		block_trim_used(control, block, adjust);
+		block_trim_used(control, block, adjust, size);
 		p = ptr;
 	}
 }
-- 
cgit v1.2.3
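A note on the tlsf_realloc() hunk: minsize is derived from block_size(),
not from the size the caller originally requested, so the copy may read
into the tail of the old block that was left poisoned as redzone. An
instrumented memcpy() would flag that as an out-of-bounds read, which is
presumably why the raw __memcpy() is used here; per the #ifndef at the top
of the file it only differs from memcpy() when CONFIG_KASAN is enabled.

The allocation-side hunks all follow one poison-then-unpoison pattern. A
minimal sketch of it, in the context of common/tlsf.c: the assumption that
2 * sizeof(size_t) bytes of block header precede the payload is taken from
the calls in the diff, while mark_allocated() itself is an invented name
summarizing block_prepare_used() and block_trim_used(), not a function
from the patch.

/* Shadow-state transitions performed on the allocation path (sketch only). */
static void mark_allocated(block_header_t *block, size_t adjusted_size,
			   size_t requested_size)
{
	/* First poison the header and the whole rounded-up payload as redzone... */
	kasan_poison_shadow(&block->size, adjusted_size + 2 * sizeof(size_t),
			    KASAN_KMALLOC_REDZONE);
	/* ...then unpoison only the bytes the caller actually asked for. */
	kasan_unpoison_shadow(block_to_ptr(block), requested_size);
}

The net effect is that even though TLSF rounds requests up, any access past
the size originally passed to tlsf_malloc() lands in redzone and is
reported, and tlsf_free() poisons the whole payload again so use-after-free
is caught as well.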