summaryrefslogtreecommitdiffstats
path: root/arch/arm/cpu
diff options
context:
space:
mode:
authorAhmad Fatoum <a.fatoum@pengutronix.de>2023-09-11 17:08:59 +0200
committerSascha Hauer <s.hauer@pengutronix.de>2023-09-21 10:55:38 +0200
commit03a6a4d90ec9a2a5e6a90bdd80881e3e3ba8206f (patch)
tree23cfe1f07b36f1c8873841c8a656120bf683d946 /arch/arm/cpu
parent2401db9c04bb992fc701dcb86ecad4036f511b12 (diff)
downloadbarebox-03a6a4d90ec9a2a5e6a90bdd80881e3e3ba8206f.tar.gz
barebox-03a6a4d90ec9a2a5e6a90bdd80881e3e3ba8206f.tar.xz
ARM: mmu: catch stack overflowing into TTB with stack guard page
While barebox stack is often quite generous, due to its default of 32K, bugs can make it overflow and on ARM, this clobbers the page tables leading to even harder to debug problems than usual. Let's add a 4K buffer zone between the page tables and the stack and configure the MMU to trap all accesses into it. Note that hitting the stack guard page can be silent if the exception handler places its frame there. Still a hanging barebox may be better than an erratically behaving one. Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de> Link: https://lore.barebox.org/20230911150900.3584523-5-a.fatoum@pengutronix.de Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--arch/arm/cpu/interrupts_32.c21
-rw-r--r--arch/arm/cpu/interrupts_64.c36
-rw-r--r--arch/arm/cpu/mmu_32.c16
-rw-r--r--arch/arm/cpu/mmu_64.c15
4 files changed, 79 insertions, 9 deletions
diff --git a/arch/arm/cpu/interrupts_32.c b/arch/arm/cpu/interrupts_32.c
index 5bc790a796..468dcdd30e 100644
--- a/arch/arm/cpu/interrupts_32.c
+++ b/arch/arm/cpu/interrupts_32.c
@@ -8,7 +8,9 @@
#include <common.h>
#include <abort.h>
+#include <linux/sizes.h>
#include <asm/ptrace.h>
+#include <asm/barebox-arm.h>
#include <asm/unwind.h>
#include <init.h>
@@ -106,6 +108,22 @@ void do_prefetch_abort (struct pt_regs *pt_regs)
do_exception(pt_regs);
}
+static const char *data_abort_reason(ulong far)
+{
+ ulong guard_page;
+
+ if (far < PAGE_SIZE)
+ return "NULL pointer dereference";
+
+ if (IS_ENABLED(CONFIG_STACK_GUARD_PAGE)) {
+ guard_page = arm_mem_guard_page_get();
+ if (guard_page <= far && far < guard_page + PAGE_SIZE)
+ return "stack overflow";
+ }
+
+ return "paging request";
+}
+
/**
* The CPU catches a data abort. That really should not happen!
* @param[in] pt_regs Register set content when the accident happens
@@ -119,8 +137,7 @@ void do_data_abort (struct pt_regs *pt_regs)
asm volatile ("mrc p15, 0, %0, c6, c0, 0" : "=r" (far) : : "cc");
printf("unable to handle %s at address 0x%08x\n",
- far < PAGE_SIZE ? "NULL pointer dereference" :
- "paging request", far);
+ data_abort_reason(far), far);
do_exception(pt_regs);
}
diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index d844915fee..b3e7da1797 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -6,6 +6,7 @@
#include <common.h>
#include <abort.h>
#include <asm/ptrace.h>
+#include <asm/barebox-arm.h>
#include <asm/unwind.h>
#include <init.h>
#include <asm/system.h>
@@ -142,17 +143,38 @@ void do_bad_error(struct pt_regs *pt_regs)
extern volatile int arm_ignore_data_abort;
extern volatile int arm_data_abort_occurred;
+static const char *data_abort_reason(ulong far)
+{
+ ulong guard_page;
+
+ if (far < PAGE_SIZE)
+ return "NULL pointer dereference: ";
+
+ if (IS_ENABLED(CONFIG_STACK_GUARD_PAGE)) {
+ guard_page = arm_mem_guard_page_get();
+ if (guard_page <= far && far < guard_page + PAGE_SIZE)
+ return "Stack overflow: ";
+ }
+
+ return NULL;
+}
+
void do_sync(struct pt_regs *pt_regs, unsigned int esr, unsigned long far)
{
- if ((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_CUR &&
- arm_ignore_data_abort) {
- arm_data_abort_occurred = 1;
- pt_regs->elr += 4;
- return;
+ const char *extra = NULL;
+
+ if ((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_CUR) {
+ if (arm_ignore_data_abort) {
+ arm_data_abort_occurred = 1;
+ pt_regs->elr += 4;
+ return;
+ }
+
+ extra = data_abort_reason(far);
}
- printf("%s exception (ESR 0x%08x) at 0x%016lx\n", esr_get_class_string(esr),
- esr, far);
+ printf("%s%s exception (ESR 0x%08x) at 0x%016lx\n", extra ?: "",
+ esr_get_class_string(esr), esr, far);
do_exception(pt_regs);
}
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index c5d64aa88b..07b2250677 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -475,11 +475,27 @@ static void create_zero_page(void)
pr_debug("Created zero page\n");
}
+static void create_guard_page(void)
+{
+ ulong guard_page;
+
+ if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+ return;
+
+ guard_page = arm_mem_guard_page_get();
+ request_sdram_region("guard page", guard_page, PAGE_SIZE);
+ remap_range((void *)guard_page, PAGE_SIZE, MAP_FAULT);
+
+ pr_debug("Created guard page\n");
+}
+
/*
* Map vectors and zero page
*/
static void vectors_init(void)
{
+ create_guard_page();
+
/*
* First try to use the vectors where they actually are, works
* on ARMv7 and later.
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 63e7096322..aa6cd0ee79 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -204,6 +204,20 @@ static void mmu_enable(void)
set_cr(get_cr() | CR_M | CR_C | CR_I);
}
+static void create_guard_page(void)
+{
+ ulong guard_page;
+
+ if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+ return;
+
+ guard_page = arm_mem_guard_page_get();
+ request_sdram_region("guard page", guard_page, PAGE_SIZE);
+ remap_range((void *)guard_page, PAGE_SIZE, MAP_FAULT);
+
+ pr_debug("Created guard page\n");
+}
+
/*
* Prepare MMU for usage enable it.
*/
@@ -241,6 +255,7 @@ void __mmu_init(bool mmu_on)
/* Make zero page faulting to catch NULL pointer derefs */
zero_page_faulting();
+ create_guard_page();
}
void mmu_disable(void)