author     Ahmad Fatoum <a.fatoum@pengutronix.de>    2022-08-17 13:42:40 +0200
committer  Sascha Hauer <s.hauer@pengutronix.de>     2022-08-18 14:39:06 +0200
commit     66e233b8c04b82e34b26e22890c196c0b0ab1fb0 (patch)
tree       e82b938347ed2dc5d70f43a7df0d1fb1483a2697 /arch
parent     d0b5f6bde15b4737f27f21f6cdb5d6b091acecd8 (diff)
ARM: mmu64: map reserved regions uncached
Now that we have reserved memory regions specially marked in the SDRAM
bank requests, we can use this information to map these regions uncached
and eXecute Never, so that speculative accesses into them cannot cause
hard-to-debug data aborts.

The sequence used here is safe because __mmu_init turns off the MMU if it
is enabled, so even if we overwrite early MMU uncached entries, they have
no effect until the MMU is turned on again, by which time the
for_each_reserved_region loop should have disabled caching for them again
(e.g. because they were listed in the DT as /reserved-memory).

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20220817114244.1810531-7-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
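For context, a region only shows up in for_each_reserved_region() if something has registered it on its SDRAM bank beforehand, e.g. the device tree /reserved-memory parser or explicit board code. The following is a minimal, hypothetical board-code sketch, not part of this commit: it assumes the reserve_sdram_region() helper and the mem_initcall() level from barebox, and the carve-out name, address and size are made up for illustration.

#include <common.h>
#include <init.h>
#include <memory.h>
#include <linux/sizes.h>

/* made-up carve-out; a real board would use the address of its secure
 * firmware, framebuffer, etc. */
#define CARVEOUT_BASE	0x60000000
#define CARVEOUT_SIZE	SZ_16M

static int board_reserve_carveout(void)
{
	/* marks the range as reserved on its SDRAM bank, so __mmu_init()
	 * later remaps it uncached and eXecute Never instead of cached */
	reserve_sdram_region("carveout", CARVEOUT_BASE, CARVEOUT_SIZE);

	return 0;
}
mem_initcall(board_reserve_carveout);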
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/cpu/mmu-common.h | 15
-rw-r--r--  arch/arm/cpu/mmu_64.c     | 10
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index ed7d5bc316..c9ea2c1228 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -3,6 +3,11 @@
#ifndef __ARM_MMU_COMMON_H
#define __ARM_MMU_COMMON_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
void dma_inv_range(void *ptr, size_t size);
void dma_flush_range(void *ptr, size_t size);
void *dma_alloc_map(size_t size, dma_addr_t *dma_handle, unsigned flags);
@@ -19,4 +24,14 @@ static inline void arm_mmu_not_initialized_error(void)
panic("MMU not initialized\n");
}
+static inline size_t resource_first_page(const struct resource *res)
+{
+ return ALIGN_DOWN(res->start, SZ_4K);
+}
+
+static inline size_t resource_count_pages(const struct resource *res)
+{
+ return ALIGN(resource_size(res), SZ_4K);
+}
+
#endif
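The two new helpers simply widen a reserved region to whole 4 KiB pages: the start is rounded down, the size is rounded up. A small standalone sketch of the arithmetic follows; ALIGN/ALIGN_DOWN are re-implemented here only for illustration and the example region is made up (in the patch they come from <linux/kernel.h> and the helpers take a struct resource).

#include <stdio.h>
#include <stdint.h>

#define SZ_4K            0x1000
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
	/* example reserved region starting mid-page, 6 KiB long */
	uintptr_t start = 0x40001800;
	uintptr_t size  = 0x1800;

	/* resource_first_page(): start rounded down -> 0x40001000 */
	printf("first page:  0x%lx\n", (unsigned long)ALIGN_DOWN(start, SZ_4K));
	/* resource_count_pages(): size rounded up -> 0x2000, two pages */
	printf("mapped size: 0x%lx\n", (unsigned long)ALIGN(size, SZ_4K));

	return 0;
}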
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 06049e0003..f43ac9a121 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -201,9 +201,17 @@ void __mmu_init(bool mmu_on)
create_sections(0, 0, 1UL << (BITS_PER_VA - 1), attrs_uncached_mem());
/* Map sdram cached. */
- for_each_memory_bank(bank)
+ for_each_memory_bank(bank) {
+ struct resource *rsv;
+
create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
+ for_each_reserved_region(bank, rsv) {
+ create_sections(resource_first_page(rsv), resource_first_page(rsv),
+ resource_count_pages(rsv), attrs_uncached_mem());
+ }
+ }
+
/* Make zero page faulting to catch NULL pointer derefs */
zero_page_faulting();
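For readability, the loop that results from the hunk above looks roughly like this once the diff markers are stripped; this is reconstructed from the hunk only, with the rest of __mmu_init() omitted:

	/* Map sdram cached. */
	for_each_memory_bank(bank) {
		struct resource *rsv;

		create_sections(bank->start, bank->start, bank->size, CACHED_MEM);

		/* punch page-aligned holes for reserved regions and remap
		 * them uncached and eXecute Never */
		for_each_reserved_region(bank, rsv) {
			create_sections(resource_first_page(rsv), resource_first_page(rsv),
					resource_count_pages(rsv), attrs_uncached_mem());
		}
	}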