path: root/arch
author    Sascha Hauer <s.hauer@pengutronix.de>  2022-09-14 15:52:49 +0200
committer Sascha Hauer <s.hauer@pengutronix.de>  2022-09-14 15:52:49 +0200
commit    36b901f5a62ff334fb7768aca644fe11f1efbc10 (patch)
tree      c13435f554963aa346411380abc30a91a94dd01d /arch
parent    a9a07aa1cd94b30335e0c82c8d36e9bc7d784e49 (diff)
parent    880c6b5e6f26c674821172bc4e982796e7dd5f0e (diff)
Merge branch 'for-next/arm'
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/cpu/mmu-common.h  15
-rw-r--r--  arch/arm/cpu/mmu.h          9
-rw-r--r--  arch/arm/cpu/mmu_64.c      10
-rw-r--r--  arch/arm/cpu/start.c        2
-rw-r--r--  arch/arm/cpu/uncompress.c   2
5 files changed, 33 insertions, 5 deletions
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index ed7d5bc316..c9ea2c1228 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -3,6 +3,11 @@
#ifndef __ARM_MMU_COMMON_H
#define __ARM_MMU_COMMON_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
void dma_inv_range(void *ptr, size_t size);
void dma_flush_range(void *ptr, size_t size);
void *dma_alloc_map(size_t size, dma_addr_t *dma_handle, unsigned flags);
@@ -19,4 +24,14 @@ static inline void arm_mmu_not_initialized_error(void)
panic("MMU not initialized\n");
}
+static inline size_t resource_first_page(const struct resource *res)
+{
+ return ALIGN_DOWN(res->start, SZ_4K);
+}
+
+static inline size_t resource_count_pages(const struct resource *res)
+{
+ return ALIGN(resource_size(res), SZ_4K);
+}
+
#endif
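
The two helpers added above round a reserved region out to 4 KiB page granularity: resource_first_page() aligns the region's start down, resource_count_pages() rounds its size up. A minimal standalone sketch of that rounding, using local stand-ins for the kernel-style ALIGN()/ALIGN_DOWN() macros and an invented example region:

/* Standalone illustration of the page rounding done by
 * resource_first_page() and resource_count_pages(); the macros below
 * mirror the kernel-style helpers pulled in by the new includes. */
#include <stdio.h>
#include <stddef.h>

#define SZ_4K            0x1000UL
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* hypothetical reserved region starting at an unaligned address */
	size_t start = 0x4fe00100, size = 0x1f00;

	/* 0x4fe00000: first page covering the region */
	printf("first page: 0x%zx\n", ALIGN_DOWN(start, SZ_4K));
	/* 0x2000: size rounded up to whole pages */
	printf("page span:  0x%zx\n", ALIGN(size, SZ_4K));

	return 0;
}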
diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu.h
index d48522d166..1499b70dd6 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu.h
@@ -73,15 +73,20 @@ create_sections(uint32_t *ttb, unsigned long first,
#define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
#define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
-static inline void create_flat_mapping(uint32_t *ttb)
+static inline unsigned long attrs_uncached_mem(void)
{
unsigned int flags = PMD_SECT_DEF_UNCACHED;
if (cpu_architecture() >= CPU_ARCH_ARMv7)
flags |= PMD_SECT_XN;
+ return flags;
+}
+
+static inline void create_flat_mapping(uint32_t *ttb)
+{
/* create a flat mapping using 1MiB sections */
- create_sections(ttb, 0, 0xffffffff, flags);
+ create_sections(ttb, 0, 0xffffffff, attrs_uncached_mem());
}
#endif /* __ARM_MMU_H */
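
The mmu.h hunk is a pure refactor: the uncached section attributes (with PMD_SECT_XN added on ARMv7 and later) move out of create_flat_mapping() into attrs_uncached_mem(), matching the helper of the same name on the 64-bit side, so other 32-bit mapping code can request the same attributes without repeating the cpu_architecture() check. A hypothetical caller, sketched against the create_sections(ttb, first, last, flags) helper visible in the hunk header; map_section_uncached() itself is invented for illustration:

/* Hypothetical caller: remap one 1 MiB section at 'addr' as uncached,
 * reusing attrs_uncached_mem() instead of open-coding the XN check.
 * SZ_1M comes from <linux/sizes.h>; create_sections() is the 32-bit
 * PMD helper referenced in the hunk header above. */
static void map_section_uncached(uint32_t *ttb, unsigned long addr)
{
	unsigned long start = ALIGN_DOWN(addr, SZ_1M);

	/* 'last' is an inclusive end address, as in create_flat_mapping() */
	create_sections(ttb, start, start + SZ_1M - 1, attrs_uncached_mem());
}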
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 06049e0003..f43ac9a121 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -201,9 +201,17 @@ void __mmu_init(bool mmu_on)
create_sections(0, 0, 1UL << (BITS_PER_VA - 1), attrs_uncached_mem());
/* Map sdram cached. */
- for_each_memory_bank(bank)
+ for_each_memory_bank(bank) {
+ struct resource *rsv;
+
create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
+ for_each_reserved_region(bank, rsv) {
+ create_sections(resource_first_page(rsv), resource_first_page(rsv),
+ resource_count_pages(rsv), attrs_uncached_mem());
+ }
+ }
+
/* Make zero page faulting to catch NULL pointer derefs */
zero_page_faulting();
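
In mmu_64.c the SDRAM banks stay cached as before, but every reserved region registered against a bank is now re-mapped uncached, rounded out to whole pages, so reserved areas such as firmware carve-outs are not accessed through the cache. The ordering is what makes this work: the whole bank is mapped cached first, then the later create_sections() calls override the attributes of the reserved pages. A standalone sketch of that sequence, with a print-only stand-in for the 64-bit create_sections() and invented bank/reserved-region addresses:

/* Print-only stand-in for the 64-bit create_sections(virt, phys, size,
 * attrs): it just reports what the real code would program. */
#include <stdio.h>

#define SZ_4K            0x1000UL
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

static void create_sections(unsigned long virt, unsigned long phys,
			    unsigned long size, const char *attrs)
{
	printf("map 0x%08lx -> 0x%08lx, size 0x%08lx, %s\n",
	       virt, phys, size, attrs);
}

int main(void)
{
	/* invented 1 GiB bank with a 2 MiB reserved region at its top */
	unsigned long bank_start = 0x40000000, bank_size = 0x40000000;
	unsigned long rsv_start = 0x7fe00000, rsv_size = 0x200000;
	unsigned long first = ALIGN_DOWN(rsv_start, SZ_4K);

	/* 1. whole bank cached */
	create_sections(bank_start, bank_start, bank_size, "CACHED_MEM");
	/* 2. reserved pages re-mapped uncached, overriding step 1 */
	create_sections(first, first, ALIGN(rsv_size, SZ_4K),
			"attrs_uncached_mem()");

	return 0;
}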
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index 5861c15d43..672f26e006 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -171,7 +171,7 @@ __noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membas
} else {
pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
arm_early_mmu_cache_invalidate();
- mmu_early_enable(membase, memsize, ttb);
+ mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
}
}
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 2250b8ccd3..537ee63229 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -84,7 +84,7 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
if (IS_ENABLED(CONFIG_MMU_EARLY)) {
unsigned long ttb = arm_mem_ttb(membase, endmem);
pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
- mmu_early_enable(membase, memsize, ttb);
+ mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
}
free_mem_ptr = arm_mem_early_malloc(membase, endmem);
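
The last two hunks make the early MMU setup in both the non-PBL entry path (start.c) and the PBL path (uncompress.c) stop OPTEE_SIZE short of the end of RAM, leaving the region reserved for OP-TEE at the top of memory out of the early mapping. A standalone sketch of the resulting range, with invented membase/memsize values and OPTEE_SIZE assumed to be 32 MiB here (in barebox it is configuration dependent):

/* Illustration of the reduced early mapping. All numbers are example
 * values; the real OPTEE_SIZE comes from the barebox configuration. */
#include <stdio.h>

#define SZ_1M      0x100000UL
#define OPTEE_SIZE (32 * SZ_1M)	/* assumed value for the example */

int main(void)
{
	unsigned long membase = 0x40000000, memsize = 0x80000000; /* 2 GiB */

	/* before: the early mapping covered all of [membase, membase + memsize) */
	printf("old mapping end: 0x%08lx\n", membase + memsize);
	/* after: the top OPTEE_SIZE bytes holding OP-TEE stay unmapped */
	printf("new mapping end: 0x%08lx\n", membase + memsize - OPTEE_SIZE);

	return 0;
}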