author	Sascha Hauer <s.hauer@pengutronix.de>	2023-05-11 13:07:47 +0200
committer	Sascha Hauer <s.hauer@pengutronix.de>	2023-05-22 12:38:17 +0200
commit	d7e6afbc836dd45745e8dd0b3524d8639f33a8a2 (patch)
tree	d58722a51883dd15adff141ab9daddbda0c13650 /arch/arm/cpu
parent	69b4886e341e59d56e4ae95b9f3fe8f1dd2e978b (diff)
ARM: mmu32: Use pages for early MMU setup
Up to now we have used 1MiB sections to set up the page tables in PBL. There are two places where this leads to problems.

The first is OP-TEE: we have to map the OP-TEE area with PTE_EXT_XN to prevent the instruction prefetcher from speculating into that area. With the current section mapping we have to align OPTEE_SIZE to 1MiB boundaries.

The second problem comes with the SRAM the PBL might be running from. This SRAM has to be mapped executable, but at the same time the surrounding areas should be mapped non-executable, which is not always possible with 1MiB mapping granularity.

We now have everything in place to use two-level page tables from PBL, so use arch_remap_range() for the problematic cases.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
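To make the granularity problem concrete, here is a small stand-alone sketch (not part of the patch; the start address, region size and the ALIGN helpers below are made up for illustration) that contrasts how far a 1MiB section mapping has to overshoot a small PBL text region compared with a 4KiB page mapping:

/*
 * Illustration only: compare the address range covered when a small
 * region is mapped with 1MiB sections versus 4KiB pages. All values
 * and helper macros here are example assumptions, not patch code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SZ_4K		0x1000UL
#define SZ_1M		0x100000UL

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static void show(const char *granularity, uintptr_t start, size_t size, size_t granule)
{
	uintptr_t map_start = ALIGN_DOWN(start, granule);
	uintptr_t map_end = ALIGN_UP(start + size, granule);

	printf("%-7s: maps [%#lx..%#lx) for region [%#lx..%#lx), %zu extra bytes\n",
	       granularity, (unsigned long)map_start, (unsigned long)map_end,
	       (unsigned long)start, (unsigned long)(start + size),
	       (size_t)(map_end - map_start - size));
}

int main(void)
{
	uintptr_t pbl_text = 0x00910000;	/* hypothetical on-chip SRAM address */
	size_t pbl_size = 48 * 1024;		/* hypothetical PBL text size */

	show("section", pbl_text, pbl_size, SZ_1M);	/* old: 1MiB granularity */
	show("page", pbl_text, pbl_size, SZ_4K);	/* new: 4KiB granularity */
	return 0;
}

With sections, everything sharing the surrounding megabyte inherits the region's attributes; with pages, the executable or uncached window can be kept tight around the region that actually needs it.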
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/mmu_32.c | 29
1 file changed, 5 insertions(+), 24 deletions(-)
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 785b20c7fd..cc5c92d716 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -111,6 +111,7 @@ void dma_flush_range(void *ptr, size_t size)
 	unsigned long end = start + size;
 
 	__dma_flush_range(start, end);
+
 	if (outer_cache.flush_range)
 		outer_cache.flush_range(start, end);
 }
@@ -122,6 +123,7 @@ void dma_inv_range(void *ptr, size_t size)
 	if (outer_cache.inv_range)
 		outer_cache.inv_range(start, end);
+
 	__dma_inv_range(start, end);
 }
@@ -542,16 +544,6 @@ void *dma_alloc_writecombine(size_t size, dma_addr_t *dma_handle)
 	return dma_alloc_map(size, dma_handle, ARCH_MAP_WRITECOMBINE);
 }
 
-static inline void map_region(unsigned long start, unsigned long size,
-			      uint64_t flags)
-
-{
-	start = ALIGN_DOWN(start, SZ_1M);
-	size = ALIGN(size, SZ_1M);
-
-	create_sections(start, start + size - 1, flags);
-}
-
 void mmu_early_enable(unsigned long membase, unsigned long memsize)
 {
 	uint32_t *ttb = (uint32_t *)arm_mem_ttb(membase + memsize);
@@ -572,21 +564,10 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 	 */
 	create_flat_mapping();
 
-	/*
-	 * There can be SoCs that have a section shared between device memory
-	 * and the on-chip RAM hosting the PBL. Thus mark this section
-	 * uncachable, but executable.
-	 * On such SoCs, executing from OCRAM could cause the instruction
-	 * prefetcher to speculatively access that device memory, triggering
-	 * potential errant behavior.
-	 *
-	 * If your SoC has such a memory layout, you should rewrite the code
-	 * here to map the OCRAM page-wise.
-	 */
-	map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
-
 	/* maps main memory as cachable */
-	map_region(membase, memsize - OPTEE_SIZE, PMD_SECT_DEF_CACHED);
+	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
+	arch_remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
 
 	__mmu_cache_on();
 }
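For orientation, a hedged sketch of the layout produced by the arch_remap_range() calls above, using made-up values for membase, memsize and OPTEE_SIZE (none of these numbers come from the patch): main memory is mapped cached, the OP-TEE carveout at the end of RAM stays uncached, and the barebox text region is additionally re-mapped at page granularity.

/*
 * Illustration only: print the windows touched by the remapping in the
 * hunk above, with assumed example values for membase/memsize/OPTEE_SIZE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SZ_1M		0x100000UL
#define OPTEE_SIZE	(32 * SZ_1M)	/* assumed size of the OP-TEE carveout */

int main(void)
{
	uintptr_t membase = 0x10000000UL;	/* example DRAM base */
	size_t memsize = 512 * SZ_1M;		/* example DRAM size */
	uintptr_t optee_start = membase + memsize - OPTEE_SIZE;

	printf("MAP_CACHED  : [%#lx..%#lx)  main memory\n",
	       (unsigned long)membase, (unsigned long)optee_start);
	printf("MAP_UNCACHED: [%#lx..%#lx)  OP-TEE carveout\n",
	       (unsigned long)optee_start, (unsigned long)(optee_start + OPTEE_SIZE));
	/* the third call re-maps the page-aligned barebox text region cached */
	return 0;
}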