summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAhmad Fatoum <a.fatoum@pengutronix.de>2023-05-26 08:33:53 +0200
committerSascha Hauer <s.hauer@pengutronix.de>2023-05-30 12:25:47 +0200
commitb0ff5f14e7063c5e72222ce6dc6b6d9cfda2eba7 (patch)
tree546a3fade4e6d4de8fb73c1989ad7d2de4da5c31
parent602fac6d016d3ab2625199c8d7ae51273a3fae23 (diff)
downloadbarebox-b0ff5f14e7063c5e72222ce6dc6b6d9cfda2eba7.tar.gz
barebox-b0ff5f14e7063c5e72222ce6dc6b6d9cfda2eba7.tar.xz
ARM: mmu32: define early_remap_range for mmu_early_enable usage
Like done for ARM64, define early_remap_range, which should always be safe to call while the MMU is disabled. This is to prepare doing cache maintenance in regular arch_remap_range. No functional change.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20230526063354.1145474-3-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
-rw-r--r--arch/arm/cpu/mmu_32.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 1c59225934..a324ebf71a 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -241,7 +241,7 @@ static uint32_t get_pmd_flags(int map_type)
return pte_flags_to_pmd(get_pte_flags(map_type));
}
-int arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
+static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
{
u32 virt_addr = (u32)_virt_addr;
u32 pte_flags, pmd_flags;
@@ -318,6 +318,15 @@ int arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsig
}
tlb_invalidate();
+}
+static void early_remap_range(u32 addr, size_t size, unsigned map_type)
+{
+ __arch_remap_range((void *)addr, addr, size, map_type);
+}
+
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
+{
+ __arch_remap_range(virt_addr, phys_addr, size, map_type);
return 0;
}
@@ -580,9 +589,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
create_flat_mapping();
/* maps main memory as cachable */
- remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
- remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
- remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+ early_remap_range(membase, memsize - OPTEE_SIZE, MAP_CACHED);
+ early_remap_range(membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
+ early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
__mmu_cache_on();
}