diff options
author | Ahmad Fatoum <a.fatoum@pengutronix.de> | 2023-05-26 08:33:52 +0200 |
---|---|---|
committer | Sascha Hauer <s.hauer@pengutronix.de> | 2023-05-30 12:25:47 +0200 |
commit | 602fac6d016d3ab2625199c8d7ae51273a3fae23 (patch) | |
tree | ac53a48308905525676de906218730e841a8474f | |
parent | 4b08ca92e572cd63b355131d6f522ef72db31f84 (diff) | |
download | barebox-602fac6d016d3ab2625199c8d7ae51273a3fae23.tar.gz barebox-602fac6d016d3ab2625199c8d7ae51273a3fae23.tar.xz |
ARM: mmu64: define early_remap_range for mmu_early_enable usage
Calling the new dma_inv_range/dma_flush_range from arch_remap_range before
the MMU is enabled hangs, so define a new early_remap_range that is
always safe to call while the MMU is disabled. This prepares for doing
cache maintenance in the regular arch_remap_range in a later commit.
No functional change.
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20230526063354.1145474-2-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
-rw-r--r-- | arch/arm/cpu/mmu_64.c | 39 |
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c index 1d5a5355c6..940e0e914c 100644 --- a/arch/arm/cpu/mmu_64.c +++ b/arch/arm/cpu/mmu_64.c @@ -159,23 +159,36 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size, tlb_invalidate(); } -int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags) +static unsigned long get_pte_attrs(unsigned flags) { - unsigned long attrs; - switch (flags) { case MAP_CACHED: - attrs = CACHED_MEM; - break; + return CACHED_MEM; case MAP_UNCACHED: - attrs = attrs_uncached_mem(); - break; + return attrs_uncached_mem(); case MAP_FAULT: - attrs = 0x0; - break; + return 0x0; default: - return -EINVAL; + return ~0UL; } +} + +static void early_remap_range(uint64_t addr, size_t size, unsigned flags) +{ + unsigned long attrs = get_pte_attrs(flags); + + if (WARN_ON(attrs == ~0UL)) + return; + + create_sections(addr, addr, size, attrs); +} + +int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags) +{ + unsigned long attrs = get_pte_attrs(flags); + + if (attrs == ~0UL) + return -EINVAL; create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs); return 0; @@ -269,9 +282,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize) memset((void *)ttb, 0, GRANULE_SIZE); - remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED); - remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED); - remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT); + early_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED); + early_remap_range(membase, memsize - OPTEE_SIZE, MAP_CACHED); + early_remap_range(membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT); mmu_enable(); } |