diff options
author:    Ahmad Fatoum <a.fatoum@pengutronix.de>   2023-05-22 07:28:25 +0200
committer: Sascha Hauer <s.hauer@pengutronix.de>    2023-05-23 09:24:52 +0200
commit:    9845563450b88c77710c8b7e19d98b140cb542c4 (patch)
tree:      9c363329dc6f441c8eccf2f1ffc82d1873afeb89
parent:    b53744ffe333db8ee426fe7e5e93eb82a16bd9a9 (diff)
download:  barebox-9845563450b88c77710c8b7e19d98b140cb542c4.tar.gz
           barebox-9845563450b88c77710c8b7e19d98b140cb542c4.tar.xz
treewide: use remap_range instead of arch_remap_range
The remapping in arch_remap_range is currently limited to attributes. In
a later commit, we'll start supporting non-1:1 remappings. We'll keep
remap_range as is for 1:1, so as preparation, let's switch all
arch_remap_range users that want 1:1 remappings to remap_range.
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20230522052835.1039143-2-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
 arch/arm/cpu/mmu-common.c |  8 ++++----
 arch/arm/cpu/mmu_32.c     | 12 ++++++------
 arch/arm/cpu/mmu_64.c     | 14 +++++++-------
 drivers/hab/habv4.c       |  2 +-
 4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 02f512c2c6..5208db21ec 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -36,7 +36,7 @@ void *dma_alloc_map(size_t size, dma_addr_t *dma_handle, unsigned flags)
 	memset(ret, 0, size);
 	dma_flush_range(ret, size);
 
-	arch_remap_range(ret, size, flags);
+	remap_range(ret, size, flags);
 
 	return ret;
 }
@@ -53,19 +53,19 @@ void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
 
 void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size)
 {
 	size = PAGE_ALIGN(size);
-	arch_remap_range(mem, size, MAP_CACHED);
+	remap_range(mem, size, MAP_CACHED);
 	free(mem);
 }
 
 void zero_page_access(void)
 {
-	arch_remap_range(0x0, PAGE_SIZE, MAP_CACHED);
+	remap_range(0x0, PAGE_SIZE, MAP_CACHED);
 }
 
 void zero_page_faulting(void)
 {
-	arch_remap_range(0x0, PAGE_SIZE, MAP_FAULT);
+	remap_range(0x0, PAGE_SIZE, MAP_FAULT);
 }
 
 static int mmu_init(void)
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 057458bf9a..6572a28d25 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -532,12 +532,12 @@ void __mmu_init(bool mmu_on)
 		pos = bank->start;
 
 		for_each_reserved_region(bank, rsv) {
-			arch_remap_range((void *)rsv->start, resource_size(rsv), MAP_UNCACHED);
-			arch_remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+			remap_range((void *)rsv->start, resource_size(rsv), MAP_UNCACHED);
+			remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
 			pos = rsv->end + 1;
 		}
 
-		arch_remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+		remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
 	}
 }
@@ -580,9 +580,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 	create_flat_mapping();
 
 	/* maps main memory as cachable */
-	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
-	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
-	arch_remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+	remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
+	remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
 
 	__mmu_cache_on();
 }
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 8728c41d99..ef67a1c3a4 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -201,13 +201,13 @@ void __mmu_init(bool mmu_on)
 		pos = bank->start;
 
 		for_each_reserved_region(bank, rsv) {
-			arch_remap_range((void *)resource_first_page(rsv),
-					 resource_count_pages(rsv), MAP_UNCACHED);
-			arch_remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+			remap_range((void *)resource_first_page(rsv),
+				    resource_count_pages(rsv), MAP_UNCACHED);
+			remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
 			pos = rsv->end + 1;
 		}
 
-		arch_remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+		remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
 	}
 
 	/* Make zero page faulting to catch NULL pointer derefs */
@@ -257,9 +257,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 	memset((void *)ttb, 0, GRANULE_SIZE);
 
-	arch_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
-	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
-	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
+	remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
+	remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
 
 	mmu_enable();
 }
diff --git a/drivers/hab/habv4.c b/drivers/hab/habv4.c
index e8c7d3264d..b10c92ec76 100644
--- a/drivers/hab/habv4.c
+++ b/drivers/hab/habv4.c
@@ -619,7 +619,7 @@ static int init_imx6_hab_get_status(void)
 		/* can happen in multi-image builds and is not an error */
 		return 0;
 
-	arch_remap_range(0x0, SZ_1M, MAP_CACHED);
+	remap_range(0x0, SZ_1M, MAP_CACHED);
 
 	/*
 	 * Nobody will check the return value if there were HAB errors, but the