author:    Ahmad Fatoum <a.fatoum@pengutronix.de>  2023-12-05 09:12:47 +0100
committer: Sascha Hauer <s.hauer@pengutronix.de>   2023-12-05 09:24:23 +0100
commit:    fc5a57f7f9c03c2426c1371be175fa5fd4434813 (patch)
tree:      7af621577f305200378dd06f1b8f2e51714a489d /arch/arm/cpu
parent:    77c8120e12d5e8923078eb5e618ebf329a30d5a3 (diff)
ARM: mmu: align size of remapped region to page size
Currently, barebox's ARM arch_remap_range() hangs in an infinite loop when called with a size that is not aligned to a page boundary. Its Linux equivalent, ioremap(), just rounds the size up to the page size and works correctly. Adopt the Linux behavior to make porting code easier, e.g. when calling devm_ioremap().

The only other arch_remap_range() implementation in barebox is PowerPC's. That one doesn't loop indefinitely when the size isn't page aligned, so nothing needs to be done there.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Link: https://lore.barebox.org/20231205081247.4148947-1-a.fatoum@pengutronix.de
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
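To see the failure mode concretely, here is a minimal sketch (demo_remap() and the bare countdown loop are simplifications for illustration, not the actual __arch_remap_range() code, which also handles section-sized mappings): a loop that retires page-sized chunks from an unsigned byte count can only reach zero if the count is a page multiple; otherwise the final subtraction wraps around and the loop spins forever. Rounding the size up front, as this patch does, restores termination.

    #include <stddef.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Toy stand-in for the per-page loop in __arch_remap_range(). */
    static void demo_remap(size_t size)
    {
            size = PAGE_ALIGN(size);  /* the fix: round up first */

            while (size) {
                    /* ... map one page, advance virt/phys cursors ... */
                    size -= PAGE_SIZE;  /* without the rounding, an unaligned
                                           size steps past zero here, the
                                           unsigned value wraps, and the loop
                                           never terminates */
            }
    }

    int main(void)
    {
            demo_remap(0x1001);  /* rounded up to 0x2000: two iterations */
            return 0;
    }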
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/mmu_32.c  3
-rw-r--r--  arch/arm/cpu/mmu_64.c  3
2 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index d0ada5866f..c6eecf9c9d 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -18,6 +18,7 @@
 #include <memory.h>
 #include <asm/system_info.h>
 #include <asm/sections.h>
+#include <linux/pagemap.h>
 
 #include "mmu_32.h"
@@ -253,6 +254,8 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 	pte_flags = get_pte_flags(map_type);
 	pmd_flags = pte_flags_to_pmd(pte_flags);
 
+	size = PAGE_ALIGN(size);
+
 	while (size) {
 		const bool pgdir_size_aligned = IS_ALIGNED(virt_addr, PGDIR_SIZE);
 		u32 *pgd = (u32 *)&ttb[pgd_index(virt_addr)];
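The newly included <linux/pagemap.h> provides PAGE_ALIGN(), which rounds a size up to the next page boundary. Its conventional semantics, shown here as a self-contained sketch with an assumed 4 KiB page (not copied from the barebox header), are the usual power-of-two round-up:

    #include <assert.h>

    #define PAGE_SIZE     4096UL  /* assumed 4 KiB pages */
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            assert(PAGE_ALIGN(0x0000UL) == 0x0000);  /* zero stays zero          */
            assert(PAGE_ALIGN(0x0001UL) == 0x1000);  /* one byte -> one page     */
            assert(PAGE_ALIGN(0x1000UL) == 0x1000);  /* exact multiple unchanged */
            assert(PAGE_ALIGN(0x1001UL) == 0x2000);  /* tail spills to 2nd page  */
            return 0;
    }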
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 12c4dc90b3..c6ea63e655 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -19,6 +19,7 @@
 #include <asm/cache.h>
 #include <memory.h>
 #include <asm/system_info.h>
+#include <linux/pagemap.h>
 
 #include "mmu_64.h"
@@ -132,6 +133,8 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	attr &= ~PTE_TYPE_MASK;
 
+	size = PAGE_ALIGN(size);
+
 	while (size) {
 		table = ttb;
 		for (level = 0; level < 4; level++) {
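As for why PowerPC's arch_remap_range() was left alone: one common loop shape is immune to the problem by construction. A loop that advances an offset and compares it with '<' terminates even for an unaligned size, merely overshooting the tail by part of a page, so no alignment fixup is needed. A minimal sketch of that pattern (hypothetical code illustrating the general shape, not the actual PowerPC implementation):

    #define PAGE_SIZE 4096UL

    /* Walks the range with a strictly increasing cursor; 'off < size'
     * exits after the partial tail page, so an unaligned size cannot
     * hang this loop. */
    static void remap_cursor_style(unsigned long virt, unsigned long size)
    {
            unsigned long off;

            for (off = 0; off < size; off += PAGE_SIZE) {
                    /* ... map one page at virt + off ... */
            }
    }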