author     Sascha Hauer <s.hauer@pengutronix.de>    2019-11-25 15:27:43 +0100
committer  Sascha Hauer <s.hauer@pengutronix.de>    2019-11-27 12:34:06 +0100
commit     afdb72097b4f3bffcf88ede74bcdeb4b868dd9c7 (patch)
tree       42e725db458995388720815dad7c2a1ec66ab2b4 /arch/arm/cpu
parent     09177059593f77716ad6cd570b392ed0be9b30e4 (diff)
ARM64: Switch to 4 level page tables
3-level page tables can only resolve 39-bit addresses. Switch to 4-level page tables to add support for larger physical address ranges. This is needed, for example, on Layerscape SoCs, where the PCI windows lie outside the 39-bit range.

The early MMU support still uses 39-bit addressing: it uses only a single-level page table, and with 48-bit addresses we wouldn't have enough granularity to map the SDRAM differently from the rest of the address space.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
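For context: with the 4 KB granule used here, each translation level resolves 9 VA bits on top of the 12-bit page offset, so three levels cover 12 + 3*9 = 39 bits and four levels cover 48. A minimal standalone sketch of that arithmetic (not barebox code; level2shift() mirrors the helper visible in the mmu_64.h hunks below):

#include <stdint.h>
#include <stdio.h>

/* 4 KB granule: 12-bit page offset, 9 table-index bits per level. */
static int level2shift(int level)
{
	/* level 0 -> VA bits [47:39], 1 -> [38:30], 2 -> [29:21], 3 -> [20:12] */
	return 12 + 9 * (3 - level);
}

int main(void)
{
	uint64_t addr = 0x4080001000ULL; /* arbitrary example VA */
	int level;

	for (level = 0; level < 4; level++)
		printf("level %d: shift %2d, index %3u\n", level,
		       level2shift(level),
		       (unsigned)((addr >> level2shift(level)) & 0x1ff));

	/* 3 levels resolve 12 + 3 * 9 = 39 bits; 4 levels resolve 48. */
	return 0;
}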
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/mmu-early_64.c  6
-rw-r--r--  arch/arm/cpu/mmu_64.c        7
-rw-r--r--  arch/arm/cpu/mmu_64.h        9
3 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
index f07d107e0d..94e372637a 100644
--- a/arch/arm/cpu/mmu-early_64.c
+++ b/arch/arm/cpu/mmu-early_64.c
@@ -48,6 +48,8 @@ static void create_sections(void *ttb, uint64_t virt, uint64_t phys,
}
}
+#define EARLY_BITS_PER_VA 39
+
void mmu_early_enable(unsigned long membase, unsigned long memsize,
unsigned long ttb)
{
@@ -64,8 +66,8 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
memset((void *)ttb, 0, GRANULE_SIZE);
el = current_el();
- set_ttbr_tcr_mair(el, ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
- create_sections((void *)ttb, 0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
+ set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
+ create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1), UNCACHED_MEM);
create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
tlb_invalidate();
isb();
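The early code above keeps a 39-bit VA space because its single table starts the walk at level 1, where each of the 512 entries maps a 1 GiB block: granular enough to give SDRAM attributes different from the rest of the address space. With 48-bit VAs the walk would start at level 0, where one entry spans 512 GiB. A hedged sketch of that granularity argument (illustrative names, not barebox code):

#include <stdint.h>
#include <stdio.h>

/* Bytes mapped by one entry of a single top-level table, assuming a
 * 4 KB granule: the 512 entries (9 index bits) split the VA space. */
static uint64_t entry_coverage(int va_bits)
{
	return 1ULL << (va_bits - 9);
}

int main(void)
{
	/* 39-bit VAs: 1 GiB per entry, so SDRAM can be mapped cached while
	 * the rest stays uncached. 48-bit VAs: 512 GiB per entry. */
	printf("39-bit VA: %llu GiB per entry\n",
	       (unsigned long long)(entry_coverage(39) >> 30));
	printf("48-bit VA: %llu GiB per entry\n",
	       (unsigned long long)(entry_coverage(48) >> 30));
	return 0;
}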
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index b45a69661e..f7a13014af 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -64,7 +64,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
pte = ttb;
- for (i = 1; i < 4; i++) {
+ for (i = 0; i < 4; i++) {
block_shift = level2shift(i);
idx = (addr & level2mask(i)) >> block_shift;
pte += idx;
@@ -129,7 +129,7 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
while (size) {
table = ttb;
- for (level = 1; level < 4; level++) {
+ for (level = 0; level < 4; level++) {
block_shift = level2shift(level);
idx = (addr & level2mask(level)) >> block_shift;
block_size = (1ULL << block_shift);
@@ -193,7 +193,8 @@ void __mmu_init(bool mmu_on)
ttb = create_table();
el = current_el();
- set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
+ set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el, BITS_PER_VA),
+ MEMORY_ATTRIBUTES);
pr_debug("ttb: 0x%p\n", ttb);
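Both find_pte() and create_sections() now start their loops at level 0, so the walk descends through one extra table before reaching a block or page entry. A simplified standalone sketch of such a four-level descent (descriptor encodings follow the ARMv8-A format; the mask and type constants are assumptions standing in for barebox's pte_type() and XLAT_ADDR_MASK):

#include <stdint.h>

#define PTE_TYPE_MASK	0x3ULL
#define PTE_TYPE_TABLE	0x3ULL			/* table descriptor */
#define XLAT_ADDR_MASK	0x0000fffffffff000ULL	/* next-level table address */

static int level2shift(int level)
{
	return 12 + 9 * (3 - level);	/* 4 KB granule */
}

/* Return the entry mapping addr: stop at the last level or at any
 * descriptor that is not a table (block, page, or invalid). */
static uint64_t *walk(uint64_t *table, uint64_t addr)
{
	uint64_t *pte = table;
	int level;

	for (level = 0; level < 4; level++) {
		pte += (addr >> level2shift(level)) & 0x1ff;

		if (level == 3 || (*pte & PTE_TYPE_MASK) != PTE_TYPE_TABLE)
			break;

		pte = (uint64_t *)(uintptr_t)(*pte & XLAT_ADDR_MASK);
	}

	return pte;
}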
diff --git a/arch/arm/cpu/mmu_64.h b/arch/arm/cpu/mmu_64.h
index e2e125686d..a2a5477569 100644
--- a/arch/arm/cpu/mmu_64.h
+++ b/arch/arm/cpu/mmu_64.h
@@ -75,7 +75,9 @@ static inline uint64_t level2mask(int level)
{
uint64_t mask = -EINVAL;
- if (level == 1)
+ if (level == 0)
+ mask = L0_ADDR_MASK;
+ else if (level == 1)
mask = L1_ADDR_MASK;
else if (level == 2)
mask = L2_ADDR_MASK;
@@ -85,13 +87,12 @@ static inline uint64_t level2mask(int level)
return mask;
}
-static inline uint64_t calc_tcr(int el)
+static inline uint64_t calc_tcr(int el, int va_bits)
{
- u64 ips, va_bits;
+ u64 ips;
u64 tcr;
ips = 2;
- va_bits = BITS_PER_VA;
if (el == 1)
tcr = (ips << 32) | TCR_EPD1_DISABLE;
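The va_bits parameter ends up in TCR.T0SZ, which encodes the number of untranslated upper address bits as 64 - va_bits: 25 for the early 39-bit setup, 16 for the full 48-bit tables. A hedged sketch of the reworked helper, limited to what the hunk shows (the EL2/EL3 branch and the remaining TCR fields such as granule size and shareability are assumptions or elided):

#include <stdint.h>

#define TCR_T0SZ(bits)		((uint64_t)(64 - (bits)) << 0)	/* TCR.T0SZ */
#define TCR_EPD1_DISABLE	(1ULL << 23)	/* no TTBR1 walks */

static uint64_t calc_tcr_sketch(int el, int va_bits)
{
	uint64_t ips = 2;	/* 40-bit physical address size */
	uint64_t tcr;

	if (el == 1)
		tcr = (ips << 32) | TCR_EPD1_DISABLE;	/* IPS at [34:32] */
	else
		tcr = ips << 16;	/* assumption: EL2/EL3 PS field at [18:16] */

	return tcr | TCR_T0SZ(va_bits);	/* 25 -> 39-bit VAs, 16 -> 48-bit */
}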