Diffstat (limited to 'arch/arm/cpu/mmu_64.c')
-rw-r--r--  arch/arm/cpu/mmu_64.c | 222
1 file changed, 163 insertions(+), 59 deletions(-)
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 98cd4c754e..f907421da9 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -1,20 +1,6 @@
-/*
- * Copyright (c) 2009-2013 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- * Copyright (c) 2016 Raphaël Poggi <poggi.raph@gmail.com>
- *
- * See file CREDITS for list of people who contributed to this
- * project.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2009-2013 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+// SPDX-FileCopyrightText: 2016 Raphaël Poggi <poggi.raph@gmail.com>
#define pr_fmt(fmt) "mmu: " fmt
@@ -24,6 +10,7 @@
#include <init.h>
#include <mmu.h>
#include <errno.h>
+#include <zero_page.h>
#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/pgtable64.h>
@@ -32,10 +19,15 @@
#include <asm/cache.h>
#include <memory.h>
#include <asm/system_info.h>
+#include <linux/pagemap.h>
+#include <tee/optee.h>
#include "mmu_64.h"
-static uint64_t *ttb;
+static uint64_t *get_ttb(void)
+{
+ return (uint64_t *)get_ttbr(current_el());
+}
static void set_table(uint64_t *pt, uint64_t *table_addr)
{
@@ -45,7 +37,20 @@ static void set_table(uint64_t *pt, uint64_t *table_addr)
*pt = val;
}
-static uint64_t *create_table(void)
+#ifdef __PBL__
+static uint64_t *alloc_pte(void)
+{
+ static unsigned int idx;
+
+ idx++;
+
+ if (idx * GRANULE_SIZE >= ARM_EARLY_PAGETABLE_SIZE)
+ return NULL;
+
+ return (void *)get_ttb() + idx * GRANULE_SIZE;
+}
+#else
+static uint64_t *alloc_pte(void)
{
uint64_t *new_table = xmemalign(GRANULE_SIZE, GRANULE_SIZE);
@@ -54,6 +59,7 @@ static uint64_t *create_table(void)
return new_table;
}
+#endif
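
The PBL variant of alloc_pte() above is a simple bump allocator over the fixed early pagetable area: slot 0 holds the level-0 table returned by get_ttb(), which is why the pre-increment of idx makes the first call hand out slot 1. A sketch of the layout this assumes:

/*
 * Early pagetable area (ARM_EARLY_PAGETABLE_SIZE bytes), offsets
 * relative to get_ttb():
 *
 *   + 0 * GRANULE_SIZE   level-0 table, the ttb itself
 *   + 1 * GRANULE_SIZE   first table returned by alloc_pte()
 *   + 2 * GRANULE_SIZE   second table returned by alloc_pte()
 *   ...
 *
 * alloc_pte() returns NULL once the area is exhausted; its callers
 * panic in that case.
 */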
static __maybe_unused uint64_t *find_pte(uint64_t addr)
{
@@ -62,7 +68,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
uint64_t idx;
int i;
- pte = ttb;
+ pte = get_ttb();
for (i = 0; i < 4; i++) {
block_shift = level2shift(i);
@@ -94,7 +100,10 @@ static void split_block(uint64_t *pte, int level)
/* level describes the parent level, we need the child ones */
levelshift = level2shift(level + 1);
- new_table = create_table();
+ new_table = alloc_pte();
+ if (!new_table)
+ panic("Unable to allocate PTE\n");
+
for (i = 0; i < MAX_PTE_ENTRIES; i++) {
new_table[i] = old_pte | (i << levelshift);
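
Each entry written by split_block() inherits the old block's attributes; only the output-address bits advance. A worked example, assuming the 4 KiB granule used here:

/*
 * Splitting a level-1 block (1 GiB): the children sit at level 2,
 * so levelshift = level2shift(2) = 21 and each new entry steps the
 * output address by 2 MiB:
 *
 *   new_table[0]   = old_pte | (0UL   << 21);   // old base + 0
 *   new_table[1]   = old_pte | (1UL   << 21);   // old base + 2 MiB
 *   ...
 *   new_table[511] = old_pte | (511UL << 21);   // old base + 1022 MiB
 */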
@@ -111,6 +120,7 @@ static void split_block(uint64_t *pte, int level)
static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
uint64_t attr)
{
+ uint64_t *ttb = get_ttb();
uint64_t block_size;
uint64_t block_shift;
uint64_t *pte;
@@ -120,13 +130,12 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
uint64_t type;
int level;
- if (!ttb)
- arm_mmu_not_initialized_error();
-
addr = virt;
attr &= ~PTE_TYPE_MASK;
+ size = PAGE_ALIGN(size);
+
while (size) {
table = ttb;
for (level = 0; level < 4; level++) {
@@ -136,7 +145,8 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
pte = table + idx;
- if (size >= block_size && IS_ALIGNED(addr, block_size)) {
+ if (size >= block_size && IS_ALIGNED(addr, block_size) &&
+ IS_ALIGNED(phys, block_size)) {
type = (level == 3) ?
PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
*pte = phys | attr | type;
@@ -156,21 +166,42 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
tlb_invalidate();
}
-int arch_remap_range(void *_start, size_t size, unsigned flags)
+static unsigned long get_pte_attrs(unsigned flags)
{
switch (flags) {
case MAP_CACHED:
- flags = CACHED_MEM;
- break;
+ return CACHED_MEM;
case MAP_UNCACHED:
- flags = UNCACHED_MEM;
- break;
+ return attrs_uncached_mem();
+ case MAP_FAULT:
+ return 0x0;
default:
- return -EINVAL;
+ return ~0UL;
}
+}
+
+static void early_remap_range(uint64_t addr, size_t size, unsigned flags)
+{
+ unsigned long attrs = get_pte_attrs(flags);
+
+ if (WARN_ON(attrs == ~0UL))
+ return;
+
+ create_sections(addr, addr, size, attrs);
+}
+
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
+{
+ unsigned long attrs = get_pte_attrs(flags);
+
+ if (attrs == ~0UL)
+ return -EINVAL;
+
+ create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
+
+ if (flags == MAP_UNCACHED)
+ dma_inv_range(virt_addr, size);
- create_sections((uint64_t)_start, (uint64_t)_start, (uint64_t)size,
- flags);
return 0;
}
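
arch_remap_range() is normally reached through the generic remap_range() wrapper. A minimal usage sketch, assuming an identity-mapped buffer; the address, size and function name below are hypothetical:

/* Hypothetical caller: make a 1 MiB identity-mapped buffer uncached. */
static int example_uncached_buffer(void)
{
	void *buf = (void *)0x48000000;	/* assumed identity-mapped address */

	/*
	 * For MAP_UNCACHED, arch_remap_range() above also invalidates
	 * the dcache lines covering the buffer.
	 */
	return remap_range(buf, SZ_1M, MAP_UNCACHED);
}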
@@ -180,35 +211,57 @@ static void mmu_enable(void)
set_cr(get_cr() | CR_M | CR_C | CR_I);
}
+static void create_guard_page(void)
+{
+ ulong guard_page;
+
+ if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+ return;
+
+ guard_page = arm_mem_guard_page_get();
+ request_sdram_region("guard page", guard_page, PAGE_SIZE);
+ remap_range((void *)guard_page, PAGE_SIZE, MAP_FAULT);
+
+ pr_debug("Created guard page\n");
+}
+
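
MAP_FAULT translates to attribute 0x0 in get_pte_attrs(), i.e. a descriptor with the valid bit clear, so any access to the guard page raises a translation fault. A sketch of the intended picture:

/*
 *   [ stack ...  ]  grows downwards
 *   [ guard page ]  PAGE_SIZE, mapped MAP_FAULT (invalid descriptor)
 *   [ ...        ]
 *
 * An overflowing stack runs into the guard page and faults
 * immediately instead of silently corrupting the memory below it.
 */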
/*
 * Prepare MMU for usage and enable it.
*/
void __mmu_init(bool mmu_on)
{
+ uint64_t *ttb = get_ttb();
struct memory_bank *bank;
- unsigned int el;
-
- if (mmu_on)
- mmu_disable();
-
- ttb = create_table();
- el = current_el();
- set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el, BITS_PER_VA),
- MEMORY_ATTRIBUTES);
- pr_debug("ttb: 0x%p\n", ttb);
-
- /* create a flat mapping */
- create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
+ if (!request_sdram_region("ttb", (unsigned long)ttb,
+ ARM_EARLY_PAGETABLE_SIZE))
+ /*
+ * This can mean that:
+	 * - the early MMU code has put the ttb into a place
+	 *   that is outside of our available memory
+	 * - somebody else has occupied the ttb region, which means
+	 *   the ttb will get corrupted.
+ */
+ pr_crit("Can't request SDRAM region for ttb at %p\n", ttb);
+
+ for_each_memory_bank(bank) {
+ struct resource *rsv;
+ resource_size_t pos;
+
+ pos = bank->start;
+
+ /* Skip reserved regions */
+ for_each_reserved_region(bank, rsv) {
+ remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+ pos = rsv->end + 1;
+ }
- /* Map sdram cached. */
- for_each_memory_bank(bank)
- create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
+ remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+ }
/* Make the zero page fault to catch NULL pointer derefs */
- create_sections(0x0, 0x0, 0x1000, 0x0);
-
- mmu_enable();
+ zero_page_faulting();
+ create_guard_page();
}
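
The per-bank loop above maps everything cached except the reserved regions, which keep their early mapping. A worked example with hypothetical addresses:

/*
 * bank:     0x40000000 .. 0x7fffffff  (1 GiB)
 * reserved: 0x7fe00000 .. 0x7fffffff  (e.g. an OP-TEE carve-out)
 *
 * loop body:      remap_range((void *)0x40000000, 0x3fe00000, MAP_CACHED);
 *                 pos = 0x80000000;
 * after the loop: remap_range((void *)0x80000000, 0, MAP_CACHED);
 *                 (zero-sized tail: create_sections() does nothing)
 */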
void mmu_disable(void)
@@ -242,15 +295,66 @@ void dma_flush_range(void *ptr, size_t size)
v8_flush_dcache_range(start, end);
}
-void dma_sync_single_for_device(dma_addr_t address, size_t size,
- enum dma_data_direction dir)
+static void init_range(size_t total_level0_tables)
+{
+ uint64_t *ttb = get_ttb();
+ uint64_t addr = 0;
+
+ while (total_level0_tables--) {
+ early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED);
+ split_block(ttb, 0);
+ addr += L0_XLAT_SIZE;
+ ttb++;
+ }
+}
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
{
+ int el;
+ u64 optee_membase;
+ unsigned long ttb = arm_mem_ttb(membase + memsize);
+
+ if (get_cr() & CR_M)
+ return;
+
+ pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
+
+ el = current_el();
+ set_ttbr_tcr_mair(el, ttb, calc_tcr(el, BITS_PER_VA), MEMORY_ATTRIBUTES);
+ if (el == 3)
+ set_ttbr_tcr_mair(2, ttb, calc_tcr(2, BITS_PER_VA), MEMORY_ATTRIBUTES);
+
+ memset((void *)ttb, 0, GRANULE_SIZE);
+
/*
- * FIXME: This function needs a device argument to support non 1:1 mappings
+	 * Assume a maximum physical address width (BITS_PER_PA) of 40 bits.
+	 * We set up a 1:1 VA->PA mapping, so covering the full 1 TB range needs 2 tables.
*/
+ init_range(2);
+
+ early_remap_range(membase, memsize, MAP_CACHED);
+
+ if (optee_get_membase(&optee_membase))
+ optee_membase = membase + memsize - OPTEE_SIZE;
- if (dir == DMA_FROM_DEVICE)
- v8_inv_dcache_range(address, address + size - 1);
- else
- v8_flush_dcache_range(address, address + size - 1);
+ early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
+
+ early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+
+ mmu_enable();
+}
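
The arithmetic behind init_range(2): assuming the 4 KiB granule, one level-0 entry covers 1UL << 39 bytes, so two of them span the whole assumed 40-bit physical address space:

/*
 *   L0_XLAT_SIZE     = 1UL << 39   (512 GiB per level-0 entry)
 *   2 * L0_XLAT_SIZE = 1UL << 40   (1 TiB, i.e. BITS_PER_PA = 40)
 *
 * init_range() maps each 512 GiB range uncached and immediately
 * splits its level-0 entry, so the later early_remap_range() calls
 * can change attributes at a finer granularity.
 */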
+
+void mmu_early_disable(void)
+{
+ unsigned int cr;
+
+ cr = get_cr();
+ cr &= ~(CR_M | CR_C);
+
+ set_cr(cr);
+ v8_flush_dcache_all();
+ tlb_invalidate();
+
+ dsb();
+ isb();
}