-rw-r--r--  Documentation/devicetree/bindings/barebox/virtual-reg.rst | 29
-rw-r--r--  arch/arm/Makefile | 5
-rw-r--r--  arch/arm/boards/raspberry-pi/lowlevel.c | 26
-rw-r--r--  arch/arm/configs/multi_v8_defconfig | 1
-rw-r--r--  arch/arm/cpu/Kconfig | 3
-rw-r--r--  arch/arm/cpu/Makefile | 23
-rw-r--r--  arch/arm/cpu/cache_32.c (renamed from arch/arm/cpu/cache.c) | 85
-rw-r--r--  arch/arm/cpu/cache_64.c | 5
-rw-r--r--  arch/arm/cpu/cpu.c | 2
-rw-r--r--  arch/arm/cpu/dma_32.c | 20
-rw-r--r--  arch/arm/cpu/dma_64.c | 16
-rw-r--r--  arch/arm/cpu/entry.c | 2
-rw-r--r--  arch/arm/cpu/entry_ll_32.S (renamed from arch/arm/cpu/entry_ll.S) | 0
-rw-r--r--  arch/arm/cpu/exceptions_32.S (renamed from arch/arm/cpu/exceptions.S) | 0
-rw-r--r--  arch/arm/cpu/interrupts_32.c (renamed from arch/arm/cpu/interrupts.c) | 0
-rw-r--r--  arch/arm/cpu/lowlevel_32.S (renamed from arch/arm/cpu/lowlevel.S) | 0
-rw-r--r--  arch/arm/cpu/mmu-common.c | 17
-rw-r--r--  arch/arm/cpu/mmu-early.c | 71
-rw-r--r--  arch/arm/cpu/mmu-early_64.c | 93
-rw-r--r--  arch/arm/cpu/mmu_32.c (renamed from arch/arm/cpu/mmu.c) | 343
-rw-r--r--  arch/arm/cpu/mmu_32.h (renamed from arch/arm/cpu/mmu.h) | 20
-rw-r--r--  arch/arm/cpu/mmu_64.c | 119
-rw-r--r--  arch/arm/cpu/mmuinfo.c | 122
-rw-r--r--  arch/arm/cpu/mmuinfo_32.c | 80
-rw-r--r--  arch/arm/cpu/mmuinfo_64.c | 215
-rw-r--r--  arch/arm/cpu/setupc_32.S (renamed from arch/arm/cpu/setupc.S) | 0
-rw-r--r--  arch/arm/cpu/sm.c | 3
-rw-r--r--  arch/arm/cpu/smccc-call_32.S (renamed from arch/arm/cpu/smccc-call.S) | 0
-rw-r--r--  arch/arm/cpu/start.c | 21
-rw-r--r--  arch/arm/cpu/uncompress.c | 11
-rw-r--r--  arch/arm/include/asm/barebox-arm.h | 60
-rw-r--r--  arch/arm/include/asm/cache.h | 2
-rw-r--r--  arch/arm/include/asm/mmu.h | 13
-rw-r--r--  arch/arm/include/asm/mmuinfo.h | 9
-rw-r--r--  arch/arm/include/asm/sysreg.h | 76
-rw-r--r--  arch/arm/mach-imx/atf.c | 21
-rw-r--r--  arch/arm/mach-imx/xload-common.c | 2
-rw-r--r--  arch/powerpc/cpu-85xx/mmu.c | 7
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 2
-rw-r--r--  commands/Kconfig | 8
-rw-r--r--  commands/memtest.c | 5
-rw-r--r--  common/Kconfig | 13
-rw-r--r--  common/Makefile | 2
-rw-r--r--  common/boards/qemu-virt/Makefile | 3
-rw-r--r--  common/boards/qemu-virt/overlay-of-flash.dts | 5
-rw-r--r--  common/memtest.c | 44
-rw-r--r--  drivers/hab/habv4.c | 10
-rw-r--r--  drivers/mtd/nor/cfi_flash.c | 5
-rw-r--r--  drivers/of/platform.c | 20
-rw-r--r--  include/mach/rockchip/bootrom.h | 2
-rw-r--r--  include/memtest.h | 7
-rw-r--r--  include/mmu.h | 20
-rw-r--r--  include/zero_page.h | 12
-rw-r--r--  test/self/Kconfig | 6
-rw-r--r--  test/self/Makefile | 1
-rw-r--r--  test/self/mmu.c | 233
56 files changed, 1291 insertions, 629 deletions
diff --git a/Documentation/devicetree/bindings/barebox/virtual-reg.rst b/Documentation/devicetree/bindings/barebox/virtual-reg.rst
new file mode 100644
index 0000000000..7d576d0cef
--- /dev/null
+++ b/Documentation/devicetree/bindings/barebox/virtual-reg.rst
@@ -0,0 +1,29 @@
+virtual-reg property
+====================
+
+The ``virtual-reg`` property provides a hint about the 32-bit virtual
+address that maps the first physical base address in the ``reg`` property.
+This is meant to allow the OS to use the boot firmware's virtual memory
+mapping to access device resources early in the kernel boot process.
+
+When barebox is compiled with ``CONFIG_MMU`` support and the
+implementation supports remapping, devices with ``virtual-reg`` will have
+all their resources remapped at the physical/virtual address offset calculated
+by subtracting ``virtual-reg`` from the first address in ``reg``.
+
+This is normally used to map I/O memory away from the zero page, so the
+zero page can again be used to trap null pointer dereferences, while
+still allowing full access to the device memory:
+
+.. code-block:: none
+
+ &{/soc} {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash@0 {
+ reg = <0 0x10000>;
+ virtual-reg = <0x1000>;
+		/* => memory region remapped from [0x0000, 0x10000] to [0x1000, 0x11000] */
+ };
+ };
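
A quick worked example of the remap rule above (an illustrative helper,
not barebox API): with reg = <0 0x10000> and virtual-reg = <0x1000>,
every physical resource address shifts up by 0x1000 into virtual space:

    #include <stdint.h>
    #include <assert.h>

    /* virt = phys - (reg_base - virtual_reg); unsigned wraparound
     * makes the negative offset work out. */
    static uint32_t virt_of(uint32_t phys, uint32_t reg_base,
                            uint32_t virtual_reg)
    {
            return phys - (reg_base - virtual_reg);
    }

    int main(void)
    {
            assert(virt_of(0x0000, 0x0, 0x1000) == 0x1000);
            assert(virt_of(0xffff, 0x0, 0x1000) == 0x10fff);
            return 0;
    }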
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a506f1e3a3..cb88c7b330 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -78,10 +78,13 @@ endif
ifeq ($(CONFIG_CPU_V8), y)
KBUILD_CPPFLAGS += $(CFLAGS_ABI) $(arch-y) $(tune-y)
KBUILD_AFLAGS += -include asm/unified.h
-export S64 = _64
+export S64_32 = 64
+export S64 = 64
else
KBUILD_CPPFLAGS += $(CFLAGS_ABI) $(arch-y) $(tune-y) $(CFLAGS_THUMB2)
KBUILD_AFLAGS += -include asm/unified.h -msoft-float $(AFLAGS_THUMB2)
+export S64_32 = 32
+export S32 = 32
endif
# Machine directory name. This list is sorted alphanumerically
diff --git a/arch/arm/boards/raspberry-pi/lowlevel.c b/arch/arm/boards/raspberry-pi/lowlevel.c
index 742f177dec..f8078dba67 100644
--- a/arch/arm/boards/raspberry-pi/lowlevel.c
+++ b/arch/arm/boards/raspberry-pi/lowlevel.c
@@ -37,31 +37,33 @@ static void copy_vc_fdt(void *dest, void *src, unsigned long max_size)
memmove(dest, src, size);
}
-/* A pointer to the FDT created by VideoCore was passed to us in x0/r2. We
- * reserve some memory just above the region used for Barebox and copy
- * this FDT there. We fetch it from there later in rpi_devices_init().
- */
-#define rpi_stack_top(memsize) \
- arm_mem_stack_top(BCM2835_SDRAM_BASE, BCM2835_SDRAM_BASE + memsize - VIDEOCORE_FDT_SZ)
-
static inline void start_raspberry_pi(unsigned long memsize, void *fdt,
void *vc_fdt)
{
- unsigned long endmem = rpi_stack_top(memsize);
+ unsigned long endmem;
+
+ /*
+ * A pointer to the FDT created by VideoCore was passed to us in x0/r2. We
+	 * reserve some memory at the end of SDRAM and copy this FDT there. We fetch it
+ * from there later in rpi_devices_init().
+ */
+ memsize -= VIDEOCORE_FDT_SZ;
+ endmem = BCM2835_SDRAM_BASE + memsize;
- copy_vc_fdt((void *)endmem, vc_fdt, VIDEOCORE_FDT_SZ);
+ /* leave SZ_1K for the initial stack */
+ copy_vc_fdt((void *)endmem, vc_fdt, VIDEOCORE_FDT_SZ - SZ_1K);
fdt += get_runtime_offset();
- barebox_arm_entry(BCM2835_SDRAM_BASE, endmem - BCM2835_SDRAM_BASE, fdt);
+ barebox_arm_entry(BCM2835_SDRAM_BASE, memsize, fdt);
}
#ifdef CONFIG_CPU_V8
#define RPI_ENTRY_FUNCTION(name, memsize, fdt) \
- ENTRY_FUNCTION_WITHSTACK(name, rpi_stack_top(memsize), fdt, __x1, __x2)
+ ENTRY_FUNCTION_WITHSTACK(name, BCM2835_SDRAM_BASE + (memsize), fdt, __x1, __x2)
#else
#define RPI_ENTRY_FUNCTION(name, memsize, fdt) \
- ENTRY_FUNCTION_WITHSTACK(name, rpi_stack_top(memsize), __r0, __r1, fdt)
+ ENTRY_FUNCTION_WITHSTACK(name, BCM2835_SDRAM_BASE + (memsize), __r0, __r1, fdt)
#endif
extern char __dtb_z_bcm2835_rpi_start[];
diff --git a/arch/arm/configs/multi_v8_defconfig b/arch/arm/configs/multi_v8_defconfig
index f9782a2b2a..3e15f28c09 100644
--- a/arch/arm/configs/multi_v8_defconfig
+++ b/arch/arm/configs/multi_v8_defconfig
@@ -54,6 +54,7 @@ CONFIG_LONGHELP=y
CONFIG_CMD_IOMEM=y
CONFIG_CMD_IMD=y
CONFIG_CMD_MEMINFO=y
+CONFIG_CMD_ARM_MMUINFO=y
CONFIG_CMD_REGULATOR=y
CONFIG_CMD_MMC_EXTCSD=y
CONFIG_CMD_GO=y
diff --git a/arch/arm/cpu/Kconfig b/arch/arm/cpu/Kconfig
index 26f07043fe..40dd35833a 100644
--- a/arch/arm/cpu/Kconfig
+++ b/arch/arm/cpu/Kconfig
@@ -11,6 +11,7 @@ config CPU_32
select HAVE_MOD_ARCH_SPECIFIC
select HAS_DMA
select HAVE_PBL_IMAGE
+ select ARCH_HAS_ZERO_PAGE
config CPU_64
bool
@@ -19,6 +20,7 @@ config CPU_64
select HAVE_PBL_MULTI_IMAGES
select HAS_DMA
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_HAS_ZERO_PAGE
# Select CPU types depending on the architecture selected. This selects
# which CPUs we support in the kernel image, and the compiler instruction
@@ -92,7 +94,6 @@ config CPU_V8
select ARM_EXCEPTIONS
select GENERIC_FIND_NEXT_BIT
select ARCH_HAS_STACK_DUMP
- select ARCH_HAS_ZERO_PAGE
config CPU_XSC3
bool
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 7674c1464c..5baff2fad0 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -2,15 +2,16 @@
obj-y += cpu.o
-obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions$(S64).o interrupts$(S64).o
-obj-$(CONFIG_MMU) += mmu$(S64).o mmu-common.o
-obj-pbl-y += lowlevel$(S64).o
-obj-pbl-$(CONFIG_MMU) += mmu-early$(S64).o
+obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions_$(S64_32).o interrupts_$(S64_32).o
+obj-$(CONFIG_MMU) += mmu-common.o
+obj-pbl-$(CONFIG_MMU) += mmu_$(S64_32).o
+obj-$(CONFIG_MMU) += dma_$(S64_32).o
+obj-pbl-y += lowlevel_$(S64_32).o
obj-pbl-$(CONFIG_CPU_32v7) += hyp.o
AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
-obj-y += start.o entry.o entry_ll$(S64).o
+obj-y += start.o entry.o entry_ll_$(S64_32).o
KASAN_SANITIZE_start.o := n
pbl-$(CONFIG_CPU_64) += head_64.o
@@ -18,7 +19,7 @@ pbl-$(CONFIG_CPU_64) += head_64.o
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT) += board-dt-2nd.o
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT_AARCH64) += board-dt-2nd-aarch64.o
-obj-pbl-y += setupc$(S64).o cache$(S64).o
+obj-pbl-y += setupc_$(S64_32).o cache_$(S64_32).o
obj-$(CONFIG_ARM_PSCI_CLIENT) += psci-client.o
@@ -26,7 +27,7 @@ obj-$(CONFIG_ARM_PSCI_CLIENT) += psci-client.o
# Any variants can be called as start-armxyz.S
#
obj-$(CONFIG_CMD_ARM_CPUINFO) += cpuinfo.o
-obj-$(CONFIG_CMD_ARM_MMUINFO) += mmuinfo.o
+obj-$(CONFIG_MMUINFO) += mmuinfo.o mmuinfo_$(S64_32).o
obj-$(CONFIG_OFDEVICE) += dtb.o
ifeq ($(CONFIG_MMU),)
@@ -35,9 +36,9 @@ endif
obj-$(CONFIG_ARM_PSCI) += psci.o
obj-$(CONFIG_ARM_PSCI_OF) += psci-of.o
-obj-pbl-$(CONFIG_ARM_SMCCC) += smccc-call$(S64).o
-AFLAGS_smccc-call$(S64).o :=-Wa,-march=armv$(if $(S64),8,7)-a
-AFLAGS_smccc-call$(S64).pbl.o :=-Wa,-march=armv$(if $(S64),8,7)-a
+obj-pbl-$(CONFIG_ARM_SMCCC) += smccc-call_$(S64_32).o
+AFLAGS_smccc-call_$(S64_32).o :=-Wa,-march=armv$(if $(S64),8,7)-a
+AFLAGS_smccc-call_$(S64_32).pbl.o :=-Wa,-march=armv$(if $(S64),8,7)-a
obj-$(CONFIG_ARM_SECURE_MONITOR) += sm.o sm_as.o
AFLAGS_sm_as.o :=-Wa,-march=armv7-a
@@ -52,7 +53,7 @@ obj-pbl-$(CONFIG_CPU_64v8) += cache-armv8.o
AFLAGS_cache-armv8.o :=-Wa,-march=armv8-a
AFLAGS-cache-armv8.pbl.o :=-Wa,-march=armv8-a
-pbl-y += entry.o entry_ll$(S64).o
+pbl-y += entry.o entry_ll_$(S64_32).o
pbl-y += uncompress.o
pbl-$(CONFIG_ARM_ATF) += atf.o
diff --git a/arch/arm/cpu/cache.c b/arch/arm/cpu/cache_32.c
index 24a02c68f3..0ac50c4d9a 100644
--- a/arch/arm/cpu/cache.c
+++ b/arch/arm/cpu/cache_32.c
@@ -6,7 +6,7 @@
#include <asm/cache.h>
#include <asm/system_info.h>
-#include "mmu.h"
+#include "mmu_32.h"
struct cache_fns {
void (*dma_clean_range)(unsigned long start, unsigned long end);
@@ -17,8 +17,6 @@ struct cache_fns {
void (*mmu_cache_flush)(void);
};
-struct cache_fns *cache_fns;
-
#define DEFINE_CPU_FNS(arch) \
void arch##_dma_clean_range(unsigned long start, unsigned long end); \
void arch##_dma_flush_range(unsigned long start, unsigned long end); \
@@ -41,50 +39,13 @@ DEFINE_CPU_FNS(v5)
DEFINE_CPU_FNS(v6)
DEFINE_CPU_FNS(v7)
-void __dma_clean_range(unsigned long start, unsigned long end)
-{
- if (cache_fns)
- cache_fns->dma_clean_range(start, end);
-}
-
-void __dma_flush_range(unsigned long start, unsigned long end)
-{
- if (cache_fns)
- cache_fns->dma_flush_range(start, end);
-}
-
-void __dma_inv_range(unsigned long start, unsigned long end)
-{
- if (cache_fns)
- cache_fns->dma_inv_range(start, end);
-}
-
-#ifdef CONFIG_MMU
-
-void __mmu_cache_on(void)
-{
- if (cache_fns)
- cache_fns->mmu_cache_on();
-}
-
-void __mmu_cache_off(void)
+static struct cache_fns *cache_functions(void)
{
- if (cache_fns)
- cache_fns->mmu_cache_off();
-}
+ static struct cache_fns *cache_fns;
-void __mmu_cache_flush(void)
-{
if (cache_fns)
- cache_fns->mmu_cache_flush();
- if (outer_cache.flush_all)
- outer_cache.flush_all();
-}
-
-#endif
+ return cache_fns;
-int arm_set_cache_functions(void)
-{
switch (cpu_architecture()) {
#ifdef CONFIG_CPU_32v4T
case CPU_ARCH_ARMv4T:
@@ -113,9 +74,45 @@ int arm_set_cache_functions(void)
while(1);
}
- return 0;
+ return cache_fns;
+}
+
+void __dma_clean_range(unsigned long start, unsigned long end)
+{
+ cache_functions()->dma_clean_range(start, end);
+}
+
+void __dma_flush_range(unsigned long start, unsigned long end)
+{
+ cache_functions()->dma_flush_range(start, end);
+}
+
+void __dma_inv_range(unsigned long start, unsigned long end)
+{
+ cache_functions()->dma_inv_range(start, end);
+}
+
+#ifdef CONFIG_MMU
+
+void __mmu_cache_on(void)
+{
+ cache_functions()->mmu_cache_on();
+}
+
+void __mmu_cache_off(void)
+{
+ cache_functions()->mmu_cache_off();
}
+void __mmu_cache_flush(void)
+{
+ cache_functions()->mmu_cache_flush();
+ if (outer_cache.flush_all)
+ outer_cache.flush_all();
+}
+
+#endif
+
/*
* Early function to flush the caches. This is for use when the
* C environment is not yet fully initialized.
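
The hunk above replaces the explicit arm_set_cache_functions() setup
call with lazy resolution on first use. A minimal standalone sketch of
that pattern (hypothetical names, only one architecture shown):

    #include <stdio.h>

    struct cache_fns { void (*flush)(void); };

    static void v7_flush(void) { puts("v7 flush"); }

    static const struct cache_fns *cache_functions(void)
    {
            static const struct cache_fns v7_fns = { .flush = v7_flush };
            static const struct cache_fns *fns;  /* resolved exactly once */

            if (!fns)
                    fns = &v7_fns;  /* real code switches on cpu_architecture() */

            return fns;
    }

    int main(void) { cache_functions()->flush(); return 0; }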
diff --git a/arch/arm/cpu/cache_64.c b/arch/arm/cpu/cache_64.c
index cb7bc0945c..3a30296128 100644
--- a/arch/arm/cpu/cache_64.c
+++ b/arch/arm/cpu/cache_64.c
@@ -6,11 +6,6 @@
#include <asm/cache.h>
#include <asm/system_info.h>
-int arm_set_cache_functions(void)
-{
- return 0;
-}
-
/*
* Early function to flush the caches. This is for use when the
* C environment is not yet fully initialized.
diff --git a/arch/arm/cpu/cpu.c b/arch/arm/cpu/cpu.c
index 5b79dd2a8f..cacd442b28 100644
--- a/arch/arm/cpu/cpu.c
+++ b/arch/arm/cpu/cpu.c
@@ -18,8 +18,6 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
-#include "mmu.h"
-
/**
* Enable processor's instruction cache
*/
diff --git a/arch/arm/cpu/dma_32.c b/arch/arm/cpu/dma_32.c
new file mode 100644
index 0000000000..a66aa26b9b
--- /dev/null
+++ b/arch/arm/cpu/dma_32.c
@@ -0,0 +1,20 @@
+#include <dma.h>
+#include <asm/mmu.h>
+
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
+ enum dma_data_direction dir)
+{
+ /*
+ * FIXME: This function needs a device argument to support non 1:1 mappings
+ */
+
+ if (dir == DMA_FROM_DEVICE) {
+ __dma_inv_range(address, address + size);
+ if (outer_cache.inv_range)
+ outer_cache.inv_range(address, address + size);
+ } else {
+ __dma_clean_range(address, address + size);
+ if (outer_cache.clean_range)
+ outer_cache.clean_range(address, address + size);
+ }
+}
diff --git a/arch/arm/cpu/dma_64.c b/arch/arm/cpu/dma_64.c
new file mode 100644
index 0000000000..b4ae736c9b
--- /dev/null
+++ b/arch/arm/cpu/dma_64.c
@@ -0,0 +1,16 @@
+#include <dma.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
+ enum dma_data_direction dir)
+{
+ /*
+ * FIXME: This function needs a device argument to support non 1:1 mappings
+ */
+
+ if (dir == DMA_FROM_DEVICE)
+ v8_inv_dcache_range(address, address + size - 1);
+ else
+ v8_flush_dcache_range(address, address + size - 1);
+}
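
Both new dma_*.c files implement the same contract: DMA_FROM_DEVICE
invalidates (the device is about to write the buffer), anything else
cleans/flushes (the device is about to read it). A hedged sketch of the
intended call pattern around a receive transfer, assuming the 1:1
mapping the FIXME comments mention (buffer and engine are made up):

    static void dma_rx(void *buf, size_t len)
    {
            dma_addr_t dma = (dma_addr_t)(uintptr_t)buf;  /* 1:1 mapping */

            /* drop stale cache lines before the device writes the buffer */
            dma_sync_single_for_device(dma, len, DMA_FROM_DEVICE);

            /* ... program the DMA engine and wait for completion ... */

            /* hand the buffer back to the CPU before reading it */
            dma_sync_single_for_cpu(dma, len, DMA_FROM_DEVICE);
    }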
diff --git a/arch/arm/cpu/entry.c b/arch/arm/cpu/entry.c
index b863af5757..dc264c8771 100644
--- a/arch/arm/cpu/entry.c
+++ b/arch/arm/cpu/entry.c
@@ -40,5 +40,5 @@ void NAKED __noreturn barebox_arm_entry(unsigned long membase,
unsigned long memsize, void *boarddata)
{
__barebox_arm_entry(membase, memsize, boarddata,
- arm_mem_stack_top(membase, membase + memsize));
+ arm_mem_stack_top(membase + memsize));
}
diff --git a/arch/arm/cpu/entry_ll.S b/arch/arm/cpu/entry_ll_32.S
index 2800174c45..2800174c45 100644
--- a/arch/arm/cpu/entry_ll.S
+++ b/arch/arm/cpu/entry_ll_32.S
diff --git a/arch/arm/cpu/exceptions.S b/arch/arm/cpu/exceptions_32.S
index 749c713aab..749c713aab 100644
--- a/arch/arm/cpu/exceptions.S
+++ b/arch/arm/cpu/exceptions_32.S
diff --git a/arch/arm/cpu/interrupts.c b/arch/arm/cpu/interrupts_32.c
index 5bc790a796..5bc790a796 100644
--- a/arch/arm/cpu/interrupts.c
+++ b/arch/arm/cpu/interrupts_32.c
diff --git a/arch/arm/cpu/lowlevel.S b/arch/arm/cpu/lowlevel_32.S
index 960a92b78c..960a92b78c 100644
--- a/arch/arm/cpu/lowlevel.S
+++ b/arch/arm/cpu/lowlevel_32.S
diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 488a189f1c..5208db21ec 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -11,7 +11,8 @@
#include <asm/system.h>
#include <asm/barebox-arm.h>
#include <memory.h>
-#include "mmu.h"
+#include <zero_page.h>
+#include "mmu-common.h"
void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
enum dma_data_direction dir)
@@ -35,7 +36,7 @@ void *dma_alloc_map(size_t size, dma_addr_t *dma_handle, unsigned flags)
memset(ret, 0, size);
dma_flush_range(ret, size);
- arch_remap_range(ret, size, flags);
+ remap_range(ret, size, flags);
return ret;
}
@@ -52,11 +53,21 @@ void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size)
{
size = PAGE_ALIGN(size);
- arch_remap_range(mem, size, MAP_CACHED);
+ remap_range(mem, size, MAP_CACHED);
free(mem);
}
+void zero_page_access(void)
+{
+ remap_range(0x0, PAGE_SIZE, MAP_CACHED);
+}
+
+void zero_page_faulting(void)
+{
+ remap_range(0x0, PAGE_SIZE, MAP_FAULT);
+}
+
static int mmu_init(void)
{
if (list_empty(&memory_banks)) {
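
Moving zero_page_access()/zero_page_faulting() into mmu-common.c gives
32- and 64-bit code one shared way to toggle the NULL-pointer trap. A
sketch of how a caller that legitimately needs page 0 (the HABv4 ROM
code touched later in this series is such a case) would bracket the
access (buf and length are placeholders):

    zero_page_access();               /* map page 0 readable again */
    memcpy(buf, (void *)0x0, 0x100);  /* deliberate access to page 0 */
    zero_page_faulting();             /* restore the fault-on-NULL trap */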
diff --git a/arch/arm/cpu/mmu-early.c b/arch/arm/cpu/mmu-early.c
deleted file mode 100644
index 0d528b9b9c..0000000000
--- a/arch/arm/cpu/mmu-early.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <asm/mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <asm-generic/sections.h>
-
-#include "mmu.h"
-
-static uint32_t *ttb;
-
-static inline void map_region(unsigned long start, unsigned long size,
- uint64_t flags)
-
-{
- start = ALIGN_DOWN(start, SZ_1M);
- size = ALIGN(size, SZ_1M);
-
- create_sections(ttb, start, start + size - 1, flags);
-}
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long _ttb)
-{
- ttb = (uint32_t *)_ttb;
-
- arm_set_cache_functions();
-
- set_ttbr(ttb);
-
- /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
- if (cpu_architecture() >= CPU_ARCH_ARMv7)
- set_domain(DOMAIN_CLIENT);
- else
- set_domain(DOMAIN_MANAGER);
-
- /*
- * This marks the whole address space as uncachable as well as
- * unexecutable if possible
- */
- create_flat_mapping(ttb);
-
- /*
- * There can be SoCs that have a section shared between device memory
- * and the on-chip RAM hosting the PBL. Thus mark this section
- * uncachable, but executable.
- * On such SoCs, executing from OCRAM could cause the instruction
- * prefetcher to speculatively access that device memory, triggering
- * potential errant behavior.
- *
- * If your SoC has such a memory layout, you should rewrite the code
- * here to map the OCRAM page-wise.
- */
- map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
-
- /* maps main memory as cachable */
- map_region(membase, memsize, PMD_SECT_DEF_CACHED);
-
- /*
- * With HAB enabled we call into the ROM code later in imx6_hab_get_status().
- * Map the ROM cached which has the effect that the XN bit is not set.
- */
- if (IS_ENABLED(CONFIG_HABV4) && IS_ENABLED(CONFIG_ARCH_IMX6))
- map_region(0x0, SZ_1M, PMD_SECT_DEF_CACHED);
-
- __mmu_cache_on();
-}
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
deleted file mode 100644
index d1f4a046bb..0000000000
--- a/arch/arm/cpu/mmu-early_64.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <dma-dir.h>
-#include <init.h>
-#include <mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/pgtable64.h>
-#include <asm/barebox-arm.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <memory.h>
-#include <asm/system_info.h>
-
-#include "mmu_64.h"
-
-static void create_sections(void *ttb, uint64_t virt, uint64_t phys,
- uint64_t size, uint64_t attr)
-{
- uint64_t block_size;
- uint64_t block_shift;
- uint64_t *pte;
- uint64_t idx;
- uint64_t addr;
- uint64_t *table;
-
- addr = virt;
-
- attr &= ~PTE_TYPE_MASK;
-
- table = ttb;
-
- while (1) {
- block_shift = level2shift(1);
- idx = (addr & level2mask(1)) >> block_shift;
- block_size = (1ULL << block_shift);
-
- pte = table + idx;
-
- *pte = phys | attr | PTE_TYPE_BLOCK;
-
- if (size < block_size)
- break;
-
- addr += block_size;
- phys += block_size;
- size -= block_size;
- }
-}
-
-#define EARLY_BITS_PER_VA 39
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb)
-{
- int el;
-
- /*
- * For the early code we only create level 1 pagetables which only
- * allow for a 1GiB granularity. If our membase is not aligned to that
- * bail out without enabling the MMU.
- */
- if (membase & ((1ULL << level2shift(1)) - 1))
- return;
-
- memset((void *)ttb, 0, GRANULE_SIZE);
-
- el = current_el();
- set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
- create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
- attrs_uncached_mem());
- create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
- tlb_invalidate();
- isb();
- set_cr(get_cr() | CR_M);
-}
-
-void mmu_early_disable(void)
-{
- unsigned int cr;
-
- cr = get_cr();
- cr &= ~(CR_M | CR_C);
-
- set_cr(cr);
- v8_flush_dcache_all();
- tlb_invalidate();
-
- dsb();
- isb();
-}
diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu_32.c
index 6388e1bf14..1c59225934 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -9,6 +9,7 @@
#include <init.h>
#include <mmu.h>
#include <errno.h>
+#include <zero_page.h>
#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/barebox-arm.h>
@@ -18,12 +19,16 @@
#include <asm/system_info.h>
#include <asm/sections.h>
-#include "mmu.h"
+#include "mmu_32.h"
#define PTRS_PER_PTE (PGDIR_SIZE / PAGE_SIZE)
#define ARCH_MAP_WRITECOMBINE ((unsigned)-1)
-static uint32_t *ttb;
+static inline uint32_t *get_ttb(void)
+{
+ /* Clear unpredictable bits [13:0] */
+ return (uint32_t *)(get_ttbr() & ~0x3fff);
+}
/*
* Do it the simple way for now and invalidate the entire
@@ -52,29 +57,36 @@ static inline void tlb_invalidate(void)
PMD_SECT_BUFFERABLE | PMD_SECT_XN)
#define PGD_FLAGS_UNCACHED_V7 (PMD_SECT_DEF_UNCACHED | PMD_SECT_XN)
-/*
- * PTE flags to set cached and uncached areas.
- * This will be determined at runtime.
- */
-static uint32_t pte_flags_cached;
-static uint32_t pte_flags_wc;
-static uint32_t pte_flags_uncached;
-static uint32_t pgd_flags_wc;
-static uint32_t pgd_flags_uncached;
-
-#define PTE_MASK ((1 << 12) - 1)
-
static bool pgd_type_table(u32 pgd)
{
return (pgd & PMD_TYPE_MASK) == PMD_TYPE_TABLE;
}
+#define PTE_SIZE (PTRS_PER_PTE * sizeof(u32))
+
+#ifdef __PBL__
+static uint32_t *alloc_pte(void)
+{
+ static unsigned int idx = 3;
+
+ idx++;
+
+ if (idx * PTE_SIZE >= ARM_EARLY_PAGETABLE_SIZE)
+ return NULL;
+
+ return get_ttb() + idx * PTE_SIZE;
+}
+#else
+static uint32_t *alloc_pte(void)
+{
+ return xmemalign(PTE_SIZE, PTE_SIZE);
+}
+#endif
+
static u32 *find_pte(unsigned long adr)
{
u32 *table;
-
- if (!ttb)
- arm_mmu_not_initialized_error();
+ uint32_t *ttb = get_ttb();
if (!pgd_type_table(ttb[pgd_index(adr)]))
return NULL;
@@ -92,6 +104,7 @@ void dma_flush_range(void *ptr, size_t size)
unsigned long end = start + size;
__dma_flush_range(start, end);
+
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
@@ -103,6 +116,7 @@ void dma_inv_range(void *ptr, size_t size)
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
+
__dma_inv_range(start, end);
}
@@ -111,24 +125,24 @@ void dma_inv_range(void *ptr, size_t size)
* We initially create a flat uncached mapping on it.
* Not yet exported, but may be later if someone finds use for it.
*/
-static u32 *arm_create_pte(unsigned long virt, uint32_t flags)
+static u32 *arm_create_pte(unsigned long virt, unsigned long phys,
+ uint32_t flags)
{
+ uint32_t *ttb = get_ttb();
u32 *table;
int i, ttb_idx;
virt = ALIGN_DOWN(virt, PGDIR_SIZE);
+ phys = ALIGN_DOWN(phys, PGDIR_SIZE);
- table = xmemalign(PTRS_PER_PTE * sizeof(u32),
- PTRS_PER_PTE * sizeof(u32));
-
- if (!ttb)
- arm_mmu_not_initialized_error();
+ table = alloc_pte();
ttb_idx = pgd_index(virt);
for (i = 0; i < PTRS_PER_PTE; i++) {
- table[i] = virt | PTE_TYPE_SMALL | flags;
+ table[i] = phys | PTE_TYPE_SMALL | flags;
virt += PAGE_SIZE;
+ phys += PAGE_SIZE;
}
dma_flush_range(table, PTRS_PER_PTE * sizeof(u32));
@@ -146,59 +160,113 @@ static u32 pmd_flags_to_pte(u32 pmd)
pte |= PTE_BUFFERABLE;
if (pmd & PMD_SECT_CACHEABLE)
pte |= PTE_CACHEABLE;
- if (pmd & PMD_SECT_nG)
- pte |= PTE_EXT_NG;
- if (pmd & PMD_SECT_XN)
- pte |= PTE_EXT_XN;
-
- /* TEX[2:0] */
- pte |= PTE_EXT_TEX((pmd >> 12) & 7);
- /* AP[1:0] */
- pte |= ((pmd >> 10) & 0x3) << 4;
- /* AP[2] */
- pte |= ((pmd >> 15) & 0x1) << 9;
+
+ if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+ if (pmd & PMD_SECT_nG)
+ pte |= PTE_EXT_NG;
+ if (pmd & PMD_SECT_XN)
+ pte |= PTE_EXT_XN;
+
+ /* TEX[2:0] */
+ pte |= PTE_EXT_TEX((pmd >> 12) & 7);
+ /* AP[1:0] */
+ pte |= ((pmd >> 10) & 0x3) << 4;
+ /* AP[2] */
+ pte |= ((pmd >> 15) & 0x1) << 9;
+ } else {
+ pte |= PTE_SMALL_AP_UNO_SRW;
+ }
return pte;
}
-int arch_remap_range(void *start, size_t size, unsigned flags)
+static u32 pte_flags_to_pmd(u32 pte)
+{
+ u32 pmd = 0;
+
+ if (pte & PTE_BUFFERABLE)
+ pmd |= PMD_SECT_BUFFERABLE;
+ if (pte & PTE_CACHEABLE)
+ pmd |= PMD_SECT_CACHEABLE;
+
+ if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+ if (pte & PTE_EXT_NG)
+ pmd |= PMD_SECT_nG;
+ if (pte & PTE_EXT_XN)
+ pmd |= PMD_SECT_XN;
+
+ /* TEX[2:0] */
+ pmd |= ((pte >> 6) & 7) << 12;
+ /* AP[1:0] */
+ pmd |= ((pte >> 4) & 0x3) << 10;
+ /* AP[2] */
+ pmd |= ((pte >> 9) & 0x1) << 15;
+ } else {
+ pmd |= PMD_SECT_AP_WRITE | PMD_SECT_AP_READ;
+ }
+
+ return pmd;
+}
+
+static uint32_t get_pte_flags(int map_type)
{
- u32 addr = (u32)start;
- u32 pte_flags;
- u32 pgd_flags;
-
- BUG_ON(!IS_ALIGNED(addr, PAGE_SIZE));
-
- switch (flags) {
- case MAP_CACHED:
- pte_flags = pte_flags_cached;
- pgd_flags = PMD_SECT_DEF_CACHED;
- break;
- case MAP_UNCACHED:
- pte_flags = pte_flags_uncached;
- pgd_flags = pgd_flags_uncached;
- break;
- case ARCH_MAP_WRITECOMBINE:
- pte_flags = pte_flags_wc;
- pgd_flags = pgd_flags_wc;
- break;
- default:
- return -EINVAL;
+ if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+ switch (map_type) {
+ case MAP_CACHED:
+ return PTE_FLAGS_CACHED_V7;
+ case MAP_UNCACHED:
+ return PTE_FLAGS_UNCACHED_V7;
+ case ARCH_MAP_WRITECOMBINE:
+ return PTE_FLAGS_WC_V7;
+ case MAP_FAULT:
+ default:
+ return 0x0;
+ }
+ } else {
+ switch (map_type) {
+ case MAP_CACHED:
+ return PTE_FLAGS_CACHED_V4;
+ case MAP_UNCACHED:
+ case ARCH_MAP_WRITECOMBINE:
+ return PTE_FLAGS_UNCACHED_V4;
+ case MAP_FAULT:
+ default:
+ return 0x0;
+ }
}
+}
+
+static uint32_t get_pmd_flags(int map_type)
+{
+ return pte_flags_to_pmd(get_pte_flags(map_type));
+}
+
+int arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
+{
+ u32 virt_addr = (u32)_virt_addr;
+ u32 pte_flags, pmd_flags;
+ uint32_t *ttb = get_ttb();
+
+ BUG_ON(!IS_ALIGNED(virt_addr, PAGE_SIZE));
+ BUG_ON(!IS_ALIGNED(phys_addr, PAGE_SIZE));
+
+ pte_flags = get_pte_flags(map_type);
+ pmd_flags = pte_flags_to_pmd(pte_flags);
while (size) {
- const bool pgdir_size_aligned = IS_ALIGNED(addr, PGDIR_SIZE);
- u32 *pgd = (u32 *)&ttb[pgd_index(addr)];
+ const bool pgdir_size_aligned = IS_ALIGNED(virt_addr, PGDIR_SIZE);
+ u32 *pgd = (u32 *)&ttb[pgd_index(virt_addr)];
size_t chunk;
if (size >= PGDIR_SIZE && pgdir_size_aligned &&
+ IS_ALIGNED(phys_addr, PGDIR_SIZE) &&
!pgd_type_table(*pgd)) {
/*
* TODO: Add code to discard a page table and
* replace it with a section
*/
chunk = PGDIR_SIZE;
- *pgd = addr | pgd_flags;
+ *pgd = phys_addr | pmd_flags | PMD_TYPE_SECT;
dma_flush_range(pgd, sizeof(*pgd));
} else {
unsigned int num_ptes;
@@ -213,7 +281,7 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
* was not aligned on PGDIR_SIZE boundary)
*/
chunk = pgdir_size_aligned ?
- PGDIR_SIZE : ALIGN(addr, PGDIR_SIZE) - addr;
+ PGDIR_SIZE : ALIGN(virt_addr, PGDIR_SIZE) - virt_addr;
/*
* At the same time we want to make sure that
* we don't go on remapping past requested
@@ -223,27 +291,29 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
chunk = min(chunk, size);
num_ptes = chunk / PAGE_SIZE;
- pte = find_pte(addr);
+ pte = find_pte(virt_addr);
if (!pte) {
/*
* If PTE is not found it means that
* we needs to split this section and
* create a new page table for it
*/
- table = arm_create_pte(addr, pmd_flags_to_pte(*pgd));
- pte = find_pte(addr);
+ table = arm_create_pte(virt_addr, phys_addr,
+ pmd_flags_to_pte(*pgd));
+ pte = find_pte(virt_addr);
BUG_ON(!pte);
}
for (i = 0; i < num_ptes; i++) {
- pte[i] &= ~PTE_MASK;
+ pte[i] = phys_addr + i * PAGE_SIZE;
pte[i] |= pte_flags | PTE_TYPE_SMALL;
}
dma_flush_range(pte, num_ptes * sizeof(u32));
}
- addr += chunk;
+ virt_addr += chunk;
+ phys_addr += chunk;
size -= chunk;
}
@@ -251,12 +321,33 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
return 0;
}
+static void create_sections(unsigned long first, unsigned long last,
+ unsigned int flags)
+{
+ uint32_t *ttb = get_ttb();
+ unsigned long ttb_start = pgd_index(first);
+ unsigned long ttb_end = pgd_index(last) + 1;
+ unsigned int i, addr = first;
+
+ for (i = ttb_start; i < ttb_end; i++) {
+ ttb[i] = addr | flags;
+ addr += PGDIR_SIZE;
+ }
+}
+
+static inline void create_flat_mapping(void)
+{
+ /* create a flat mapping using 1MiB sections */
+ create_sections(0, 0xffffffff, attrs_uncached_mem());
+}
+
void *map_io_sections(unsigned long phys, void *_start, size_t size)
{
unsigned long start = (unsigned long)_start, sec;
+ uint32_t *ttb = get_ttb();
for (sec = start; sec < start + size; sec += PGDIR_SIZE, phys += PGDIR_SIZE)
- ttb[pgd_index(sec)] = phys | pgd_flags_uncached;
+ ttb[pgd_index(sec)] = phys | get_pmd_flags(MAP_UNCACHED);
dma_flush_range(ttb, 0x4000);
tlb_invalidate();
@@ -297,9 +388,9 @@ static void create_vector_table(unsigned long adr)
vectors = xmemalign(PAGE_SIZE, PAGE_SIZE);
pr_debug("Creating vector table, virt = 0x%p, phys = 0x%08lx\n",
vectors, adr);
- arm_create_pte(adr, pte_flags_uncached);
+ arm_create_pte(adr, adr, get_pte_flags(MAP_UNCACHED));
pte = find_pte(adr);
- *pte = (u32)vectors | PTE_TYPE_SMALL | pte_flags_cached;
+ *pte = (u32)vectors | PTE_TYPE_SMALL | get_pte_flags(MAP_CACHED);
}
arm_fixup_vectors();
@@ -358,7 +449,6 @@ static int set_vector_table(unsigned long adr)
static void create_zero_page(void)
{
struct resource *zero_sdram;
- u32 *zero;
zero_sdram = request_sdram_region("zero page", 0x0, PAGE_SIZE);
if (zero_sdram) {
@@ -368,8 +458,7 @@ static void create_zero_page(void)
*/
pr_debug("zero page is in SDRAM area, currently not supported\n");
} else {
- zero = arm_create_pte(0x0, pte_flags_uncached);
- zero[0] = 0;
+ zero_page_faulting();
pr_debug("Created zero page\n");
}
}
@@ -413,67 +502,43 @@ static void vectors_init(void)
void __mmu_init(bool mmu_on)
{
struct memory_bank *bank;
+ uint32_t *ttb = get_ttb();
- arm_set_cache_functions();
-
- if (cpu_architecture() >= CPU_ARCH_ARMv7) {
- pte_flags_cached = PTE_FLAGS_CACHED_V7;
- pte_flags_wc = PTE_FLAGS_WC_V7;
- pgd_flags_wc = PGD_FLAGS_WC_V7;
- pgd_flags_uncached = PGD_FLAGS_UNCACHED_V7;
- pte_flags_uncached = PTE_FLAGS_UNCACHED_V7;
- } else {
- pte_flags_cached = PTE_FLAGS_CACHED_V4;
- pte_flags_wc = PTE_FLAGS_UNCACHED_V4;
- pgd_flags_wc = PMD_SECT_DEF_UNCACHED;
- pgd_flags_uncached = PMD_SECT_DEF_UNCACHED;
- pte_flags_uncached = PTE_FLAGS_UNCACHED_V4;
- }
-
- if (mmu_on) {
+ if (!request_sdram_region("ttb", (unsigned long)ttb, SZ_16K))
/*
- * Early MMU code has already enabled the MMU. We assume a
- * flat 1:1 section mapping in this case.
+ * This can mean that:
+ * - the early MMU code has put the ttb into a place
+ * which we don't have inside our available memory
+ * - Somebody else has occupied the ttb region which means
+ * the ttb will get corrupted.
*/
- /* Clear unpredictable bits [13:0] */
- ttb = (uint32_t *)(get_ttbr() & ~0x3fff);
-
- if (!request_sdram_region("ttb", (unsigned long)ttb, SZ_16K))
- /*
- * This can mean that:
- * - the early MMU code has put the ttb into a place
- * which we don't have inside our available memory
- * - Somebody else has occupied the ttb region which means
- * the ttb will get corrupted.
- */
- pr_crit("Critical Error: Can't request SDRAM region for ttb at %p\n",
+ pr_crit("Critical Error: Can't request SDRAM region for ttb at %p\n",
ttb);
- } else {
- ttb = xmemalign(ARM_TTB_SIZE, ARM_TTB_SIZE);
-
- set_ttbr(ttb);
-
- /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
- if (cpu_architecture() >= CPU_ARCH_ARMv7)
- set_domain(DOMAIN_CLIENT);
- else
- set_domain(DOMAIN_MANAGER);
-
- create_flat_mapping(ttb);
- __mmu_cache_flush();
- }
pr_debug("ttb: 0x%p\n", ttb);
vectors_init();
+ /*
+ * Early mmu init will have mapped everything but the initial memory area
+ * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
+ * all memory banks, so let's map all pages, excluding reserved memory areas,
+ * cacheable and executable.
+ */
for_each_memory_bank(bank) {
- create_sections(ttb, bank->start, bank->start + bank->size - 1,
- PMD_SECT_DEF_CACHED);
- __mmu_cache_flush();
- }
+ struct resource *rsv;
+ resource_size_t pos;
- __mmu_cache_on();
+ pos = bank->start;
+
+ for_each_reserved_region(bank, rsv) {
+ remap_range((void *)rsv->start, resource_size(rsv), MAP_UNCACHED);
+ remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+ pos = rsv->end + 1;
+ }
+
+ remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
+ }
}
/*
@@ -494,20 +559,30 @@ void *dma_alloc_writecombine(size_t size, dma_addr_t *dma_handle)
return dma_alloc_map(size, dma_handle, ARCH_MAP_WRITECOMBINE);
}
-void dma_sync_single_for_device(dma_addr_t address, size_t size,
- enum dma_data_direction dir)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
{
+ uint32_t *ttb = (uint32_t *)arm_mem_ttb(membase + memsize);
+
+ pr_debug("enabling MMU, ttb @ 0x%p\n", ttb);
+
+ set_ttbr(ttb);
+
+ /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
+ if (cpu_architecture() >= CPU_ARCH_ARMv7)
+ set_domain(DOMAIN_CLIENT);
+ else
+ set_domain(DOMAIN_MANAGER);
+
/*
- * FIXME: This function needs a device argument to support non 1:1 mappings
+ * This marks the whole address space as uncachable as well as
+ * unexecutable if possible
*/
+ create_flat_mapping();
- if (dir == DMA_FROM_DEVICE) {
- __dma_inv_range(address, address + size);
- if (outer_cache.inv_range)
- outer_cache.inv_range(address, address + size);
- } else {
- __dma_clean_range(address, address + size);
- if (outer_cache.clean_range)
- outer_cache.clean_range(address, address + size);
- }
+ /* maps main memory as cachable */
+ remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+ remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
+ remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+
+ __mmu_cache_on();
}
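
The heart of the rewritten arch_remap_range() is the section-versus-page
decision; note the added requirement that phys, and not just virt, be
section-aligned, since mappings are no longer necessarily 1:1. A
simplified standalone model of that loop (printf stands in for the
actual table writes):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 0x1000u
    #define SECT_SZ 0x100000u  /* 1 MiB section */

    static void remap(uint32_t virt, uint32_t phys, uint32_t size)
    {
            while (size) {
                    uint32_t chunk;

                    if (size >= SECT_SZ && !(virt % SECT_SZ) &&
                        !(phys % SECT_SZ)) {
                            chunk = SECT_SZ;  /* one whole section entry */
                            printf("section 0x%08x -> 0x%08x\n", virt, phys);
                    } else {
                            /* pages up to the next section boundary or end */
                            chunk = SECT_SZ - (virt % SECT_SZ);
                            if (chunk > size)
                                    chunk = size;
                            printf("%u pages 0x%08x -> 0x%08x\n",
                                   chunk / PAGE_SZ, virt, phys);
                    }

                    virt += chunk;
                    phys += chunk;
                    size -= chunk;
            }
    }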
diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu_32.h
index 1499b70dd6..607d9e8608 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu_32.h
@@ -56,20 +56,6 @@ static inline void set_domain(unsigned val)
asm volatile ("mcr p15,0,%0,c3,c0,0" : : "r"(val) /*:*/);
}
-static inline void
-create_sections(uint32_t *ttb, unsigned long first,
- unsigned long last, unsigned int flags)
-{
- unsigned long ttb_start = pgd_index(first);
- unsigned long ttb_end = pgd_index(last) + 1;
- unsigned int i, addr = first;
-
- for (i = ttb_start; i < ttb_end; i++) {
- ttb[i] = addr | flags;
- addr += PGDIR_SIZE;
- }
-}
-
#define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
#define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
@@ -83,10 +69,4 @@ static inline unsigned long attrs_uncached_mem(void)
return flags;
}
-static inline void create_flat_mapping(uint32_t *ttb)
-{
- /* create a flat mapping using 1MiB sections */
- create_sections(ttb, 0, 0xffffffff, attrs_uncached_mem());
-}
-
#endif /* __ARM_MMU_H */
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index f43ac9a121..cdc4825422 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -22,7 +22,10 @@
#include "mmu_64.h"
-static uint64_t *ttb;
+static uint64_t *get_ttb(void)
+{
+ return (uint64_t *)get_ttbr(current_el());
+}
static void set_table(uint64_t *pt, uint64_t *table_addr)
{
@@ -32,7 +35,20 @@ static void set_table(uint64_t *pt, uint64_t *table_addr)
*pt = val;
}
-static uint64_t *create_table(void)
+#ifdef __PBL__
+static uint64_t *alloc_pte(void)
+{
+ static unsigned int idx;
+
+ idx++;
+
+ if (idx * GRANULE_SIZE >= ARM_EARLY_PAGETABLE_SIZE)
+ return NULL;
+
+ return get_ttb() + idx * GRANULE_SIZE;
+}
+#else
+static uint64_t *alloc_pte(void)
{
uint64_t *new_table = xmemalign(GRANULE_SIZE, GRANULE_SIZE);
@@ -41,6 +57,7 @@ static uint64_t *create_table(void)
return new_table;
}
+#endif
static __maybe_unused uint64_t *find_pte(uint64_t addr)
{
@@ -49,7 +66,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
uint64_t idx;
int i;
- pte = ttb;
+ pte = get_ttb();
for (i = 0; i < 4; i++) {
block_shift = level2shift(i);
@@ -81,7 +98,7 @@ static void split_block(uint64_t *pte, int level)
/* level describes the parent level, we need the child ones */
levelshift = level2shift(level + 1);
- new_table = create_table();
+ new_table = alloc_pte();
for (i = 0; i < MAX_PTE_ENTRIES; i++) {
new_table[i] = old_pte | (i << levelshift);
@@ -98,6 +115,7 @@ static void split_block(uint64_t *pte, int level)
static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
uint64_t attr)
{
+ uint64_t *ttb = get_ttb();
uint64_t block_size;
uint64_t block_shift;
uint64_t *pte;
@@ -107,9 +125,6 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
uint64_t type;
int level;
- if (!ttb)
- arm_mmu_not_initialized_error();
-
addr = virt;
attr &= ~PTE_TYPE_MASK;
@@ -123,7 +138,8 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
pte = table + idx;
- if (size >= block_size && IS_ALIGNED(addr, block_size)) {
+ if (size >= block_size && IS_ALIGNED(addr, block_size) &&
+ IS_ALIGNED(phys, block_size)) {
type = (level == 3) ?
PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
*pte = phys | attr | type;
@@ -143,7 +159,7 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
tlb_invalidate();
}
-int arch_remap_range(void *_start, size_t size, unsigned flags)
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
{
unsigned long attrs;
@@ -154,12 +170,14 @@ int arch_remap_range(void *_start, size_t size, unsigned flags)
case MAP_UNCACHED:
attrs = attrs_uncached_mem();
break;
+ case MAP_FAULT:
+ attrs = 0x0;
+ break;
default:
return -EINVAL;
}
- create_sections((uint64_t)_start, (uint64_t)_start, (uint64_t)size,
- attrs);
+ create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
return 0;
}
@@ -169,53 +187,31 @@ static void mmu_enable(void)
set_cr(get_cr() | CR_M | CR_C | CR_I);
}
-void zero_page_access(void)
-{
- create_sections(0x0, 0x0, PAGE_SIZE, CACHED_MEM);
-}
-
-void zero_page_faulting(void)
-{
- create_sections(0x0, 0x0, PAGE_SIZE, 0x0);
-}
-
/*
* Prepare MMU for usage enable it.
*/
void __mmu_init(bool mmu_on)
{
struct memory_bank *bank;
- unsigned int el;
-
- if (mmu_on)
- mmu_disable();
- ttb = create_table();
- el = current_el();
- set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el, BITS_PER_VA),
- MEMORY_ATTRIBUTES);
-
- pr_debug("ttb: 0x%p\n", ttb);
-
- /* create a flat mapping */
- create_sections(0, 0, 1UL << (BITS_PER_VA - 1), attrs_uncached_mem());
-
- /* Map sdram cached. */
for_each_memory_bank(bank) {
struct resource *rsv;
+ resource_size_t pos;
- create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
+ pos = bank->start;
for_each_reserved_region(bank, rsv) {
- create_sections(resource_first_page(rsv), resource_first_page(rsv),
- resource_count_pages(rsv), attrs_uncached_mem());
+ remap_range((void *)resource_first_page(rsv),
+ resource_count_pages(rsv), MAP_UNCACHED);
+ remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+ pos = rsv->end + 1;
}
+
+ remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
}
/* Make zero page faulting to catch NULL pointer derefs */
zero_page_faulting();
-
- mmu_enable();
}
void mmu_disable(void)
@@ -249,15 +245,36 @@ void dma_flush_range(void *ptr, size_t size)
v8_flush_dcache_range(start, end);
}
-void dma_sync_single_for_device(dma_addr_t address, size_t size,
- enum dma_data_direction dir)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
+{
+ int el;
+ unsigned long ttb = arm_mem_ttb(membase + memsize);
+
+ pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
+
+ el = current_el();
+ set_ttbr_tcr_mair(el, ttb, calc_tcr(el, BITS_PER_VA), MEMORY_ATTRIBUTES);
+
+ memset((void *)ttb, 0, GRANULE_SIZE);
+
+ remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
+ remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+ remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
+
+ mmu_enable();
+}
+
+void mmu_early_disable(void)
{
- /*
- * FIXME: This function needs a device argument to support non 1:1 mappings
- */
-
- if (dir == DMA_FROM_DEVICE)
- v8_inv_dcache_range(address, address + size - 1);
- else
- v8_flush_dcache_range(address, address + size - 1);
+ unsigned int cr;
+
+ cr = get_cr();
+ cr &= ~(CR_M | CR_C);
+
+ set_cr(cr);
+ v8_flush_dcache_all();
+ tlb_invalidate();
+
+ dsb();
+ isb();
}
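
Also worth noting in both PBL alloc_pte() variants: page tables are
carved out of the fixed ARM_EARLY_PAGETABLE_SIZE region below the
stack, because no heap exists yet at that point. A standalone sketch of
the bump-allocator idea (sizes assumed, not the barebox code itself):

    #include <stdint.h>
    #include <stddef.h>

    #define GRANULE_SIZE  4096u
    #define EARLY_PT_SIZE (64u * 1024)  /* ARM_EARLY_PAGETABLE_SIZE */

    static uint8_t early_pt[EARLY_PT_SIZE]
            __attribute__((aligned(GRANULE_SIZE)));

    static void *alloc_granule(void)
    {
            static size_t off = GRANULE_SIZE;  /* granule 0 holds the TTB */
            void *p;

            if (off + GRANULE_SIZE > EARLY_PT_SIZE)
                    return NULL;               /* region exhausted */

            p = early_pt + off;
            off += GRANULE_SIZE;
            return p;
    }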
diff --git a/arch/arm/cpu/mmuinfo.c b/arch/arm/cpu/mmuinfo.c
index 1147c0a305..44d6980a75 100644
--- a/arch/arm/cpu/mmuinfo.c
+++ b/arch/arm/cpu/mmuinfo.c
@@ -1,91 +1,85 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2012 Jan Luebbe <j.luebbe@pengutronix.de>, Pengutronix
/*
- * mmuinfo.c - Show MMU/cache information from cp15 registers
+ * mmuinfo.c - Show MMU/cache information
*/
#include <common.h>
#include <command.h>
+#include <getopt.h>
+#include <asm/mmuinfo.h>
+#include <asm/system_info.h>
+#include <zero_page.h>
+#include <mmu.h>
-static char *inner_attr[] = {
- "0b000 Non-cacheable",
- "0b001 Strongly-ordered",
- "0b010 (reserved)",
- "0b011 Device",
- "0b100 (reserved)",
- "0b101 Write-Back, Write-Allocate",
- "0b110 Write-Through",
- "0b111 Write-Back, no Write-Allocate",
-};
-
-static char *outer_attr[] = {
- "0b00 Non-cacheable",
- "0b01 Write-Back, Write-Allocate",
- "0b10 Write-Through, no Write-Allocate",
- "0b11 Write-Back, no Write-Allocate",
-};
-
-static void decode_par(unsigned long par)
+int mmuinfo(void *addr)
{
- printf(" Physical Address [31:12]: 0x%08lx\n", par & 0xFFFFF000);
- printf(" Reserved [11]: 0x%lx\n", (par >> 11) & 0x1);
- printf(" Not Outer Shareable [10]: 0x%lx\n", (par >> 10) & 0x1);
- printf(" Non-Secure [9]: 0x%lx\n", (par >> 9) & 0x1);
- printf(" Impl. def. [8]: 0x%lx\n", (par >> 8) & 0x1);
- printf(" Shareable [7]: 0x%lx\n", (par >> 7) & 0x1);
- printf(" Inner mem. attr. [6:4]: 0x%lx (%s)\n", (par >> 4) & 0x7,
- inner_attr[(par >> 4) & 0x7]);
- printf(" Outer mem. attr. [3:2]: 0x%lx (%s)\n", (par >> 2) & 0x3,
- outer_attr[(par >> 2) & 0x3]);
- printf(" SuperSection [1]: 0x%lx\n", (par >> 1) & 0x1);
- printf(" Failure [0]: 0x%lx\n", (par >> 0) & 0x1);
+ if (IS_ENABLED(CONFIG_CPU_V8))
+ return mmuinfo_v8(addr);
+ if (IS_ENABLED(CONFIG_CPU_V7) && cpu_architecture() == CPU_ARCH_ARMv7)
+ return mmuinfo_v7(addr);
+
+ return -ENOSYS;
}
-static int do_mmuinfo(int argc, char *argv[])
+static __maybe_unused int do_mmuinfo(int argc, char *argv[])
{
- unsigned long addr = 0, priv_read, priv_write;
+ unsigned long addr;
+ int access_zero_page = -1;
+ int opt;
- if (argc < 2)
- return COMMAND_ERROR_USAGE;
+ while ((opt = getopt(argc, argv, "zZ")) > 0) {
+ switch (opt) {
+ case 'z':
+ access_zero_page = true;
+ break;
+ case 'Z':
+ access_zero_page = false;
+ break;
+ default:
+ return COMMAND_ERROR_USAGE;
+ }
+ }
- addr = strtoul_suffix(argv[1], NULL, 0);
+ if (access_zero_page >= 0) {
+ if (argc - optind != 0)
+ return COMMAND_ERROR_USAGE;
- __asm__ __volatile__(
- "mcr p15, 0, %0, c7, c8, 0 @ write VA to PA translation (priv read)\n"
- :
- : "r" (addr)
- : "memory");
+ if (!zero_page_remappable()) {
+ pr_warn("No architecture support for zero page remap\n");
+ return -ENOSYS;
+ }
- __asm__ __volatile__(
- "mrc p15, 0, %0, c7, c4, 0 @ read PAR\n"
- : "=r" (priv_read)
- :
- : "memory");
+ if (access_zero_page)
+ zero_page_access();
+ else
+ zero_page_faulting();
- __asm__ __volatile__(
- "mcr p15, 0, %0, c7, c8, 1 @ write VA to PA translation (priv write)\n"
- :
- : "r" (addr)
- : "memory");
+ return 0;
+ }
- __asm__ __volatile__(
- "mrc p15, 0, %0, c7, c4, 0 @ read PAR\n"
- : "=r" (priv_write)
- :
- : "memory");
+ if (argc - optind != 1)
+ return COMMAND_ERROR_USAGE;
- printf("PAR result for 0x%08lx: \n", addr);
- printf(" privileged read: 0x%08lx\n", priv_read);
- decode_par(priv_read);
- printf(" privileged write: 0x%08lx\n", priv_write);
- decode_par(priv_write);
+ addr = strtoul_suffix(argv[1], NULL, 0);
- return 0;
+ return mmuinfo((void *)addr);
}
+BAREBOX_CMD_HELP_START(mmuinfo)
+BAREBOX_CMD_HELP_TEXT("Show MMU/cache information using the cp15/model-specific registers.")
+BAREBOX_CMD_HELP_TEXT("")
+BAREBOX_CMD_HELP_TEXT("Options:")
+BAREBOX_CMD_HELP_OPT ("-z", "enable access to zero page")
+BAREBOX_CMD_HELP_OPT ("-Z", "disable access to zero page")
+BAREBOX_CMD_HELP_END
+
+#ifdef CONFIG_COMMAND_SUPPORT
BAREBOX_CMD_START(mmuinfo)
.cmd = do_mmuinfo,
BAREBOX_CMD_DESC("show MMU/cache information of an address")
- BAREBOX_CMD_OPTS("ADDRESS")
+ BAREBOX_CMD_OPTS("[-zZ | ADDRESS]")
BAREBOX_CMD_GROUP(CMD_GRP_INFO)
+ BAREBOX_CMD_HELP(cmd_mmuinfo_help)
BAREBOX_CMD_END
+#endif
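
Illustrative invocations of the extended command (the address is made up):

    mmuinfo 0x4000      # decode the translation of one address
    mmuinfo -z          # temporarily make the zero page accessible
    mmuinfo -Z          # make it faulting again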
diff --git a/arch/arm/cpu/mmuinfo_32.c b/arch/arm/cpu/mmuinfo_32.c
new file mode 100644
index 0000000000..e26dabc9b3
--- /dev/null
+++ b/arch/arm/cpu/mmuinfo_32.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2012 Jan Luebbe <j.luebbe@pengutronix.de>, Pengutronix
+/*
+ * mmuinfo_32.c - Show MMU/cache information from cp15 registers
+ */
+
+#include <common.h>
+#include <asm/mmuinfo.h>
+
+static char *inner_attr[] = {
+ "0b000 Non-cacheable",
+ "0b001 Strongly-ordered",
+ "0b010 (reserved)",
+ "0b011 Device",
+ "0b100 (reserved)",
+ "0b101 Write-Back, Write-Allocate",
+ "0b110 Write-Through",
+ "0b111 Write-Back, no Write-Allocate",
+};
+
+static char *outer_attr[] = {
+ "0b00 Non-cacheable",
+ "0b01 Write-Back, Write-Allocate",
+ "0b10 Write-Through, no Write-Allocate",
+ "0b11 Write-Back, no Write-Allocate",
+};
+
+static void decode_par(unsigned long par)
+{
+ printf(" Physical Address [31:12]: 0x%08lx\n", par & 0xFFFFF000);
+ printf(" Reserved [11]: 0x%lx\n", (par >> 11) & 0x1);
+ printf(" Not Outer Shareable [10]: 0x%lx\n", (par >> 10) & 0x1);
+ printf(" Non-Secure [9]: 0x%lx\n", (par >> 9) & 0x1);
+ printf(" Impl. def. [8]: 0x%lx\n", (par >> 8) & 0x1);
+ printf(" Shareable [7]: 0x%lx\n", (par >> 7) & 0x1);
+ printf(" Inner mem. attr. [6:4]: 0x%lx (%s)\n", (par >> 4) & 0x7,
+ inner_attr[(par >> 4) & 0x7]);
+ printf(" Outer mem. attr. [3:2]: 0x%lx (%s)\n", (par >> 2) & 0x3,
+ outer_attr[(par >> 2) & 0x3]);
+ printf(" SuperSection [1]: 0x%lx\n", (par >> 1) & 0x1);
+ printf(" Failure [0]: 0x%lx\n", (par >> 0) & 0x1);
+}
+
+int mmuinfo_v7(void *_addr)
+{
+ unsigned long addr = (unsigned long)_addr;
+ unsigned long priv_read, priv_write;
+
+ __asm__ __volatile__(
+ "mcr p15, 0, %0, c7, c8, 0 @ write VA to PA translation (priv read)\n"
+ :
+ : "r" (addr)
+ : "memory");
+
+ __asm__ __volatile__(
+ "mrc p15, 0, %0, c7, c4, 0 @ read PAR\n"
+ : "=r" (priv_read)
+ :
+ : "memory");
+
+ __asm__ __volatile__(
+ "mcr p15, 0, %0, c7, c8, 1 @ write VA to PA translation (priv write)\n"
+ :
+ : "r" (addr)
+ : "memory");
+
+ __asm__ __volatile__(
+ "mrc p15, 0, %0, c7, c4, 0 @ read PAR\n"
+ : "=r" (priv_write)
+ :
+ : "memory");
+
+ printf("PAR result for 0x%08lx: \n", addr);
+ printf(" privileged read: 0x%08lx\n", priv_read);
+ decode_par(priv_read);
+ printf(" privileged write: 0x%08lx\n", priv_write);
+ decode_par(priv_write);
+
+ return 0;
+}
diff --git a/arch/arm/cpu/mmuinfo_64.c b/arch/arm/cpu/mmuinfo_64.c
new file mode 100644
index 0000000000..de4945f43e
--- /dev/null
+++ b/arch/arm/cpu/mmuinfo_64.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2023 Ahmad Fatoum <a.fatoum@pengutronix.de>, Pengutronix
+/*
+ * mmuinfo_64.c - Show MMU/cache information via AT instruction
+ */
+
+#include <common.h>
+#include <asm/mmuinfo.h>
+#include <asm/system.h>
+#include <asm/sysreg.h>
+#include <linux/bitfield.h>
+
+#define at_par(reg, addr) ({ \
+ asm volatile("at " reg ", %0\n" :: "r" (addr)); \
+ isb(); \
+ read_sysreg_par(); \
+})
+
+#define BITS(from, to, val) FIELD_GET(GENMASK(from, to), val)
+
+static const char *decode_devmem_attr(u8 attr)
+{
+ switch (attr & ~0x1) {
+ case 0b00000000:
+ return "0b0000 Device-nGnRnE memory";
+ case 0b00000100:
+ return "0b0100 Device-nGnRE memory";
+ case 0b00001000:
+ return "0b1000 Device-nGRE memory";
+ case 0b00001100:
+ return "0b1100 Device-GRE memory";
+ default:
+ return "unknown";
+ };
+}
+
+static char *cache_attr[] = {
+ "0b0000 Wrongly decoded",
+ "0b0001 Write-Through Transient, Write-Allocate, no Read-Allocate",
+ "0b0010 Write-Through Transient, no Write-Allocate",
+ "0b0011 Write-Through Transient, Write-Allocate",
+ "0b0100 Non-Cacheable",
+ "0b0101 Write-Back Transient, Write-Allocate, no Read-Allocate",
+ "0b0110 Write-Back Transient, no Write-Allocate",
+ "0b0111 Write-Back Transient, Write-Allocate",
+ "0b1000 Write-Through Non-transient, no Write-Allocate no Read-Allocate",
+ "0b1001 Write-Through Non-transient, Write-Allocate no Read-Allocate",
+ "0b1010 Write-Through Non-transient, no Write-Allocate",
+ "0b1011 Write-Through Non-transient, Write-Allocate",
+ "0b1100 Write-Back Non-transient, no Write-Allocate no Read-Allocate",
+ "0b1101 Write-Back Non-transient, Write-Allocate no Read-Allocate",
+ "0b1110 Write-Back Non-transient, no Write-Allocate",
+ "0b1111 Write-Back Non-transient, Write-Allocate",
+};
+
+static char *share_attr[] = {
+ "0b00 Non-Shareable",
+ "0b01 Reserved",
+ "0b10 Outer Shareable",
+ "0b11 Inner Shareable",
+};
+
+static char *stage_fault[] = {
+ "stage 1 translation",
+ "stage 2 translation",
+};
+
+static char *fault_status_leveled[] = {
+ "Address size fault", /* of translation or translation table base register */
+ "Translation fault",
+ "Access flag fault",
+ "Permission fault",
+ "Synchronous External abort", /* level -1 */
+ "Synchronous External abort", /* on translation table walk or hardware update of translation table */
+ "Synchronous parity or ECC error", /* level -1 */
+ "Synchronous parity or ECC error", /* on memory access on translation table walk or hardware update of translation table */
+};
+
+static const char *decode_fault_status_level(u8 fst)
+{
+ if (!(fst & BIT(5)))
+ return "";
+
+ switch (BITS(5, 0, fst)) {
+ case 0b010011:
+ case 0b011011:
+ return ", level -1";
+ }
+
+ switch (BITS(1, 0, fst)) {
+ case 0b00:
+ return ", level 0";
+ case 0b01:
+ return ", level 1";
+ case 0b10:
+ return ", level 2";
+ case 0b11:
+ return ", level 3";
+ }
+
+ BUG();
+}
+
+static const char *decode_fault_status(u8 fst)
+{
+
+ switch (BITS(5, 0, fst)) {
+ case 0b101001: /* When FEAT_LPA2 is implemented */
+ return "Address size fault, level -1";
+ case 0b101011: /* When FEAT_LPA2 is implemented */
+ return "Translation fault, level -1";
+ case 0b110000:
+ return "TLB conflict abort";
+ case 0b110001: /* When FEAT_HAFDBS is implemented */
+ return "Unsupported atomic hardware update fault";
+ case 0b111101: /* When EL1 is capable of using AArch32 */
+ return "Section Domain fault, from an AArch32 stage 1 EL1&0 "
+ "translation regime using Short-descriptor translation "
+ "table format";
+ case 0b111110: /* When EL1 is capable of using AArch32 */
+ return "Page Domain fault, from an AArch32 stage 1 EL1&0 "
+ "translation regime using Short-descriptor translation "
+ "table format";
+ default:
+ if (fst & BIT(5))
+ return fault_status_leveled[BITS(4, 2, fst)];
+
+ return "Reserved";
+ }
+};
+
+static void decode_par(unsigned long par)
+{
+ u8 devmem_attr = BITS(63, 56, par);
+
+ if (par & 1) {
+ printf(" Translation aborted [9:8]: because of a fault in the %s%s\n",
+ stage_fault[BITS(9, 9, par)],
+ BITS(8, 8, par) ? " during a stage 1 translation table walk" : "");
+ printf(" Fault Status Code [6:1]: 0x%02lx (%s%s)\n", BITS(6, 1, par),
+ decode_fault_status(BITS(6, 1, par)),
+ decode_fault_status_level(BITS(6, 1, par)));
+ printf(" Failure [0]: 0x1\n");
+ } else {
+ if ((devmem_attr & 0xf0) && (devmem_attr & 0x0f)) {
+ printf(" Outer mem. attr. [63:60]: 0x%02lx (%s)\n", BITS(63, 60, par),
+ cache_attr[BITS(63, 60, par)]);
+ printf(" Inner mem. attr. [59:56]: 0x%02lx (%s)\n", BITS(59, 56, par),
+ cache_attr[BITS(59, 56, par)]);
+ } else if ((devmem_attr & 0b11110010) == 0) {
+ printf(" Memory attr. [63:56]: 0x%02x (%s)\n",
+ devmem_attr, decode_devmem_attr(devmem_attr));
+ if (devmem_attr & 1)
+ printf(" (XS == 0 if FEAT_XS implemented)\n");
+ } else if (devmem_attr == 0b01000000) {
+ printf(" Outer mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Non-Cacheable");
+ printf(" Inner mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Non-Cacheable");
+ printf(" (XS == 0 if FEAT_XS implemented)\n");
+ } else if (devmem_attr == 0b10100000) {
+ printf(" Outer mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Write-Through, No Write-Allocate");
+ printf(" Inner mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Write-Through");
+ printf(" (XS == 0 if FEAT_XS implemented)\n");
+ } else if (devmem_attr == 0b11110000) {
+ printf(" Outer mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Write-Back");
+ printf(" Inner mem. attr. [63:56]: 0x%02lx (%s)\n", BITS(63, 56, par),
+ "Write-Back, Write-Allocate, Non-transient");
+ printf(" (if FEAT_MTE2 implemented)\n");
+ }
+ printf(" Physical Address [51:12]: 0x%08lx\n", par & GENMASK(51, 12));
+ printf(" Non-Secure [9]: 0x%lx\n", BITS(9, 9, par));
+ printf(" Shareability attr. [8:7]: 0x%02lx (%s)\n", BITS(8, 7, par),
+ share_attr[BITS(8, 7, par)]);
+ printf(" Failure [0]: 0x0\n");
+ }
+}
+
+int mmuinfo_v8(void *_addr)
+{
+ unsigned long addr = (unsigned long)_addr;
+ unsigned long priv_read, priv_write;
+
+ switch (current_el()) {
+ case 3:
+ priv_read = at_par("s1e3r", addr);
+ priv_write = at_par("s1e3w", addr);
+ break;
+ case 2:
+ priv_read = at_par("s1e2r", addr);
+ priv_write = at_par("s1e2w", addr);
+ break;
+ case 1:
+ priv_read = at_par("s1e1r", addr);
+ priv_write = at_par("s1e1w", addr);
+ break;
+ case 0:
+ priv_read = at_par("s1e0r", addr);
+ priv_write = at_par("s1e0w", addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ printf("PAR result for 0x%08lx: \n", addr);
+ printf(" privileged read: 0x%08lx\n", priv_read);
+ decode_par(priv_read);
+ printf(" privileged write: 0x%08lx\n", priv_write);
+ decode_par(priv_write);
+
+ return 0;
+}
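
The BITS() helper above is just FIELD_GET() over a GENMASK(); a minimal
stand-alone equivalent for readers without linux/bitfield.h at hand
(64-bit only, the PAR value is fabricated):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(h, l) ((~0ULL >> (63 - (h))) & (~0ULL << (l)))
    #define BITS(h, l, v)   (((v) & GENMASK64(h, l)) >> (l))

    int main(void)
    {
            uint64_t par = 0x180;  /* fabricated PAR with SH = 0b11 */

            printf("SH[8:7] = %llu\n",
                   (unsigned long long)BITS(8, 7, par));  /* prints 3 */
            return 0;
    }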
diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc_32.S
index eafc9b52c6..eafc9b52c6 100644
--- a/arch/arm/cpu/setupc.S
+++ b/arch/arm/cpu/setupc_32.S
diff --git a/arch/arm/cpu/sm.c b/arch/arm/cpu/sm.c
index f5a1edbd4f..53f5142b63 100644
--- a/arch/arm/cpu/sm.c
+++ b/arch/arm/cpu/sm.c
@@ -19,8 +19,7 @@
#include <linux/arm-smccc.h>
#include <asm-generic/sections.h>
#include <asm/secure.h>
-
-#include "mmu.h"
+#include "mmu_32.h"
static unsigned int read_id_pfr1(void)
{
diff --git a/arch/arm/cpu/smccc-call.S b/arch/arm/cpu/smccc-call_32.S
index 9875e1f947..9875e1f947 100644
--- a/arch/arm/cpu/smccc-call.S
+++ b/arch/arm/cpu/smccc-call_32.S
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index be303514c2..2e987ec41d 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -111,7 +111,7 @@ static inline unsigned long arm_mem_boarddata(unsigned long membase,
unsigned long arm_mem_ramoops_get(void)
{
- return arm_mem_ramoops(0, arm_stack_top);
+ return arm_mem_ramoops(arm_stack_top);
}
EXPORT_SYMBOL_GPL(arm_mem_ramoops_get);
@@ -163,22 +163,10 @@ __noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membas
arm_membase = membase;
arm_endmem = endmem;
- arm_stack_top = arm_mem_stack_top(membase, endmem);
+ arm_stack_top = arm_mem_stack_top(endmem);
arm_barebox_size = barebox_size;
malloc_end = barebox_base;
- if (IS_ENABLED(CONFIG_MMU_EARLY)) {
- unsigned long ttb = arm_mem_ttb(membase, endmem);
-
- if (IS_ENABLED(CONFIG_PBL_IMAGE)) {
- arm_set_cache_functions();
- } else {
- pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
- arm_early_mmu_cache_invalidate();
- mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
- }
- }
-
if (boarddata) {
uint32_t totalsize = 0;
const char *name;
@@ -228,6 +216,11 @@ __noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membas
mem_malloc_init((void *)malloc_start, (void *)malloc_end - 1);
+ if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_PBL_IMAGE)) {
+ arm_early_mmu_cache_invalidate();
+ mmu_early_enable(membase, memsize);
+ }
+
if (IS_ENABLED(CONFIG_BOOTM_OPTEE))
of_add_reserve_entry(endmem - OPTEE_SIZE, endmem - 1);
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 65de87f109..a481c4634d 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -81,14 +81,11 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
pr_debug("memory at 0x%08lx, size 0x%08lx\n", membase, memsize);
- if (IS_ENABLED(CONFIG_MMU_EARLY)) {
- unsigned long ttb = arm_mem_ttb(membase, endmem);
- pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
- mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
- }
+ if (IS_ENABLED(CONFIG_MMU))
+ mmu_early_enable(membase, memsize);
- free_mem_ptr = arm_mem_early_malloc(membase, endmem);
- free_mem_end_ptr = arm_mem_early_malloc_end(membase, endmem);
+ free_mem_ptr = arm_mem_early_malloc(endmem);
+ free_mem_end_ptr = arm_mem_early_malloc_end(endmem);
pr_debug("uncompressing barebox binary at 0x%p (size 0x%08x) to 0x%08lx (uncompressed size: 0x%08x)\n",
pg_start, pg_len, barebox_base, uncompressed_len);
diff --git a/arch/arm/include/asm/barebox-arm.h b/arch/arm/include/asm/barebox-arm.h
index 0cf4549cd7..eb31ca2788 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -23,11 +23,7 @@
#include <asm/reloc.h>
#include <linux/stringify.h>
-/*
- * We have a 4GiB address space split into 1MiB sections, with each
- * section header taking 4 bytes
- */
-#define ARM_TTB_SIZE (SZ_4G / SZ_1M * sizeof(u32))
+#define ARM_EARLY_PAGETABLE_SIZE SZ_64K
void __noreturn barebox_arm_entry(unsigned long membase, unsigned long memsize, void *boarddata);
@@ -71,46 +67,42 @@ static inline void arm_fixup_vectors(void)
void *barebox_arm_boot_dtb(void);
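+/*
+ * The helpers below carve fixed-purpose regions out of the top of
+ * memory, each derived from the one above it: OP-TEE at the very
+ * top, then bootrom scratch space, stack and the early page table.
+ */
+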
-#define __arm_mem_scratch(endmem) ((endmem) - SZ_32K)
-
-static inline const void *arm_mem_scratch_get(void)
+static inline unsigned long arm_mem_optee(unsigned long endmem)
{
- return (const void *)__arm_mem_scratch(arm_mem_endmem_get());
+ return endmem - OPTEE_SIZE;
}
-#define arm_mem_stack_top(membase, endmem) ((endmem) - SZ_64K - OPTEE_SIZE)
+static inline unsigned long arm_mem_scratch(unsigned long endmem)
+{
+ return arm_mem_optee(endmem) - SZ_32K;
+}
-static inline unsigned long arm_mem_stack(unsigned long membase,
- unsigned long endmem)
+static inline unsigned long arm_mem_stack(unsigned long endmem)
{
- return arm_mem_stack_top(membase, endmem) - STACK_SIZE;
+ return arm_mem_scratch(endmem) - STACK_SIZE;
}
-static inline unsigned long arm_mem_ttb(unsigned long membase,
- unsigned long endmem)
+static inline unsigned long arm_mem_ttb(unsigned long endmem)
{
- endmem = arm_mem_stack(membase, endmem);
- endmem = ALIGN_DOWN(endmem, ARM_TTB_SIZE) - ARM_TTB_SIZE;
+ endmem = arm_mem_stack(endmem);
+ endmem = ALIGN_DOWN(endmem, ARM_EARLY_PAGETABLE_SIZE) - ARM_EARLY_PAGETABLE_SIZE;
return endmem;
}
-static inline unsigned long arm_mem_early_malloc(unsigned long membase,
- unsigned long endmem)
+static inline unsigned long arm_mem_early_malloc(unsigned long endmem)
{
- return arm_mem_ttb(membase, endmem) - SZ_128K;
+ return arm_mem_ttb(endmem) - SZ_128K;
}
-static inline unsigned long arm_mem_early_malloc_end(unsigned long membase,
- unsigned long endmem)
+static inline unsigned long arm_mem_early_malloc_end(unsigned long endmem)
{
- return arm_mem_ttb(membase, endmem);
+ return arm_mem_ttb(endmem);
}
-static inline unsigned long arm_mem_ramoops(unsigned long membase,
- unsigned long endmem)
+static inline unsigned long arm_mem_ramoops(unsigned long endmem)
{
- endmem = arm_mem_ttb(membase, endmem);
+ endmem = arm_mem_ttb(endmem);
#ifdef CONFIG_FS_PSTORE_RAMOOPS
endmem -= CONFIG_FS_PSTORE_RAMOOPS_SIZE;
endmem = ALIGN_DOWN(endmem, SZ_4K);
@@ -119,11 +111,21 @@ static inline unsigned long arm_mem_ramoops(unsigned long membase,
return endmem;
}
+static inline unsigned long arm_mem_stack_top(unsigned long endmem)
+{
+ return arm_mem_stack(endmem) + STACK_SIZE;
+}
+
+static inline const void *arm_mem_scratch_get(void)
+{
+ return (const void *)arm_mem_scratch(arm_mem_endmem_get());
+}
+
static inline unsigned long arm_mem_barebox_image(unsigned long membase,
unsigned long endmem,
unsigned long size)
{
- endmem = arm_mem_ramoops(membase, endmem);
+ endmem = arm_mem_ramoops(endmem);
if (IS_ENABLED(CONFIG_RELOCATABLE)) {
return ALIGN_DOWN(endmem - size, SZ_1M);
@@ -135,10 +137,6 @@ static inline unsigned long arm_mem_barebox_image(unsigned long membase,
}
}
-#ifndef CONFIG_CPU_64
-#define __ARM_SETUP_STACK(name, stack_top) if (stack_top) arm_setup_stack(stack_top)
-#endif
-
/*
* Unlike ENTRY_FUNCTION, this can be used to setup stack for a C entry
* point on both ARM32 and ARM64. ENTRY_FUNCTION on ARM64 can only be used
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index b63776a74a..261c30129a 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -18,8 +18,6 @@ static inline void icache_invalidate(void)
#endif
}
-int arm_set_cache_functions(void);
-
void arm_early_mmu_cache_flush(void);
void arm_early_mmu_cache_invalidate(void);
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index fd8e93f7a3..ebf1e096c6 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -23,7 +23,7 @@ static inline void setup_dma_coherent(unsigned long offset)
#ifdef CONFIG_MMU
#define ARCH_HAS_REMAP
#define MAP_ARCH_DEFAULT MAP_CACHED
-int arch_remap_range(void *_start, size_t size, unsigned flags);
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags);
void *map_io_sections(unsigned long physaddr, void *start, size_t size);
#else
#define MAP_ARCH_DEFAULT MAP_UNCACHED
@@ -50,14 +50,21 @@ struct outer_cache_fns {
void (*disable)(void);
};
+#ifdef __PBL__
+/*
+ * Existing platforms with non-architected outer cache initialize it
+ * outside PBL and new ones will likely only have architected caches,
+ * so we provide a dummy here
+ */
+static __maybe_unused struct outer_cache_fns outer_cache;
+#else
extern struct outer_cache_fns outer_cache;
+#endif
void __dma_clean_range(unsigned long, unsigned long);
void __dma_flush_range(unsigned long, unsigned long);
void __dma_inv_range(unsigned long, unsigned long);
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb);
+void mmu_early_enable(unsigned long membase, unsigned long memsize);
void mmu_early_disable(void);
#endif /* __ASM_MMU_H */
diff --git a/arch/arm/include/asm/mmuinfo.h b/arch/arm/include/asm/mmuinfo.h
new file mode 100644
index 0000000000..3005c388b9
--- /dev/null
+++ b/arch/arm/include/asm/mmuinfo.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ARM_ASM_MMUINFO_H__
+#define __ARM_ASM_MMUINFO_H__
+
+int mmuinfo_v7(void *addr);
+int mmuinfo_v8(void *addr);
+
+#endif
diff --git a/arch/arm/include/asm/sysreg.h b/arch/arm/include/asm/sysreg.h
new file mode 100644
index 0000000000..7d567e08d8
--- /dev/null
+++ b/arch/arm/include/asm/sysreg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#include <asm/system.h>
+#include <linux/stringify.h>
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+/*
+ * The "Z" constraint normally means a zero immediate, but when combined with
+ * the "%x0" template means XZR.
+ */
+#define write_sysreg(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile("msr " __stringify(r) ", %x0" \
+ : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * For registers without architectural names, or simply unsupported by
+ * GAS.
+ */
+#define read_sysreg_s(r) ({ \
+ u64 __val; \
+ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
+ __val; \
+})
+
+#define write_sysreg_s(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg(__scs_new, sysreg); \
+} while (0)
+
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
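+
+/*
+ * Example (illustrative): set the SCTLR_EL1.C bit (bit 2) while
+ * leaving all other bits untouched; no write is issued if the bit
+ * is already set:
+ *
+ *	sysreg_clear_set(sctlr_el1, 0, BIT(2));
+ */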
+
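+/*
+ * The barriers ensure the PAR_EL1 read is ordered with respect to the
+ * preceding AT instruction whose result it reports, mirroring Linux's
+ * read_sysreg_par() helper.
+ */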
+#define read_sysreg_par() ({ \
+ u64 par; \
+ asm("dmb sy"); \
+ par = read_sysreg(par_el1); \
+ asm("dmb sy"); \
+ par; \
+})
+
+#endif /* __ASM_SYSREG_H */
diff --git a/arch/arm/mach-imx/atf.c b/arch/arm/mach-imx/atf.c
index 92820d9392..38bab9c051 100644
--- a/arch/arm/mach-imx/atf.c
+++ b/arch/arm/mach-imx/atf.c
@@ -134,14 +134,13 @@ void imx8mm_load_bl33(void *bl33)
__noreturn void imx8mm_load_and_start_image_via_tfa(void)
{
void *bl33 = (void *)MX8M_ATF_BL33_BASE_ADDR;
- void *endmem = (void *)MX8M_DDR_CSD1_BASE_ADDR +
- imx8m_barebox_earlymem_size(32);
+ unsigned long endmem = MX8M_DDR_CSD1_BASE_ADDR + imx8m_barebox_earlymem_size(32);
- imx8m_save_bootrom_log(__arm_mem_scratch(endmem));
+ imx8m_save_bootrom_log((void *)arm_mem_scratch(endmem));
imx8mm_load_bl33(bl33);
if (IS_ENABLED(CONFIG_FIRMWARE_IMX8MM_OPTEE))
- imx8m_load_and_start_optee_via_tfa(imx8mm, endmem - OPTEE_SIZE, bl33);
+ imx8m_load_and_start_optee_via_tfa(imx8mm, (void *)arm_mem_optee(endmem), bl33);
else
imx8mm_load_and_start_tfa(imx8mm_bl31_bin);
}
@@ -182,14 +181,13 @@ void imx8mp_load_bl33(void *bl33)
__noreturn void imx8mp_load_and_start_image_via_tfa(void)
{
void *bl33 = (void *)MX8M_ATF_BL33_BASE_ADDR;
- void *endmem = (void *)MX8M_DDR_CSD1_BASE_ADDR +
- imx8m_barebox_earlymem_size(32);
+ unsigned long endmem = MX8M_DDR_CSD1_BASE_ADDR + imx8m_barebox_earlymem_size(32);
- imx8m_save_bootrom_log(__arm_mem_scratch(endmem));
+ imx8m_save_bootrom_log((void *)arm_mem_scratch(endmem));
imx8mp_load_bl33(bl33);
if (IS_ENABLED(CONFIG_FIRMWARE_IMX8MP_OPTEE))
- imx8m_load_and_start_optee_via_tfa(imx8mp, endmem - OPTEE_SIZE, bl33);
+ imx8m_load_and_start_optee_via_tfa(imx8mp, (void *)arm_mem_optee(endmem), bl33);
else
imx8mp_load_and_start_tfa(imx8mp_bl31_bin);
}
@@ -231,14 +229,13 @@ void imx8mn_load_bl33(void *bl33)
__noreturn void imx8mn_load_and_start_image_via_tfa(void)
{
void *bl33 = (void *)MX8M_ATF_BL33_BASE_ADDR;
- void *endmem = (void *)MX8M_DDR_CSD1_BASE_ADDR +
- imx8m_barebox_earlymem_size(16);
+ unsigned long endmem = MX8M_DDR_CSD1_BASE_ADDR + imx8m_barebox_earlymem_size(16);
- imx8m_save_bootrom_log(__arm_mem_scratch(endmem));
+ imx8m_save_bootrom_log((void *)arm_mem_scratch(endmem));
imx8mn_load_bl33(bl33);
if (IS_ENABLED(CONFIG_FIRMWARE_IMX8MN_OPTEE))
- imx8m_load_and_start_optee_via_tfa(imx8mn, endmem - OPTEE_SIZE, bl33);
+ imx8m_load_and_start_optee_via_tfa(imx8mn, (void *)arm_mem_optee(endmem), bl33);
else
imx8mn_load_and_start_tfa(imx8mn_bl31_bin);
}
diff --git a/arch/arm/mach-imx/xload-common.c b/arch/arm/mach-imx/xload-common.c
index 0d3e6be1b1..03eb2ef109 100644
--- a/arch/arm/mach-imx/xload-common.c
+++ b/arch/arm/mach-imx/xload-common.c
@@ -26,7 +26,7 @@ struct imx_scratch_space *__imx8m_scratch_space(int ddr_buswidth)
ulong endmem = MX8M_DDR_CSD1_BASE_ADDR +
imx8m_barebox_earlymem_size(ddr_buswidth);
- return (void *)__arm_mem_scratch(endmem);
+ return (void *)arm_mem_scratch(endmem);
}
#define HDR_SIZE 512
diff --git a/arch/powerpc/cpu-85xx/mmu.c b/arch/powerpc/cpu-85xx/mmu.c
index 6b93c3e8db..b484acbf80 100644
--- a/arch/powerpc/cpu-85xx/mmu.c
+++ b/arch/powerpc/cpu-85xx/mmu.c
@@ -17,13 +17,16 @@
#include <mmu.h>
#include <mach/mmu.h>
-int arch_remap_range(void *_start, size_t size, unsigned flags)
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
{
uint32_t ptr, start, tsize, valid, wimge, pte_flags;
unsigned long epn;
phys_addr_t rpn = 0;
int esel = 0;
+ if (phys_addr != virt_to_phys(virt_addr))
+ return -ENOSYS;
+
switch (flags) {
case MAP_UNCACHED:
pte_flags = MAS2_I;
@@ -35,7 +38,7 @@ int arch_remap_range(void *_start, size_t size, unsigned flags)
return -EINVAL;
}
- ptr = start = (uint32_t)_start;
+ ptr = start = (uint32_t)virt_addr;
wimge = pte_flags | MAS2_M;
while (ptr < (start + size)) {
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 81a5d7d55f..10b15a47b9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -563,7 +563,7 @@ extern int write_bat(ppc_bat_t bat, unsigned long upper, unsigned long lower);
#ifdef CONFIG_MMU
#define ARCH_HAS_REMAP
-int arch_remap_range(void *_start, size_t size, unsigned flags);
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags);
#endif
#endif
diff --git a/commands/Kconfig b/commands/Kconfig
index 76dfca2dfd..35d0079f78 100644
--- a/commands/Kconfig
+++ b/commands/Kconfig
@@ -201,12 +201,13 @@ config CMD_MEMINFO
config CMD_ARM_MMUINFO
bool "mmuinfo command"
- depends on CPU_V7
+ depends on CPU_V7 || CPU_V8
+ select MMUINFO
help
Say yes here to get a mmuinfo command to show some
- MMU and cache information using the cp15 registers.
+	  MMU and cache information using the cp15/system registers.
- Example:
+ Example for ARMv7:
PAR result for 0x00110000:
privileged read: 0x00110090
@@ -1707,6 +1708,7 @@ config CMD_MEMSET
config CMD_MEMTEST
tristate
+ select MEMTEST
prompt "memtest"
help
The memtest command can test the registered barebox memory.
diff --git a/commands/memtest.c b/commands/memtest.c
index 864947fa94..9fa148b3aa 100644
--- a/commands/memtest.c
+++ b/commands/memtest.c
@@ -15,6 +15,7 @@
static int do_test_one_area(struct mem_test_resource *r, int bus_only,
unsigned cache_flag)
{
+ unsigned flags = MEMTEST_VERBOSE;
int ret;
printf("Testing memory space: %pa -> %pa:\n",
@@ -22,14 +23,14 @@ static int do_test_one_area(struct mem_test_resource *r, int bus_only,
remap_range((void *)r->r->start, resource_size(r->r), cache_flag);
- ret = mem_test_bus_integrity(r->r->start, r->r->end);
+ ret = mem_test_bus_integrity(r->r->start, r->r->end, flags);
if (ret < 0)
return ret;
if (bus_only)
return 0;
- ret = mem_test_moving_inversions(r->r->start, r->r->end);
+ ret = mem_test_moving_inversions(r->r->start, r->r->end, flags);
if (ret < 0)
return ret;
printf("done.\n\n");
diff --git a/common/Kconfig b/common/Kconfig
index 2483a1089d..bac9993641 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -144,6 +144,9 @@ config MEMINFO
bool "display memory info"
default y
+config MEMTEST
+ bool
+
config ENVIRONMENT_VARIABLES
bool "environment variables support"
@@ -185,14 +188,8 @@ config MMU
to enable the data cache which depends on the MMU. See Documentation/mmu.txt
for further information.
-config MMU_EARLY
- bool "Enable MMU early"
- depends on ARM
- depends on MMU
- default y
- help
- This enables the MMU during early startup. This speeds up things during startup
- of barebox, but may lead to harder to debug code. If unsure say yes here.
+config MMUINFO
+ bool
config HAVE_CONFIGURABLE_TEXT_BASE
bool
diff --git a/common/Makefile b/common/Makefile
index 8dc475f324..7fb864f614 100644
--- a/common/Makefile
+++ b/common/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_BLOCK) += block.o
obj-$(CONFIG_BLSPEC) += blspec.o
obj-$(CONFIG_BOOTM) += bootm.o booti.o
obj-$(CONFIG_CMD_LOADS) += s_record.o
-obj-$(CONFIG_CMD_MEMTEST) += memtest.o
+obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_COMMAND_SUPPORT) += command.o
obj-$(CONFIG_CONSOLE_FULL) += console.o
obj-$(CONFIG_CONSOLE_SIMPLE) += console_simple.o
diff --git a/common/boards/qemu-virt/Makefile b/common/boards/qemu-virt/Makefile
index 8cacfafee7..c167277515 100644
--- a/common/boards/qemu-virt/Makefile
+++ b/common/boards/qemu-virt/Makefile
@@ -5,6 +5,9 @@ obj-y += overlay-of-flash.dtb.o
ifeq ($(CONFIG_RISCV),y)
DTC_CPP_FLAGS_overlay-of-flash.dtb := -DRISCV_VIRT=1
endif
+ifeq ($(CONFIG_ARM),y)
+DTC_CPP_FLAGS_overlay-of-flash.dtb := -DARM_VIRT=1
+endif
clean-files := *.dtb *.dtb.S .*.dtc .*.pre .*.dts *.dtb.z
clean-files += *.dtbo *.dtbo.S .*.dtso
diff --git a/common/boards/qemu-virt/overlay-of-flash.dts b/common/boards/qemu-virt/overlay-of-flash.dts
index 15c8cc450d..16b1c7923d 100644
--- a/common/boards/qemu-virt/overlay-of-flash.dts
+++ b/common/boards/qemu-virt/overlay-of-flash.dts
@@ -6,12 +6,15 @@
#ifdef RISCV_VIRT
#define PARTS_TARGET_PATH /flash@20000000
#define ENV_DEVICE_PATH "/flash@20000000/partitions/partition@3c00000"
-#else
+#elif defined ARM_VIRT
#define PARTS_TARGET_PATH /flash@0
#define ENV_DEVICE_PATH "/flash@0/partitions/partition@3c00000"
#endif
&{PARTS_TARGET_PATH} {
+#ifdef ARM_VIRT
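+	/* remap the flash away from 0x0 so the zero page keeps
+	 * trapping NULL pointer dereferences
+	 */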
+ virtual-reg = <0x1000>;
+#endif
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
diff --git a/common/memtest.c b/common/memtest.c
index d47e4a672e..aa16d94eed 100644
--- a/common/memtest.c
+++ b/common/memtest.c
@@ -160,7 +160,7 @@ static void mem_test_report_failure(const char *failure_description,
}
int mem_test_bus_integrity(resource_size_t _start,
- resource_size_t _end)
+ resource_size_t _end, unsigned int flags)
{
static const uint64_t bitpattern[] = {
0x0000000000000001ULL, /* single bit */
@@ -190,7 +190,8 @@ int mem_test_bus_integrity(resource_size_t _start,
dummy = start + 1;
num_words = (_end - _start + 1)/sizeof(resource_size_t);
- printf("Starting data line test.\n");
+ if (flags & MEMTEST_VERBOSE)
+ printf("Starting data line test.\n");
/*
* Data line test: write a pattern to the first
@@ -294,7 +295,8 @@ int mem_test_bus_integrity(resource_size_t _start,
*/
start[0] = anti_pattern;
- printf("Check for address bits stuck high.\n");
+ if (flags & MEMTEST_VERBOSE)
+ printf("Check for address bits stuck high.\n");
/*
* Check for address bits stuck high.
@@ -313,8 +315,8 @@ int mem_test_bus_integrity(resource_size_t _start,
*/
start[0] = pattern;
- printf("Check for address bits stuck "
- "low or shorted.\n");
+ if (flags & MEMTEST_VERBOSE)
+ printf("Check for address bits stuck low or shorted.\n");
/*
* Check for address bits stuck low or shorted.
@@ -340,7 +342,7 @@ int mem_test_bus_integrity(resource_size_t _start,
return 0;
}
-static int update_progress(resource_size_t offset)
+static int update_progress(resource_size_t offset, unsigned flags)
{
/* Only check every 4k to reduce overhead */
if (offset & (SZ_4K - 1))
@@ -349,12 +351,14 @@ static int update_progress(resource_size_t offset)
if (ctrlc())
return -EINTR;
- show_progress(offset);
+ if (flags & MEMTEST_VERBOSE)
+ show_progress(offset);
return 0;
}
-int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
+int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end,
+ unsigned flags)
{
volatile resource_size_t *start, num_words, offset, temp, anti_pattern;
int ret;
@@ -368,8 +372,12 @@ int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
start = (resource_size_t *)_start;
num_words = (_end - _start + 1)/sizeof(resource_size_t);
- printf("Starting moving inversions test of RAM:\n"
- "Fill with address, compare, fill with inverted address, compare again\n");
+ if (flags & MEMTEST_VERBOSE) {
+ printf("Starting moving inversions test of RAM:\n"
+ "Fill with address, compare, fill with inverted address, compare again\n");
+
+ init_progression_bar(3 * num_words);
+ }
/*
* Description: Test the integrity of a physical
@@ -382,11 +390,9 @@ int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
* selected by the caller.
*/
- init_progression_bar(3 * num_words);
-
/* Fill memory with a known pattern */
for (offset = 0; offset < num_words; offset++) {
- ret = update_progress(offset);
+ ret = update_progress(offset, flags);
if (ret)
return ret;
start[offset] = offset + 1;
@@ -394,7 +400,7 @@ int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
/* Check each location and invert it for the second pass */
for (offset = 0; offset < num_words; offset++) {
- ret = update_progress(num_words + offset);
+ ret = update_progress(num_words + offset, flags);
if (ret)
return ret;
@@ -413,7 +419,7 @@ int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
/* Check each location for the inverted pattern and zero it */
for (offset = 0; offset < num_words; offset++) {
- ret = update_progress(2 * num_words + offset);
+ ret = update_progress(2 * num_words + offset, flags);
if (ret)
return ret;
@@ -430,10 +436,12 @@ int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end)
start[offset] = 0;
}
- show_progress(3 * num_words);
+ if (flags & MEMTEST_VERBOSE) {
+ show_progress(3 * num_words);
- /* end of progressbar */
- printf("\n");
+ /* end of progressbar */
+ printf("\n");
+ }
return 0;
}
diff --git a/drivers/hab/habv4.c b/drivers/hab/habv4.c
index ca26773bf8..b10c92ec76 100644
--- a/drivers/hab/habv4.c
+++ b/drivers/hab/habv4.c
@@ -11,6 +11,9 @@
#include <hab.h>
#include <init.h>
#include <types.h>
+#include <mmu.h>
+#include <zero_page.h>
+#include <linux/sizes.h>
#include <linux/arm-smccc.h>
#include <asm/cache.h>
@@ -616,12 +619,17 @@ static int init_imx6_hab_get_status(void)
/* can happen in multi-image builds and is not an error */
return 0;
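+	/*
+	 * The HAB ROM code and data live in the first MiB, which is
+	 * normally mapped faulting/uncached to trap NULL pointer
+	 * dereferences; map it cached for the duration of the query.
+	 */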
+ remap_range(0x0, SZ_1M, MAP_CACHED);
+
/*
* Nobody will check the return value if there were HAB errors, but the
* initcall will fail spectaculously with a strange error message.
*/
imx6_hab_get_status();
+ zero_page_faulting();
+	remap_range((void *)PAGE_SIZE, SZ_1M - PAGE_SIZE, MAP_UNCACHED);
+
return 0;
}
@@ -630,7 +638,7 @@ static int init_imx6_hab_get_status(void)
* which will no longer be accessible when the MMU sets the zero page to
* faulting.
*/
-postconsole_initcall(init_imx6_hab_get_status);
+postmmu_initcall(init_imx6_hab_get_status);
int imx28_hab_get_status(void)
{
diff --git a/drivers/mtd/nor/cfi_flash.c b/drivers/mtd/nor/cfi_flash.c
index 8b5302d7a7..f1555a72a4 100644
--- a/drivers/mtd/nor/cfi_flash.c
+++ b/drivers/mtd/nor/cfi_flash.c
@@ -965,7 +965,10 @@ static int cfi_probe_one(struct flash_info *info, int num)
return PTR_ERR(iores);
info->base = IOMEM(iores->start);
- /* TODO: either remap memory region or disable NULL pointer page */
+ /*
+	 * Platforms hitting this should remap the memory region, e.g. via the
+	 * virtual-reg device tree property, or disable the MMU.
+ */
if (IS_ENABLED(CONFIG_MMU) && iores->start == 0)
return -EPERM;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 23b8fa7934..ab73762932 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -12,6 +12,7 @@
#include <of.h>
#include <of_address.h>
#include <linux/amba/bus.h>
+#include <mmu.h>
/**
* of_find_device_by_node - Find the platform_device associated with a node
@@ -145,6 +146,7 @@ struct device *of_platform_device_create(struct device_node *np,
struct resource *res = NULL, temp_res;
resource_size_t resinval;
int i, ret, num_reg = 0;
+ u32 virt;
if (!of_device_is_available(np))
return NULL;
@@ -186,6 +188,24 @@ struct device *of_platform_device_create(struct device_node *np,
of_dma_configure(dev, np);
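+	/*
+	 * Honour the optional virtual-reg property: e.g. with
+	 * reg = <0x0 0x4000000> and virtual-reg = <0x1000>, the device
+	 * is remapped so that physical 0x0 is accessed via virtual
+	 * 0x1000 and the zero page can remain faulting.
+	 */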
+ if (num_reg && !of_property_read_u32(np, "virtual-reg", &virt)) {
+ resource_size_t remap_offset = virt - res[0].start;
+
+ for (i = 0; i < num_reg; i++) {
+ void *new_virt = (void *)res[i].start + remap_offset;
+ resource_size_t size = resource_size(&res[i]);
+
+ ret = arch_remap_range(new_virt, res[i].start, size, MAP_UNCACHED);
+ if (!ret) {
+ debug("%s: remap device %s resource %d: %pa -> 0x%p\n",
+ __func__, dev_name(dev), i, &res[i].start, new_virt);
+
+ res[i].start = (resource_size_t)new_virt;
+ res[i].end = res[i].start + size - 1;
+ }
+ }
+ }
+
resinval = (-1);
debug("%s: register device %s, io=%pa\n",
diff --git a/include/mach/rockchip/bootrom.h b/include/mach/rockchip/bootrom.h
index 96eb147ae4..5b999fc606 100644
--- a/include/mach/rockchip/bootrom.h
+++ b/include/mach/rockchip/bootrom.h
@@ -15,7 +15,7 @@ static inline void rockchip_store_bootrom_iram(ulong membase,
ulong memsize,
const void *iram)
{
- void *dst = (void *)__arm_mem_scratch(membase + memsize);
+ void *dst = (void *)arm_mem_scratch(membase + memsize);
memcpy(dst, iram, sizeof(struct rockchip_scratch_space));
}
diff --git a/include/memtest.h b/include/memtest.h
index df0a391cc3..3de30631ae 100644
--- a/include/memtest.h
+++ b/include/memtest.h
@@ -3,6 +3,7 @@
#define __MEMTEST_H
#include <linux/ioport.h>
+#include <linux/bitops.h>
struct mem_test_resource {
struct resource *r;
@@ -13,7 +14,9 @@ int mem_test_request_regions(struct list_head *list);
void mem_test_release_regions(struct list_head *list);
struct mem_test_resource *mem_test_biggest_region(struct list_head *list);
-int mem_test_bus_integrity(resource_size_t _start, resource_size_t _end);
-int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end);
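+/* flags for the mem_test_* functions below */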
+#define MEMTEST_VERBOSE BIT(0)
+
+int mem_test_bus_integrity(resource_size_t _start, resource_size_t _end, unsigned flags);
+int mem_test_moving_inversions(resource_size_t _start, resource_size_t _end, unsigned flags);
#endif /* __MEMTEST_H */
diff --git a/include/mmu.h b/include/mmu.h
index 2e23853df3..84ec6c5efb 100644
--- a/include/mmu.h
+++ b/include/mmu.h
@@ -2,8 +2,12 @@
#ifndef __MMU_H
#define __MMU_H
+#include <linux/types.h>
+#include <errno.h>
+
#define MAP_UNCACHED 0
#define MAP_CACHED 1
+#define MAP_FAULT 2
/*
* Depending on the architecture the default mapping can be
@@ -15,9 +19,10 @@
#include <asm/mmu.h>
#ifndef ARCH_HAS_REMAP
-static inline int arch_remap_range(void *start, size_t size, unsigned flags)
+static inline int arch_remap_range(void *virt_addr, phys_addr_t phys_addr,
+ size_t size, unsigned flags)
{
- if (flags == MAP_ARCH_DEFAULT)
+ if (flags == MAP_ARCH_DEFAULT && phys_addr == virt_to_phys(virt_addr))
return 0;
return -EINVAL;
@@ -36,7 +41,16 @@ static inline bool arch_can_remap(void)
static inline int remap_range(void *start, size_t size, unsigned flags)
{
- return arch_remap_range(start, size, flags);
+ return arch_remap_range(start, virt_to_phys(start), size, flags);
+}
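+
+/*
+ * remap_range() only changes the attributes of an existing 1:1
+ * (virt_to_phys) mapping; use arch_remap_range() directly to map a
+ * virtual range onto a different physical region.
+ */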
+
+#ifdef CONFIG_MMUINFO
+int mmuinfo(void *addr);
+#else
+static inline int mmuinfo(void *addr)
+{
+ return -ENOSYS;
}
+#endif
#endif
diff --git a/include/zero_page.h b/include/zero_page.h
index a71c0e0b87..79e0f22c7b 100644
--- a/include/zero_page.h
+++ b/include/zero_page.h
@@ -20,6 +20,12 @@ void zero_page_faulting(void);
  */
 void zero_page_access(void);
+
+static inline bool zero_page_remappable(void)
+{
+ return true;
+}
+
#else
static inline void zero_page_faulting(void)
@@ -30,6 +37,11 @@ static inline void zero_page_access(void)
{
}
+static inline bool zero_page_remappable(void)
+{
+ return false;
+}
+
#endif
static inline bool zero_page_contains(unsigned long addr)
diff --git a/test/self/Kconfig b/test/self/Kconfig
index ce5048c70e..c130209748 100644
--- a/test/self/Kconfig
+++ b/test/self/Kconfig
@@ -36,6 +36,7 @@ config SELFTEST_ENABLE_ALL
imply SELFTEST_FS_RAMFS
imply SELFTEST_TFTP
imply SELFTEST_JSON
+ imply SELFTEST_MMU
help
Selects all self-tests compatible with current configuration
@@ -69,4 +70,9 @@ config SELFTEST_JSON
bool "JSON selftest"
depends on JSMN
+config SELFTEST_MMU
+ bool "MMU remapping selftest"
+ select MEMTEST
+ depends on MMU
+
endif
diff --git a/test/self/Makefile b/test/self/Makefile
index 98ebd1fd66..8c816c4299 100644
--- a/test/self/Makefile
+++ b/test/self/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_SELFTEST_OF_MANIPULATION) += of_manipulation.o of_manipulation.dtb.
obj-$(CONFIG_SELFTEST_ENVIRONMENT_VARIABLES) += envvar.o
obj-$(CONFIG_SELFTEST_FS_RAMFS) += ramfs.o
obj-$(CONFIG_SELFTEST_JSON) += json.o
+obj-$(CONFIG_SELFTEST_MMU) += mmu.o
clean-files := *.dtb *.dtb.S .*.dtc .*.pre .*.dts *.dtb.z
clean-files += *.dtbo *.dtbo.S .*.dtso
diff --git a/test/self/mmu.c b/test/self/mmu.c
new file mode 100644
index 0000000000..ee6c1cd45e
--- /dev/null
+++ b/test/self/mmu.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <common.h>
+#include <bselftest.h>
+#include <mmu.h>
+#include <memtest.h>
+#include <abort.h>
+#include <zero_page.h>
+#include <linux/sizes.h>
+
+BSELFTEST_GLOBALS();
+
+#define __expect(ret, cond, fmt, ...) do { \
+ bool __cond = (cond); \
+ int __ret = (ret); \
+ total_tests++; \
+ \
+ if (!__cond) { \
+ failed_tests++; \
+ printf("%s:%d error %pe: " fmt "\n", \
+ __func__, __LINE__, ERR_PTR(__ret), ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define expect_success(ret, ...) __expect((ret), ((ret) >= 0), __VA_ARGS__)
+
+static void memtest(void __iomem *start, size_t size, const char *desc)
+{
+ int ret;
+
+ ret = mem_test_bus_integrity((resource_size_t)start,
+ (resource_size_t)start + size - 1, 0);
+ expect_success(ret, "%s bus test", desc);
+
+ ret = mem_test_moving_inversions((resource_size_t)start,
+ (resource_size_t)start + size - 1, 0);
+	expect_success(ret, "%s moving inversions test", desc);
+}
+
+static inline int __check_mirroring(void __iomem *a, void __iomem *b, bool is_mirror,
+ const char *func, int line)
+{
+ if ((readl(a) == readl(b)) == (is_mirror))
+ return 0;
+
+ printf("%s:%d: mirroring unexpectedly %s: (*%p = 0x%x) %s (*%p = 0x%x)\n", func, line,
+ is_mirror ? "failed" : "succeeded",
+ a, readl(a), is_mirror ? "!=" : "==", b, readl(b));
+
+ mmuinfo(a);
+ mmuinfo(b);
+
+ return -EILSEQ;
+}
+
+#define check_mirroring(a, b, is_mirror) \
+ __check_mirroring((a), (b), (is_mirror), __func__, __LINE__)
+
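+/*
+ * test_remap() allocates two separate buffers and then uses
+ * arch_remap_range() to alias one virtually onto the other's physical
+ * pages: a write through either mapping must then be visible through
+ * both. Buffers are remapped uncached first, so stale cache lines
+ * cannot mask the aliasing.
+ */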
+static void test_remap(void)
+{
+ u8 __iomem *buffer = NULL, *mirror = NULL;
+ phys_addr_t buffer_phys;
+ int i, ret;
+
+ buffer = memalign(SZ_2M, SZ_8M);
+ if (WARN_ON(!buffer))
+ goto out;
+
+ buffer_phys = virt_to_phys(buffer);
+
+ mirror = memalign(SZ_2M, SZ_8M);
+ if (WARN_ON(!mirror))
+ goto out;
+
+ pr_debug("allocated buffer = 0x%p, mirror = 0x%p\n", buffer, mirror);
+
+ memtest(buffer, SZ_8M, "cached buffer");
+ memtest(mirror, SZ_8M, "cached mirror");
+
+	if (!arch_can_remap()) {
+ skipped_tests += 10;
+ goto out;
+ }
+
+ ret = remap_range(buffer, SZ_8M, MAP_UNCACHED);
+ memtest(buffer, SZ_8M, "uncached buffer");
+
+ ret = remap_range(mirror, SZ_8M, MAP_UNCACHED);
+ memtest(mirror, SZ_8M, "uncached mirror");
+
+ for (i = 0; i < SZ_8M; i += sizeof(u32)) {
+ int m = i, b = i;
+ writel(0xDEADBEEF, &mirror[m]);
+ writel(i, &buffer[b]);
+ ret = check_mirroring(&mirror[m], &buffer[b], false);
+ if (ret)
+ break;
+ }
+
+ expect_success(ret, "asserting no mirror before remap");
+
+ ret = arch_remap_range(mirror, buffer_phys, SZ_8M, MAP_UNCACHED);
+ expect_success(ret, "remapping with mirroring");
+
+ for (i = 0; i < SZ_8M; i += sizeof(u32)) {
+ int m = i, b = i;
+ writel(0xDEADBEEF, &mirror[m]);
+ writel(i, &buffer[b]);
+ ret = check_mirroring(&mirror[m], &buffer[b], true);
+ if (ret)
+ break;
+ }
+
+ expect_success(ret, "asserting mirroring after remap");
+
+ ret = arch_remap_range(mirror, buffer_phys + SZ_4K, SZ_4M, MAP_UNCACHED);
+ expect_success(ret, "remapping with mirroring (phys += 4K)");
+
+ for (i = 0; i < SZ_4M; i += sizeof(u32)) {
+ int m = i, b = i + SZ_4K;
+ writel(0xDEADBEEF, &mirror[m]);
+ writel(i, &buffer[b]);
+ ret = check_mirroring(&mirror[m], &buffer[b], true);
+ if (ret)
+ break;
+ }
+
+ expect_success(ret, "asserting mirroring after remap (phys += 4K)");
+
+ ret = arch_remap_range(mirror + SZ_4K, buffer_phys, SZ_4M, MAP_UNCACHED);
+ expect_success(ret, "remapping with mirroring (virt += 4K)");
+
+ for (i = 0; i < SZ_4M; i += sizeof(u32)) {
+ int m = i + SZ_4K, b = i;
+ writel(0xDEADBEEF, &mirror[m]);
+ writel(i, &buffer[b]);
+ ret = check_mirroring(&mirror[m], &buffer[b], true);
+ if (ret)
+ break;
+ }
+
+ expect_success(ret, "asserting mirroring after remap (virt += 4K)");
+
+ ret = remap_range(buffer, SZ_8M, MAP_DEFAULT);
+ expect_success(ret, "remapping buffer with default attrs");
+ memtest(buffer, SZ_8M, "newly cached buffer");
+
+ ret = remap_range(mirror, SZ_8M, MAP_DEFAULT);
+ expect_success(ret, "remapping mirror with default attrs");
+ memtest(mirror, SZ_8M, "newly cached mirror");
+
+ for (i = 0; i < SZ_8M; i += sizeof(u32)) {
+ int m = i, b = i;
+ writel(0xDEADBEEF, &mirror[m]);
+ writel(i, &buffer[b]);
+ ret = check_mirroring(&mirror[m], &buffer[b], false);
+ if (ret)
+ break;
+ }
+
+ expect_success(ret, "asserting no mirror after remap restore");
+out:
+ free(buffer);
+ free(mirror);
+}
+
+static void test_zero_page(void)
+{
+ void __iomem *null = NULL;
+
+ total_tests += 3;
+
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DATA_ABORT_MASK)) {
+ pr_info("skipping %s because %s=n\n",
+			__func__, "CONFIG_ARCH_HAS_DATA_ABORT_MASK");
+ skipped_tests += 3;
+ return;
+ }
+
+ OPTIMIZER_HIDE_VAR(null);
+
+ /* Check if *NULL traps and data_abort_mask works */
+
+ data_abort_mask();
+
+ (void)readl(null);
+
+ if (!data_abort_unmask()) {
+ printf("%s: NULL pointer access did not trap\n", __func__);
+ failed_tests++;
+ }
+
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_ZERO_PAGE)) {
+ pr_info("skipping %s because %s=n\n",
+			__func__, "CONFIG_ARCH_HAS_ZERO_PAGE");
+ skipped_tests += 2;
+ return;
+ }
+
+ /* Check if zero_page_access() works */
+
+ data_abort_mask();
+
+ zero_page_access();
+ (void)readl(null);
+ zero_page_faulting();
+
+ if (data_abort_unmask()) {
+ printf("%s: unexpected fault on zero page access\n", __func__);
+ failed_tests++;
+ }
+
+ /* Check if zero_page_faulting() works */
+
+ data_abort_mask();
+
+ (void)readl(null);
+
+ if (!data_abort_unmask()) {
+		printf("%s: NULL pointer access did not trap\n", __func__);
+ failed_tests++;
+ }
+}
+
+static void test_mmu(void)
+{
+ test_zero_page();
+ test_remap();
+}
+bselftest(core, test_mmu);