summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/cpu/Kconfig1
-rw-r--r--arch/arm/cpu/mmu_64.c13
-rw-r--r--include/zero_page.h54
-rw-r--r--lib/Kconfig3
4 files changed, 70 insertions, 1 deletion
diff --git a/arch/arm/cpu/Kconfig b/arch/arm/cpu/Kconfig
index f9f52a6252..ca3bd98962 100644
--- a/arch/arm/cpu/Kconfig
+++ b/arch/arm/cpu/Kconfig
@@ -89,6 +89,7 @@ config CPU_V8
select ARM_EXCEPTIONS
select GENERIC_FIND_NEXT_BIT
select ARCH_HAS_STACK_DUMP
+ select ARCH_HAS_ZERO_PAGE
config CPU_XSC3
bool
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 7e9ae84810..06049e0003 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -10,6 +10,7 @@
#include <init.h>
#include <mmu.h>
#include <errno.h>
+#include <zero_page.h>
#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/pgtable64.h>
@@ -168,6 +169,16 @@ static void mmu_enable(void)
set_cr(get_cr() | CR_M | CR_C | CR_I);
}
+void zero_page_access(void)
+{
+ create_sections(0x0, 0x0, PAGE_SIZE, CACHED_MEM);
+}
+
+void zero_page_faulting(void)
+{
+ create_sections(0x0, 0x0, PAGE_SIZE, 0x0);
+}
+
/*
* Prepare MMU for usage enable it.
*/
@@ -194,7 +205,7 @@ void __mmu_init(bool mmu_on)
create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
/* Make zero page faulting to catch NULL pointer derefs */
- create_sections(0x0, 0x0, 0x1000, 0x0);
+ zero_page_faulting();
mmu_enable();
}
diff --git a/include/zero_page.h b/include/zero_page.h
new file mode 100644
index 0000000000..ad6861f240
--- /dev/null
+++ b/include/zero_page.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ZERO_PAGE_H
+#define __ZERO_PAGE_H
+
+#include <common.h>
+
+#if defined CONFIG_ARCH_HAS_ZERO_PAGE
+
+/*
+ * zero_page_faulting - fault when accessing the zero page
+ */
+void zero_page_faulting(void);
+
+/*
+ * zero_page_access - allow accesses to the zero page
+ *
+ * Disable the null pointer trap on the zero page if access to the zero page
+ * is actually required. Disable the trap with care and re-enable it
+ * immediately after the access to properly trap null pointers.
+ */
+void zero_page_access(void);
+
+#else
+
+static inline void zero_page_faulting(void)
+{
+}
+
+static inline void zero_page_access(void)
+{
+}
+
+#endif
+
+static inline bool zero_page_contains(unsigned long addr)
+{
+ return addr < PAGE_SIZE;
+}
+
+/*
+ * zero_page_memcpy - copy to or from an address located in the zero page
+ */
+static inline void *zero_page_memcpy(void *dest, const void *src, size_t count)
+{
+ void *ret;
+
+ zero_page_access();
+ ret = memcpy(dest, src, count);
+ zero_page_faulting();
+
+ return ret;
+}
+
+#endif /* __ZERO_PAGE_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 887f50ff00..e5831ecdb9 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,9 @@ config ARCH_HAS_STACK_DUMP
config ARCH_HAS_DATA_ABORT_MASK
bool
+config ARCH_HAS_ZERO_PAGE
+ bool
+
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool