author		Sascha Hauer <s.hauer@pengutronix.de>	2017-02-06 16:11:03 +0100
committer	Sascha Hauer <s.hauer@pengutronix.de>	2017-02-13 08:35:42 +0100
commit		cc407b41135d4a427dfb09fb6e32b831e6c98a31 (patch)
tree		0ba793b8a0c041819e4409f0c31b2d4f297c997a /arch/arm/cpu
parent		cdf33e6ecf4ca724d8be64fefbc4896eb8baf046 (diff)
ARM: Add PSCI support
This patch contains the barebox implementation for the ARM "Power State
Coordination Interface" (PSCI). The interface is aimed at the generalization
of code in the following power management scenarios:

* Core idle management.
* Dynamic addition and removal of cores, and secondary core boot.
* big.LITTLE migration.
* System shutdown and reset.

In practice, all that's currently implemented is a way to enable the
secondary core on some SoCs.

With PSCI the Kernel is either started in nonsecure or in Hypervisor mode
and PSCI is used to apply power to the secondary cores. The start mode is
passed in the global.bootm.secure_state variable. This enum can contain
"secure" (Kernel is started in secure mode, meaning no PSCI), "nonsecure"
(Kernel is started in nonsecure mode, PSCI available) or "hyp" (Kernel is
started in hyp mode, meaning it can support virtualization).

We currently only support putting the secure monitor code into SDRAM, which
means we always steal some amount of memory from the Kernel. To keep things
simple for now we simply keep the whole barebox binary in memory.

The PSCI support has been tested on i.MX7 only so far. The only supported
operations are CPU_ON and CPU_OFF.

The PSCI and secure monitor code is based on the corresponding U-Boot code.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
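A SoC has to supply the actual power switching callbacks behind this
interface. The following is a minimal sketch, not part of this patch: the
imx7_* helper names are hypothetical, and the struct psci_ops layout is
inferred from the way psci.c below invokes the ops.

	#include <asm/psci.h>

	/* hypothetical SoC helpers that apply/remove power to a core */
	static unsigned long imx7_cpu_on(u32 cpu_id)
	{
		/* power up the core; it enters via psci_cpu_entry */
		return 0;
	}

	static unsigned long imx7_cpu_off(void)
	{
		/* power down the calling core */
		return 0;
	}

	static struct psci_ops imx7_psci_ops = {
		.cpu_on  = imx7_cpu_on,
		.cpu_off = imx7_cpu_off,
	};

	/* from SoC startup code: */
	psci_set_ops(&imx7_psci_ops);

At runtime the kernel start mode is then chosen via the variable described
above, e.g. global.bootm.secure_state=nonsecure before running bootm.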
Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--	arch/arm/cpu/Makefile	2
-rw-r--r--	arch/arm/cpu/psci.c	298
-rw-r--r--	arch/arm/cpu/sm.c	266
-rw-r--r--	arch/arm/cpu/sm_as.S	168
4 files changed, 734 insertions, 0 deletions
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index e542f1741a..13b4f9590d 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -34,7 +34,9 @@ ifeq ($(CONFIG_MMU),)
obj-y += no-mmu.o
endif
+obj-$(CONFIG_ARM_PSCI) += psci.o
obj-$(CONFIG_ARM_SECURE_MONITOR) += smccc-call.o
+obj-$(CONFIG_ARM_SECURE_MONITOR) += sm.o sm_as.o
obj-$(CONFIG_CPU_32v4T) += cache-armv4.o
pbl-$(CONFIG_CPU_32v4T) += cache-armv4.o
diff --git a/arch/arm/cpu/psci.c b/arch/arm/cpu/psci.c
new file mode 100644
index 0000000000..745b8495e5
--- /dev/null
+++ b/arch/arm/cpu/psci.c
@@ -0,0 +1,298 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <common.h>
+#include <asm/psci.h>
+#include <asm/arm-smccc.h>
+#include <asm/secure.h>
+#include <asm/system.h>
+#include <restart.h>
+#include <globalvar.h>
+#include <init.h>
+#include <magicvar.h>
+
+#ifdef CONFIG_ARM_PSCI_DEBUG
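+/*
+ * Debug output helpers: a board can register a low-level putc callback
+ * via psci_set_putc(); psci_putc(), psci_puts() and psci_printf() then
+ * write through it, so they also work from the secure monitor context
+ * where the regular console may not be usable.
+ */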
+static void (*__putc)(void *ctx, int c);
+static void *putc_ctx;
+
+void psci_set_putc(void (*putcf)(void *ctx, int c), void *ctx)
+{
+ __putc = putcf;
+ putc_ctx = ctx;
+}
+
+void psci_putc(char c)
+{
+ if (__putc)
+ __putc(putc_ctx, c);
+}
+
+int psci_puts(const char *str)
+{
+ int n = 0;
+
+ while (*str) {
+ if (*str == '\n')
+ psci_putc('\r');
+
+ psci_putc(*str);
+ str++;
+ n++;
+ }
+
+ return n;
+}
+
+int psci_printf(const char *fmt, ...)
+{
+ va_list args;
+ uint i;
+ char printbuffer[128];
+
+ va_start(args, fmt);
+	i = vsnprintf(printbuffer, sizeof(printbuffer), fmt, args);
+ va_end(args);
+
+ psci_puts(printbuffer);
+
+ return i;
+}
+#endif
+
+static struct psci_ops *psci_ops;
+
+void psci_set_ops(struct psci_ops *ops)
+{
+ psci_ops = ops;
+}
+
+static unsigned long psci_version(void)
+{
+ psci_printf("%s\n", __func__);
+ return ARM_PSCI_VER_1_0;
+}
+
+static unsigned long psci_cpu_suspend(u32 power_state, unsigned long entry,
+ u32 context_id)
+{
+ psci_printf("%s\n", __func__);
+
+	if (psci_ops->cpu_suspend)
+		return psci_ops->cpu_suspend(power_state, entry, context_id);
+
+ return ARM_PSCI_RET_NOT_SUPPORTED;
+}
+
+static unsigned long psci_cpu_off(void)
+{
+ psci_printf("%s\n", __func__);
+
+ if (psci_ops->cpu_off)
+ return psci_ops->cpu_off();
+
+ return ARM_PSCI_RET_NOT_SUPPORTED;
+}
+
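+/*
+ * Entry address and context ID per CPU, recorded by the CPU_ON handler
+ * on the requesting core and consumed by psci_cpu_entry_c() on the core
+ * being brought up.
+ */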
+static unsigned long cpu_entry[ARM_SECURE_MAX_CPU];
+static unsigned long context[ARM_SECURE_MAX_CPU];
+
+static unsigned long psci_cpu_on(u32 cpu_id, unsigned long entry, u32 context_id)
+{
+ psci_printf("%s: %d 0x%08lx\n", __func__, cpu_id, entry);
+
+ if (cpu_id >= ARM_SECURE_MAX_CPU)
+ return ARM_PSCI_RET_INVAL;
+
+ cpu_entry[cpu_id] = entry;
+ context[cpu_id] = context_id;
+ dsb();
+
+ if (psci_ops->cpu_on)
+ return psci_ops->cpu_on(cpu_id);
+
+ return ARM_PSCI_RET_NOT_SUPPORTED;
+}
+
+static unsigned long psci_system_off(void)
+{
+ psci_printf("%s\n", __func__);
+
+	if (psci_ops->system_off)
+		psci_ops->system_off();
+
+	while (1);
+
+ return 0;
+}
+
+static unsigned long psci_system_reset(void)
+{
+ psci_printf("%s\n", __func__);
+
+ if (psci_ops->system_reset)
+ psci_ops->system_reset();
+
+ restart_machine();
+}
+
+void psci_entry(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 r5, u32 r6,
+ struct arm_smccc_res *res)
+{
+ psci_printf("%s entry, function: 0x%08x\n", __func__, r0);
+
+ switch (r0) {
+ case ARM_PSCI_0_2_FN_PSCI_VERSION:
+ res->a0 = psci_version();
+ break;
+ case ARM_PSCI_0_2_FN_CPU_SUSPEND:
+ res->a0 = psci_cpu_suspend(r1, r2, r3);
+ break;
+ case ARM_PSCI_0_2_FN_CPU_OFF:
+ res->a0 = psci_cpu_off();
+ break;
+ case ARM_PSCI_0_2_FN_CPU_ON:
+ res->a0 = psci_cpu_on(r1, r2, r3);
+ break;
+ case ARM_PSCI_0_2_FN_SYSTEM_OFF:
+ psci_system_off();
+ break;
+ case ARM_PSCI_0_2_FN_SYSTEM_RESET:
+ psci_system_reset();
+ break;
+ default:
+ res->a0 = ARM_PSCI_RET_NOT_SUPPORTED;
+ break;
+ }
+}
+
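+/*
+ * When the kernel is started in nonsecure or hyp mode, the fixup below
+ * makes the kernel device tree end up with a node like:
+ *
+ *	psci {
+ *		compatible = "arm,psci-1.0";
+ *		method = "smc";
+ *	};
+ */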
+static int of_psci_fixup(struct device_node *root, void *unused)
+{
+ struct device_node *psci;
+ int ret;
+
+ if (bootm_arm_security_state() < ARM_STATE_NONSECURE)
+ return 0;
+
+ psci = of_create_node(root, "/psci");
+ if (!psci)
+ return -EINVAL;
+
+ ret = of_set_property(psci, "compatible", "arm,psci-1.0",
+ strlen("arm,psci-1.0") + 1, 1);
+ if (ret)
+ return ret;
+
+ ret = of_set_property(psci, "method", "smc",
+ strlen("smc") + 1, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
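+/*
+ * C entry point for a secondary core started via CPU_ON. It is entered
+ * from psci_cpu_entry (sm_as.S) in secure state: install the secure
+ * monitor on this core, then branch to the entry point recorded by
+ * psci_cpu_on(), in nonsecure or hyp state as requested.
+ */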
+int psci_cpu_entry_c(void)
+{
+ void (*entry)(u32 context);
+ int cpu;
+ u32 context_id;
+
+ __armv7_secure_monitor_install();
+ cpu = psci_get_cpu_id();
+ entry = (void *)cpu_entry[cpu];
+ context_id = context[cpu];
+
+ if (bootm_arm_security_state() == ARM_STATE_HYP)
+ armv7_switch_to_hyp();
+
+ entry(context_id);
+
+ while (1);
+}
+
+static int armv7_psci_init(void)
+{
+ return of_register_fixup(of_psci_fixup, NULL);
+}
+device_initcall(armv7_psci_init);
+
+#ifdef DEBUG
+
+#include <command.h>
+#include <getopt.h>
+#include "mmu.h"
+
+void second_entry(void)
+{
+ struct arm_smccc_res res;
+
+ psci_printf("2nd CPU online, now turn off again\n");
+
+ arm_smccc_smc(ARM_PSCI_0_2_FN_CPU_OFF,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+
+ psci_printf("2nd CPU still alive?\n");
+
+ while (1);
+}
+
+static int do_smc(int argc, char *argv[])
+{
+ int opt;
+ struct arm_smccc_res res = {
+ .a0 = 0xdeadbee0,
+ .a1 = 0xdeadbee1,
+ .a2 = 0xdeadbee2,
+ .a3 = 0xdeadbee3,
+ };
+
+ while ((opt = getopt(argc, argv, "nicz")) > 0) {
+ switch (opt) {
+ case 'n':
+ armv7_secure_monitor_install();
+ break;
+ case 'i':
+ arm_smccc_smc(ARM_PSCI_0_2_FN_PSCI_VERSION,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ printf("found psci version %ld.%ld\n", res.a0 >> 16, res.a0 & 0xffff);
+ break;
+ case 'c':
+ arm_smccc_smc(ARM_PSCI_0_2_FN_CPU_ON,
+ 1, (unsigned long)second_entry, 0, 0, 0, 0, 0, &res);
+ break;
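+		/*
+		 * Note: -z is accepted in the optstring and documented in
+		 * the help text below, but no handler is implemented yet.
+		 */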
+ }
+ }
+
+ return 0;
+}
+BAREBOX_CMD_HELP_START(smc)
+BAREBOX_CMD_HELP_TEXT("Secure monitor code test command")
+BAREBOX_CMD_HELP_TEXT("")
+BAREBOX_CMD_HELP_TEXT("Options:")
+BAREBOX_CMD_HELP_OPT ("-n", "Install secure monitor and switch to nonsecure mode")
+BAREBOX_CMD_HELP_OPT ("-i", "Show information about installed PSCI version")
+BAREBOX_CMD_HELP_OPT ("-c", "Start secondary CPU core")
+BAREBOX_CMD_HELP_OPT ("-z", "Turn off secondary CPU core")
+BAREBOX_CMD_HELP_END
+
+BAREBOX_CMD_START(smc)
+ .cmd = do_smc,
+ BAREBOX_CMD_DESC("secure monitor test command")
+BAREBOX_CMD_END
+#endif
\ No newline at end of file
diff --git a/arch/arm/cpu/sm.c b/arch/arm/cpu/sm.c
new file mode 100644
index 0000000000..5808dfd92b
--- /dev/null
+++ b/arch/arm/cpu/sm.c
@@ -0,0 +1,266 @@
+/*
+ * (C) Copyright 2013
+ * Andre Przywara, Linaro <andre.przywara@linaro.org>
+ *
+ * Routines to transition ARMv7 processors from secure into non-secure state
+ * and from non-secure SVC into HYP mode
+ * needed to enable ARMv7 virtualization for current hypervisors
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#define pr_fmt(fmt) "secure: " fmt
+
+#include <common.h>
+#include <io.h>
+#include <asm/gic.h>
+#include <asm/system.h>
+#include <init.h>
+#include <globalvar.h>
+#include <asm/arm-smccc.h>
+#include <asm-generic/sections.h>
+#include <asm/secure.h>
+
+#include "mmu.h"
+
+/* valid bits in CBAR register / PERIPHBASE value */
+#define CBAR_MASK 0xFFFF8000
+
+static unsigned int read_id_pfr1(void)
+{
+ unsigned int reg;
+
+ asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
+ return reg;
+}
+
+static u32 read_nsacr(void)
+{
+ unsigned int reg;
+
+ asm("mrc p15, 0, %0, c1, c1, 2\n" : "=r"(reg));
+ return reg;
+}
+
+static void write_nsacr(u32 val)
+{
+ asm("mcr p15, 0, %0, c1, c1, 2" : : "r"(val));
+}
+
+static void write_mvbar(u32 val)
+{
+ asm("mcr p15, 0, %0, c12, c0, 1" : : "r"(val));
+}
+
+static unsigned long get_cbar(void)
+{
+ unsigned periphbase;
+
+ /* get the GIC base address from the CBAR register */
+ asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));
+
+ /* the PERIPHBASE can be mapped above 4 GB (lower 8 bits used to
+ * encode this). Bail out here since we cannot access this without
+ * enabling paging.
+ */
+ if ((periphbase & 0xff) != 0) {
+ pr_err("PERIPHBASE is above 4 GB, no access.\n");
+ return -1;
+ }
+
+ return periphbase & CBAR_MASK;
+}
+
+static unsigned long get_gicd_base_address(void)
+{
+ return get_cbar() + GIC_DIST_OFFSET;
+}
+
+static int cpu_is_virt_capable(void)
+{
+ return read_id_pfr1() & (1 << 12);
+}
+
+static unsigned long get_gicc_base_address(void)
+{
+ unsigned long adr = get_cbar();
+
+ if (cpu_is_virt_capable())
+ adr += GIC_CPU_OFFSET_A15;
+ else
+ adr += GIC_CPU_OFFSET_A9;
+
+ return adr;
+}
+
+#define GICD_IGROUPRn 0x0080
+
+int armv7_init_nonsec(void)
+{
+ void __iomem *gicd = IOMEM(get_gicd_base_address());
+ unsigned itlinesnr, i;
+ u32 val;
+
+	/*
+	 * The SCR register will be set directly in the monitor mode handler;
+	 * according to the spec one should not tinker with it in SVC mode
+	 * while in secure state. Do not try to read it once in non-secure
+	 * state, as any access to it will trap.
+	 */
+
+ /* enable the GIC distributor */
+ val = readl(gicd + GICD_CTLR);
+ val |= 0x3;
+ writel(val, gicd + GICD_CTLR);
+
+ /* TYPER[4:0] contains an encoded number of available interrupts */
+ itlinesnr = readl(gicd + GICD_TYPER) & 0x1f;
+
+ /*
+ * Set all bits in the GIC group registers to one to allow access
+ * from non-secure state. The first 32 interrupts are private per
+ * CPU and will be set later when enabling the GIC for each core
+ */
+ for (i = 1; i <= itlinesnr; i++)
+ writel(0xffffffff, gicd + GICD_IGROUPRn + 4 * i);
+
+ return 0;
+}
+
+/*
+ * __armv7_secure_monitor_install - install secure monitor
+ *
+ * This function is entered in secure mode. It installs the secure
+ * monitor code and enters it using a smc call. This function is executed
+ * on every CPU. We leave this function in nonsecure mode.
+ */
+int __armv7_secure_monitor_install(void)
+{
+ struct arm_smccc_res res;
+ void __iomem *gicd = IOMEM(get_gicd_base_address());
+ void __iomem *gicc = IOMEM(get_gicc_base_address());
+ u32 nsacr;
+
+ writel(0xffffffff, gicd + GICD_IGROUPRn);
+
+ writel(0x3, gicc + GICC_CTLR);
+ writel(0xff, gicc + GICC_PMR);
+
+ nsacr = read_nsacr();
+ nsacr |= 0x00043fff; /* all copros allowed in non-secure mode */
+ write_nsacr(nsacr);
+
+ write_mvbar((unsigned long)secure_monitor_init_vectors);
+
+ isb();
+
+ /* Initialize the secure monitor */
+ arm_smccc_smc(0, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ /* We're in nonsecure mode now */
+
+ return 0;
+}
+
+static bool armv7_have_security_extensions(void)
+{
+ return (read_id_pfr1() & 0xf0) != 0;
+}
+
+/*
+ * armv7_secure_monitor_install - install secure monitor
+ *
+ * This function is entered in secure mode. It installs the secure
+ * monitor code and enters it using a smc call. This function is executed
+ * once on the primary CPU only. We leave this function in nonsecure
+ * mode.
+ */
+int armv7_secure_monitor_install(void)
+{
+ int mmuon;
+ unsigned long ttb, vbar;
+
+ if (!armv7_have_security_extensions()) {
+ pr_err("Security extensions not implemented.\n");
+ return -EINVAL;
+ }
+
+ mmuon = get_cr() & CR_M;
+
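+	/*
+	 * TTBR0 and VBAR are banked between the secure and the non-secure
+	 * state. Save the secure copies here so they can be written to the
+	 * non-secure bank once the world switch has happened.
+	 */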
+ vbar = get_vbar();
+
+ asm volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r"(ttb));
+
+ armv7_init_nonsec();
+ __armv7_secure_monitor_install();
+
+ asm volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r"(ttb));
+
+ set_vbar(vbar);
+
+ if (mmuon) {
+		/*
+		 * If the MMU was already turned on in secure mode, enable it
+		 * in non-secure mode as well
+		 */
+ __mmu_cache_on();
+ }
+
+ pr_debug("Initialized secure monitor\n");
+
+ return 0;
+}
+
+/*
+ * of_secure_monitor_fixup - reserve memory region for secure monitor
+ *
+ * We currently do not support putting the secure monitor into onchip RAM,
+ * hence it runs in SDRAM and we must reserve the memory region so that it
+ * won't get overwritten by the Kernel.
+ * Beware: despite the name this is not secure in any way. The Kernel obeys
+ * the reserve map, but only because it's nice. It could always overwrite the
+ * secure monitor and hijack secure mode.
+ */
+static int of_secure_monitor_fixup(struct device_node *root, void *unused)
+{
+ unsigned long res_start, res_end;
+
+ res_start = (unsigned long)_stext;
+ res_end = (unsigned long)__bss_stop;
+
+ of_add_reserve_entry(res_start, res_end);
+
+ pr_debug("Reserved memory range from 0x%08lx to 0x%08lx\n", res_start, res_end);
+
+ return 0;
+}
+
+static enum arm_security_state bootm_secure_state;
+
+static const char * const bootm_secure_state_names[] = {
+ [ARM_STATE_SECURE] = "secure",
+ [ARM_STATE_NONSECURE] = "nonsecure",
+ [ARM_STATE_HYP] = "hyp",
+};
+
+enum arm_security_state bootm_arm_security_state(void)
+{
+ return bootm_secure_state;
+}
+
+const char *bootm_arm_security_state_name(enum arm_security_state state)
+{
+ return bootm_secure_state_names[state];
+}
+
+static int sm_init(void)
+{
+ of_register_fixup(of_secure_monitor_fixup, NULL);
+
+ globalvar_add_simple_enum("bootm.secure_state",
+ (unsigned int *)&bootm_secure_state,
+ bootm_secure_state_names,
+ ARRAY_SIZE(bootm_secure_state_names));
+
+ return 0;
+}
+device_initcall(sm_init);
\ No newline at end of file
diff --git a/arch/arm/cpu/sm_as.S b/arch/arm/cpu/sm_as.S
new file mode 100644
index 0000000000..09580e75de
--- /dev/null
+++ b/arch/arm/cpu/sm_as.S
@@ -0,0 +1,168 @@
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm-generic/memory_layout.h>
+#include <asm/secure.h>
+#include <asm/system.h>
+
+.arch_extension sec
+.arch_extension virt
+
+ .section ".text","ax"
+ .arm
+
+ .align 5
+.globl secure_monitor_init_vectors
+secure_monitor_init_vectors:
+1: b 1b /* reset */
+1: b 1b /* undefined instruction */
+ b secure_monitor_init /* software interrupt (SWI) */
+1: b 1b /* prefetch abort */
+1: b 1b /* data abort */
+1: b 1b /* (reserved) */
+1: b 1b /* irq (interrupt) */
+1: b 1b /* fiq (fast interrupt) */
+
+#define CPUID_ARM_GENTIMER_MASK (0xF << CPUID_ARM_GENTIMER_SHIFT)
+#define CPUID_ARM_GENTIMER_SHIFT 16
+
+#define CPUID_ARM_VIRT_MASK (0xF << CPUID_ARM_VIRT_SHIFT)
+#define CPUID_ARM_VIRT_SHIFT 12
+
+.macro is_cpu_virt_capable tmp
+ mrc p15, 0, \tmp, c0, c1, 1 @ read ID_PFR1
+ and \tmp, \tmp, #CPUID_ARM_VIRT_MASK @ mask virtualization bits
+ cmp \tmp, #(1 << CPUID_ARM_VIRT_SHIFT)
+.endm
+
+@ Requires dense and single-cluster CPU ID space
+ENTRY(psci_get_cpu_id)
+ mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */
+ and r0, r0, #0xff /* return CPU ID in cluster */
+ bx lr
+ENDPROC(psci_get_cpu_id)
+
+ENTRY(secure_monitor_stack_setup)
+ mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */
+ and r0, r0, #0xff /* CPU ID => r0 */
+
+	@ stack top = __secure_stack_end - (cpuid << ARM_SECURE_STACK_SHIFT)
+ ldr r1, =__secure_stack_end
+ sub r0, r1, r0, LSL #ARM_SECURE_STACK_SHIFT
+ sub r0, r0, #4 @ Save space for target PC
+
+ mov sp, r0
+ bx lr
+ENDPROC(secure_monitor_stack_setup)
+
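+/*
+ * One-shot SMC handler entered via secure_monitor_init_vectors on each
+ * CPU: set up the per-CPU monitor stack, switch MVBAR to the runtime
+ * vectors, disable the MMU if it was enabled, set the NS bit in the SCR
+ * and return to the caller in non-secure SVC mode, with HVC enabled and
+ * HVBAR initialized on virtualization-capable cores.
+ */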
+secure_monitor_init:
+ mov r3, lr
+
+ bl secure_monitor_stack_setup
+
+ push {r4-r7}
+ mov r7, r3
+ ldr r5, =secure_monitor_vectors @ Switch MVBAR to secure_monitor_vectors
+ mcr p15, 0, r5, c12, c0, 1
+ isb
+
+#ifdef CONFIG_MMU
+ mrc p15, 0, r5, c1, c0, 0
+ tst r5, #CR_M
+ beq 1f
+ bl __mmu_cache_off
+1:
+#endif
+ mrc p15, 0, r5, c1, c1, 0 @ read SCR
+ bic r5, r5, #0x4a @ clear IRQ, EA, nET bits
+ orr r5, r5, #0x31 @ enable NS, AW, FW bits
+ @ FIQ preserved for secure mode
+ mov r6, #SVC_MODE @ default mode is SVC
+
+ is_cpu_virt_capable r4
+
+ orreq r5, r5, #0x100 @ allow HVC instruction
+
+ mcr p15, 0, r5, c1, c1, 0 @ write SCR (with NS bit set)
+ isb
+
+ mrceq p15, 0, r0, c12, c0, 1 @ get MVBAR value
+ mcreq p15, 4, r0, c12, c0, 0 @ write HVBAR
+
+ bne 1f
+
+ @ Reset CNTVOFF to 0 before leaving monitor mode
+ mrc p15, 0, r4, c0, c1, 1 @ read ID_PFR1
+ ands r4, r4, #CPUID_ARM_GENTIMER_MASK @ test arch timer bits
+ movne r4, #0
+ mcrrne p15, 4, r4, r4, c14 @ Reset CNTVOFF to zero
+1:
+ mov lr, r7
+ mov ip, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT) @ Set A, I and F
+ tst lr, #1 @ Check for Thumb PC
+ orrne ip, ip, #PSR_T_BIT @ Set T if Thumb
+ orr ip, ip, r6 @ Slot target mode in
+ msr spsr_cxfs, ip @ Set full SPSR
+ pop {r4-r7}
+ movs pc, lr @ ERET to non-secure
+
+ .align 5
+secure_monitor_vectors:
+1: b 1b /* reset */
+1: b 1b /* undefined instruction */
+ b secure_monitor /* software interrupt (SWI) */
+1: b 1b /* prefetch abort */
+1: b 1b /* data abort */
+1: b hyp_trap /* (reserved) */
+1: b 1b /* irq (interrupt) */
+1: b 1b /* fiq (fast interrupt) */
+
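+/*
+ * Runtime SMC handler: switch to secure state, call the C dispatcher
+ * psci_entry() with r0-r6 plus a pointer to a result structure
+ * allocated on the monitor stack, return its four words in r0-r3 and
+ * drop back to the non-secure caller.
+ */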
+secure_monitor:
+ push {r4-r7,lr}
+
+ @ Switch to secure mode
+ mrc p15, 0, r7, c1, c1, 0
+ bic r4, r7, #1
+ mcr p15, 0, r4, c1, c1, 0
+ isb
+
+ /* r0-r6: Arguments */
+ sub sp, sp, #4*4 @ allocate result structure on stack
+ mov r12, sp
+ push {r4-r6, r12}
+ bl psci_entry
+ pop {r4-r6, r12}
+ ldm r12, {r0-r3}
+ add sp, sp, #4*4
+ /* r0-r3: results, r4-r14: preserved */
+
+ @ back to non-secure
+ mcr p15, 0, r7, c1, c1, 0
+
+ pop {r4-r7, lr}
+ movs pc, lr
+
+hyp_trap:
+ mrs lr, elr_hyp @ for older asm: .byte 0x00, 0xe3, 0x0e, 0xe1
+	mov pc, lr				@ do not switch modes, but
+						@ return to caller
+
+ENTRY(armv7_switch_to_hyp)
+ mov r0, lr
+ mov r1, sp @ save SVC copy of LR and SP
+ isb
+ hvc #0 @ for older asm: .byte 0x70, 0x00, 0x40, 0xe1
+ mov sp, r1
+ mov lr, r0 @ restore SVC copy of LR and SP
+
+ bx lr
+ENDPROC(armv7_switch_to_hyp)
+
+ENTRY(psci_cpu_entry)
+ mrc p15, 0, r0, c1, c0, 1 @ ACTLR
+ orr r0, r0, #(1 << 6) @ Set SMP bit
+ mcr p15, 0, r0, c1, c0, 1 @ ACTLR
+
+ bl secure_monitor_stack_setup
+ bl psci_cpu_entry_c
+
+ENDPROC(psci_cpu_entry)
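
For quick testing, the DEBUG-only smc command above can be exercised like
this (a sketch; the output lines are paraphrased from the printf calls in
psci.c, and the PSCI messages only appear with a debug putc hook installed):

	barebox$ smc -n          # install the monitor, drop to nonsecure mode
	barebox$ smc -i
	found psci version 1.0
	barebox$ smc -c          # start CPU 1 at second_entry
	2nd CPU online, now turn off again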