Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/bitops.h        |   5
-rw-r--r--  arch/arm/include/asm/cache.h         |   9
-rw-r--r--  arch/arm/include/asm/mmu.h           |  14
-rw-r--r--  arch/arm/include/asm/pgtable64.h     | 140
-rw-r--r--  arch/arm/include/asm/swab.h          |   4
-rw-r--r--  arch/arm/include/asm/system.h        |  46
-rw-r--r--  arch/arm/include/asm/system_info.h   |  38
7 files changed, 252 insertions(+), 4 deletions(-)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 138ebe2d8c..b51225efe5 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -177,6 +177,11 @@ static inline unsigned long ffz(unsigned long word)
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#endif /* __ARM__USE_GENERIC_FF */
+
+#if __LINUX_ARM_ARCH__ == 8
+#include <asm-generic/bitops/__fls.h>
+#endif
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
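
Note: ARMv8 cannot use the 32-bit CLZ assembly earlier in this header, so the
generic __fls() is pulled in for it. It returns the bit index of the most
significant set bit (undefined for 0); a minimal sketch of the equivalent,
assuming a GCC-style compiler (the name __fls_sketch is illustrative):

    /* sketch only: asm-generic/bitops/__fls.h behaves like this */
    static inline unsigned long __fls_sketch(unsigned long word)
    {
        /* bit index of the highest set bit, e.g. __fls(0x10UL) == 4 */
        return sizeof(word) * 8 - 1 - __builtin_clzl(word);
    }
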
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 2f6eab0e82..8fcdb64f74 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -1,9 +1,18 @@
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
+#ifdef CONFIG_CPU_64v8
+extern void v8_invalidate_icache_all(void);
+extern void v8_dcache_all(void);
+#endif
+
static inline void flush_icache(void)
{
+#if __LINUX_ARM_ARCH__ <= 7
asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
+#else
+ v8_invalidate_icache_all();
+#endif
}
int arm_set_cache_functions(void);
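
Note: v8_invalidate_icache_all() is only declared here; the implementation
comes with the ARMv8 cache support elsewhere in this series. A hedged sketch
of the canonical AArch64 sequence such a routine performs (the real routine
is assembly and may differ in detail):

    /* sketch: invalidate the whole I-cache, then synchronize */
    static inline void v8_invalidate_icache_all_sketch(void)
    {
        asm volatile(
            "ic  iallu\n"   /* invalidate all I-cache to PoU */
            "dsb nsh\n"     /* complete the invalidation */
            "isb\n"         /* refetch instructions after it */
            : : : "memory");
    }
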
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 8de6544657..f68ab37143 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,16 +6,24 @@
#include <malloc.h>
#include <xfuncs.h>
+#ifdef CONFIG_CPU_64v8
+#include <asm/pgtable64.h>
+
+#define DEV_MEM (PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF | PMD_TYPE_SECT)
+#define CACHED_MEM (PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
+#define UNCACHED_MEM (PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
+#else
#include <asm/pgtable.h>
#define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
#define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
+#endif
+
+
struct arm_memory;
-static inline void mmu_enable(void)
-{
-}
+void mmu_enable(void);
void mmu_disable(void);
static inline void arm_create_section(unsigned long virt, unsigned long phys, int size_m,
unsigned int flags)
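
Note: DEV_MEM/CACHED_MEM/UNCACHED_MEM bundle the AttrIndx, shareability,
access-flag and descriptor-type bits of an ARMv8 block descriptor. A hedged
sketch of how they combine with a physical address when filling a level-1
table, using the macros from asm/pgtable64.h (create_l1_block() is
illustrative, not part of this patch):

    #include <stdint.h>

    /* sketch: write one level-1 block entry covering L1_XLAT_SIZE bytes */
    static void create_l1_block(uint64_t *ttb, uint64_t virt,
                                uint64_t phys, uint64_t attr)
    {
        int idx = (virt & L1_ADDR_MASK) >> L1_ADDR_SHIFT;

        /* the block output address must be L1_XLAT_SIZE aligned */
        ttb[idx] = (phys & ~(L1_XLAT_SIZE - 1)) | attr;
    }

    /* e.g. map DRAM at 0x40000000 cacheable:
     *   create_l1_block(ttb, 0x40000000, 0x40000000, CACHED_MEM); */
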
diff --git a/arch/arm/include/asm/pgtable64.h b/arch/arm/include/asm/pgtable64.h
new file mode 100644
index 0000000000..20bea5b28a
--- /dev/null
+++ b/arch/arm/include/asm/pgtable64.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PGTABLE64_H
+#define __ASM_PGTABLE64_H
+
+#define UL(x) _AC(x, UL)
+
+#define UNUSED_DESC 0x6EbAAD0BBADbA6E0
+
+#define VA_START 0x0
+#define BITS_PER_VA 33
+
+/* Granule size of 4KB is being used */
+#define GRANULE_SIZE_SHIFT 12
+#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
+#define XLAT_ADDR_MASK ((1UL << BITS_PER_VA) - GRANULE_SIZE)
+#define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
+
+#define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
+#define L1_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
+#define L2_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
+#define L3_ADDR_SHIFT (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
+
+
+#define L1_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
+
+/* These macros give the size of the region addressed by each entry of an
+   xlat table at any given level */
+#define L3_XLAT_SIZE (1UL << L3_ADDR_SHIFT)
+#define L2_XLAT_SIZE (1UL << L2_ADDR_SHIFT)
+#define L1_XLAT_SIZE (1UL << L1_ADDR_SHIFT)
+
+#define GRANULE_MASK GRANULE_SIZE
+
+
+/*
+ * Level 2 descriptor (PMD).
+ */
+#define PMD_TYPE_MASK (3 << 0)
+#define PMD_TYPE_FAULT (0 << 0)
+#define PMD_TYPE_TABLE (3 << 0)
+#define PMD_TYPE_SECT (1 << 0)
+#define PMD_TABLE_BIT (1 << 1)
+
+/*
+ * Section
+ */
+#define PMD_SECT_VALID (1 << 0)
+#define PMD_SECT_USER (1 << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (1 << 7) /* AP[2] */
+#define PMD_SECT_S (3 << 8)
+#define PMD_SECT_AF (1 << 10)
+#define PMD_SECT_NG (1 << 11)
+#define PMD_SECT_CONT (1 << 52)
+#define PMD_SECT_PXN (1 << 53)
+#define PMD_SECT_UXN (1 << 54)
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_ATTRINDX(t) ((t) << 2)
+#define PMD_ATTRINDX_MASK (7 << 2)
+
+/*
+ * Level 3 descriptor (PTE).
+ */
+#define PTE_TYPE_MASK (3 << 0)
+#define PTE_TYPE_FAULT (0 << 0)
+#define PTE_TYPE_PAGE (3 << 0)
+#define PTE_TABLE_BIT (1 << 1)
+#define PTE_USER (1 << 6) /* AP[1] */
+#define PTE_RDONLY (1 << 7) /* AP[2] */
+#define PTE_SHARED (3 << 8) /* SH[1:0], inner shareable */
+#define PTE_AF (1 << 10) /* Access Flag */
+#define PTE_NG (1 << 11) /* nG */
+#define PTE_DBM (1 << 51) /* Dirty Bit Management */
+#define PTE_CONT (1 << 52) /* Contiguous range */
+#define PTE_PXN (1 << 53) /* Privileged XN */
+#define PTE_UXN (1 << 54) /* User XN */
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PTE_ATTRINDX(t) ((t) << 2)
+#define PTE_ATTRINDX_MASK (7 << 2)
+
+/*
+ * Memory types available.
+ */
+#define MT_DEVICE_nGnRnE 0
+#define MT_DEVICE_nGnRE 1
+#define MT_DEVICE_GRE 2
+#define MT_NORMAL_NC 3
+#define MT_NORMAL 4
+#define MT_NORMAL_WT 5
+
+/*
+ * TCR flags.
+ */
+#define TCR_T0SZ(x) ((64 - (x)) << 0)
+#define TCR_IRGN_NC (0 << 8)
+#define TCR_IRGN_WBWA (1 << 8)
+#define TCR_IRGN_WT (2 << 8)
+#define TCR_IRGN_WBNWA (3 << 8)
+#define TCR_IRGN_MASK (3 << 8)
+#define TCR_ORGN_NC (0 << 10)
+#define TCR_ORGN_WBWA (1 << 10)
+#define TCR_ORGN_WT (2 << 10)
+#define TCR_ORGN_WBNWA (3 << 10)
+#define TCR_ORGN_MASK (3 << 10)
+#define TCR_SHARED_NON (0 << 12)
+#define TCR_SHARED_OUTER (2 << 12)
+#define TCR_SHARED_INNER (3 << 12)
+#define TCR_TG0_4K (0 << 14)
+#define TCR_TG0_64K (1 << 14)
+#define TCR_TG0_16K (2 << 14)
+#define TCR_EL1_IPS_BITS (UL(3) << 32) /* 42 bits physical address */
+#define TCR_EL2_IPS_BITS (3 << 16) /* 42 bits physical address */
+#define TCR_EL3_IPS_BITS (3 << 16) /* 42 bits physical address */
+
+#define TCR_EL1_RSVD (1 << 31)
+#define TCR_EL2_RSVD (1 << 31 | 1 << 23)
+#define TCR_EL3_RSVD (1 << 31 | 1 << 23)
+
+#endif
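
Note: with a 4KiB granule, BITS_RESOLVED_PER_LVL is 9, so the level shifts
work out to 30/21/12 and a level-1, -2 and -3 entry covers 1GiB, 2MiB and
4KiB respectively. A hedged sketch of the index math a table walker built on
these macros performs (level_index() is illustrative):

    #include <stdint.h>

    /* sketch: extract the per-level table index from a virtual address */
    static inline uint64_t level_index(uint64_t va, int level)
    {
        switch (level) {
        case 1:  return (va & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
        case 2:  return (va & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
        default: return (va & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
        }
    }

    /* VA 0x40200000 -> L1 index 1 (1GiB slot), L2 index 1 (2MiB slot),
     * L3 index 0, matching the 30/21/12 shifts above. */
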
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index 9997ad20ef..3795437831 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -33,7 +33,11 @@ static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
+#if __LINUX_ARM_ARCH__ == 8
+ __asm__ ("rev %w0, %w1" : "=r" (x) : "r" (x));
+#else
__asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+#endif
return x;
}
#define __arch_swab32 __arch_swab32
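
Note: on AArch64 the "%w" operand modifier selects the 32-bit W register, so
"rev w0, w0" swaps exactly four bytes; the plain X-register form would
reverse all eight bytes of the 64-bit register. The inline asm matches the
compiler builtin, shown here as a hedged self-check:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t x = 0x12345678;

        /* same result __arch_swab32() produces via "rev %w0, %w1" */
        assert(__builtin_bswap32(x) == 0x78563412);
        return 0;
    }
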
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index b118a42609..57c76186b4 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -3,7 +3,11 @@
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#ifdef CONFIG_CPU_64v8
+#define dsb() __asm__ __volatile__ ("dsb sy" : : : "memory")
+#else
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#endif
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
@@ -57,17 +61,58 @@
#define CR_TE (1 << 30) /* Thumb exception enable */
#ifndef __ASSEMBLY__
+#if __LINUX_ARM_ARCH__ >= 7
+static inline unsigned int current_el(void)
+{
+ unsigned int el;
+ asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
+ return el >> 2;
+}
+
+static inline unsigned long read_mpidr(void)
+{
+ unsigned long val;
+
+ asm volatile("mrs %0, mpidr_el1" : "=r" (val));
+
+ return val;
+}
+#endif
static inline unsigned int get_cr(void)
{
unsigned int val;
+
+#ifdef CONFIG_CPU_64v8
+ unsigned int el = current_el();
+ if (el == 1)
+ asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
+ else if (el == 2)
+ asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
+ else
+ asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");
+#else
asm volatile ("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
+#endif
+
return val;
}
static inline void set_cr(unsigned int val)
{
+#ifdef CONFIG_CPU_64v8
+ unsigned int el;
+
+ el = current_el();
+ if (el == 1)
+ asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
+ else if (el == 2)
+ asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
+ else
+ asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");
+#else
asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
: : "r" (val) : "cc");
+#endif
isb();
}
@@ -90,7 +135,6 @@ static inline void set_vbar(unsigned int vbar)
static inline unsigned int get_vbar(void) { return 0; }
static inline void set_vbar(unsigned int vbar) {}
#endif
-
#endif
#endif /* __ASM_ARM_SYSTEM_H */
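
Note: current_el() lets get_cr()/set_cr() pick the right SCTLR_ELx, so
callers stay exception-level agnostic. A hedged usage sketch
(icache_on_sketch() is illustrative; barebox has its own cache-control entry
points), assuming the usual CR_I definition from this header:

    /* sketch: enable the instruction cache at whatever EL we run at */
    static inline void icache_on_sketch(void)
    {
        unsigned int cr = get_cr();  /* reads sctlr_el1/2/3 as appropriate */

        cr |= CR_I;                  /* SCTLR.I: instruction cache enable */
        set_cr(cr);                  /* write back; set_cr() ends with isb() */
    }
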
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index 0761848a1a..25fffd2681 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -13,6 +13,7 @@
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9
+#define CPU_ARCH_ARMv8 10
#define CPU_IS_ARM720 0x41007200
#define CPU_IS_ARM720_MASK 0xff00fff0
@@ -41,6 +42,12 @@
#define CPU_IS_CORTEX_A15 0x410fc0f0
#define CPU_IS_CORTEX_A15_MASK 0xff0ffff0
+#define CPU_IS_CORTEX_A53 0x410fd034
+#define CPU_IS_CORTEX_A53_MASK 0xff0ffff0
+
+#define CPU_IS_CORTEX_A57 0x411fd070
+#define CPU_IS_CORTEX_A57_MASK 0xff0ffff0
+
#define CPU_IS_PXA250 0x69052100
#define CPU_IS_PXA250_MASK 0xfffff7f0
@@ -112,6 +119,19 @@
#define cpu_is_cortex_a15() (0)
#endif
+#ifdef CONFIG_CPU_64v8
+#ifdef ARM_ARCH
+#define ARM_MULTIARCH
+#else
+#define ARM_ARCH CPU_ARCH_ARMv8
+#endif
+#define cpu_is_cortex_a53() cpu_is_arm(CORTEX_A53)
+#define cpu_is_cortex_a57() cpu_is_arm(CORTEX_A57)
+#else
+#define cpu_is_cortex_a53() (0)
+#define cpu_is_cortex_a57() (0)
+#endif
+
#ifndef __ASSEMBLY__
#ifdef ARM_MULTIARCH
@@ -133,6 +153,23 @@ static inline int arm_early_get_cpu_architecture(void)
if (cpu_arch)
cpu_arch += CPU_ARCH_ARMv3;
} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_CPU_64v8
+ unsigned int isar2;
+
+ __asm__ __volatile__(
+ "mrs %0, id_isar2_el1\n"
+ : "=r" (isar2)
+ :
+ : "memory");
+
+
+ /* Check the Load/Store-acquire field to tell whether this is ARMv8 */
+
+ if (isar2 & 0x2)
+ cpu_arch = CPU_ARCH_ARMv8;
+ else
+ cpu_arch = CPU_ARCH_UNKNOWN;
+#else
unsigned int mmfr0;
/* Revised CPUID format. Read the Memory Model Feature
@@ -149,6 +186,7 @@ static inline int arm_early_get_cpu_architecture(void)
cpu_arch = CPU_ARCH_UNKNOWN;
} else
cpu_arch = CPU_ARCH_UNKNOWN;
+#endif
return cpu_arch;
}
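
Note: the ARMv8 probe keys off the Load/Store-acquire field of ID_ISAR2
rather than the MMFR0 check used on 32-bit parts. A hedged sketch of how the
new constants and cpu_is_cortex_*() helpers are consumed (example() is
illustrative, not part of the patch):

    /* sketch: pick a code path after the early architecture probe */
    void example(void)
    {
        if (arm_early_get_cpu_architecture() == CPU_ARCH_ARMv8) {
            if (cpu_is_cortex_a53())
                /* A53-specific setup would go here */;
            else if (cpu_is_cortex_a57())
                /* A57-specific setup would go here */;
        }
    }
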