From 7ba0f2d29959256025ece9ae961a6c3421445a7f Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Wed, 25 Jul 2018 16:44:37 +0200
Subject: ARM: mmu: fix cache flushing when replacing a section with a PTE

When replacing a section with a PTE, we must make sure that the newly
initialized PTE entries are flushed from the cache before changing the
entry in the TTB. Otherwise an L1 TLB miss causes the hardware pagetable
walker to walk into a PTE with undefined content, leading to undefined
behaviour.

Move all the necessary cache flushing to arm_create_pte(), to avoid any
caller getting this wrong in the future.

Fixes: e3e54c644180 (ARM: mmu: Implement on-demand PTE allocation)
Signed-off-by: Lucas Stach
---
 arch/arm/cpu/mmu.c | 77 +++++++++++++++++++++++------------------------------
 1 file changed, 32 insertions(+), 45 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index f8db39f49b..88ee11cb48 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -85,34 +85,6 @@ static void arm_mmu_not_initialized_error(void)
 	panic("MMU not initialized\n");
 }
 
-/*
- * Create a second level translation table for the given virtual address.
- * We initially create a flat uncached mapping on it.
- * Not yet exported, but may be later if someone finds use for it.
- */
-static u32 *arm_create_pte(unsigned long virt, uint32_t flags)
-{
-	u32 *table;
-	int i;
-
-	virt = ALIGN_DOWN(virt, PGDIR_SIZE);
-
-	table = xmemalign(PTRS_PER_PTE * sizeof(u32),
-			  PTRS_PER_PTE * sizeof(u32));
-
-	if (!ttb)
-		arm_mmu_not_initialized_error();
-
-	ttb[pgd_index(virt)] = (unsigned long)table | PMD_TYPE_TABLE;
-
-	for (i = 0; i < PTRS_PER_PTE; i++) {
-		table[i] = virt | PTE_TYPE_SMALL | flags;
-		virt += PAGE_SIZE;
-	}
-
-	return table;
-}
-
 static bool pgd_type_table(u32 pgd)
 {
 	return (pgd & PMD_TYPE_MASK) == PMD_TYPE_TABLE;
@@ -152,6 +124,38 @@ static void dma_inv_range(unsigned long start, unsigned long end)
 	__dma_inv_range(start, end);
 }
 
+/*
+ * Create a second level translation table for the given virtual address.
+ * We initially create a flat uncached mapping on it.
+ * Not yet exported, but may be later if someone finds use for it.
+ */
+static u32 *arm_create_pte(unsigned long virt, uint32_t flags)
+{
+	u32 *table;
+	int i, ttb_idx;
+
+	virt = ALIGN_DOWN(virt, PGDIR_SIZE);
+
+	table = xmemalign(PTRS_PER_PTE * sizeof(u32),
+			  PTRS_PER_PTE * sizeof(u32));
+
+	if (!ttb)
+		arm_mmu_not_initialized_error();
+
+	ttb_idx = pgd_index(virt);
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		table[i] = virt | PTE_TYPE_SMALL | flags;
+		virt += PAGE_SIZE;
+	}
+	dma_flush_range(table, PTRS_PER_PTE * sizeof(u32));
+
+	ttb[ttb_idx] = (unsigned long)table | PMD_TYPE_TABLE;
+	dma_flush_range(&ttb[ttb_idx], sizeof(u32));
+
+	return table;
+}
+
 int arch_remap_range(void *start, size_t size, unsigned flags)
 {
 	u32 addr = (u32)start;
@@ -227,12 +231,6 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
 			table = arm_create_pte(addr, pte_flags_cached);
 			pte = find_pte(addr);
 			BUG_ON(!pte);
-			/*
-			 * We just split this section and
-			 * modified it's Level 1 descriptor,
-			 * so it needs to be flushed.
-			 */
-			dma_flush_range(pgd, sizeof(*pgd));
 		}
 
 		for (i = 0; i < num_ptes; i++) {
@@ -240,17 +238,6 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
 			pte[i] |= pte_flags | PTE_TYPE_SMALL;
 		}
 
-		if (table) {
-			/*
-			 * If we just created a new page
-			 * table, the whole table would have
-			 * to be flushed, not just PTEs that
-			 * we touched when re-mapping.
-			 */
-			pte = table;
-			num_ptes = PTRS_PER_PTE;
-		}
-
 		dma_flush_range(pte, num_ptes * sizeof(u32));
 	}
 
-- 
cgit v1.2.3
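
The ordering rule this patch enforces can also be shown outside the barebox
tree. Below is a minimal stand-alone sketch, assuming pagetables live in
cacheable memory and a hardware walker that does not snoop the D-cache;
flush_dcache_range(), alloc_pte_table(), l1_table and the constants are
hypothetical stand-ins for illustration, not the barebox API.

#include <stddef.h>
#include <stdint.h>

#define PTRS_PER_PTE	256	/* 2nd-level entries covering one 1 MiB section */
#define PAGE_SIZE	4096
#define PTE_TYPE_SMALL	0x2	/* ARM short-descriptor small-page type bits */
#define PMD_TYPE_TABLE	0x1	/* L1 descriptor type: points to a PTE table */

/* Hypothetical helpers standing in for the real cache/alloc API. */
extern void flush_dcache_range(void *start, size_t size);
extern uint32_t *alloc_pte_table(void);

static uint32_t *l1_table;	/* first-level table, what barebox calls ttb */

void split_section(unsigned long virt, uint32_t flags, unsigned int idx)
{
	uint32_t *pte = alloc_pte_table();
	int i;

	/* 1. Fill the new 2nd-level table; these writes may sit in D-cache. */
	for (i = 0; i < PTRS_PER_PTE; i++, virt += PAGE_SIZE)
		pte[i] = virt | PTE_TYPE_SMALL | flags;

	/* 2. Clean it to RAM *before* the walker can ever reach it. */
	flush_dcache_range(pte, PTRS_PER_PTE * sizeof(uint32_t));

	/*
	 * 3. Only now install the L1 descriptor, and clean that single
	 * entry too, so a subsequent TLB miss walks valid memory.
	 */
	l1_table[idx] = (uintptr_t)pte | PMD_TYPE_TABLE;
	flush_dcache_range(&l1_table[idx], sizeof(uint32_t));
}

Before the patch, steps 2 and 3 effectively ran in the opposite order: the
old arm_create_pte() installed the L1 descriptor first and left the flushing
to its callers in arch_remap_range(), leaving a window in which a TLB miss
could walk a table that existed only in the D-cache.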