author		Stephen Rothwell <sfr@canb.auug.org.au>	2019-05-30 15:51:52 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2019-05-30 15:52:54 +1000
commit		4a66f47eebe483543b1f34f36144fe5844946bdf (patch)
tree		42e679b8f200178de193d8aa6188f86737f90676
parent		f4436393df41f989bba9481ae74529b3f08e0f4b (diff)
Revert "mm/vmalloc: hugepage vmalloc mappings"
This reverts commit c353e29979762d1489a39ccc1d391726a28e3e26.
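In effect, the revert takes vmalloc back to backing every allocation with order-0 (single) pages mapped by ordinary PTEs, rather than opportunistically using PMD-sized huge pages. A minimal sketch of the restored allocation path, condensed from the post-revert __vmalloc_area_node() in the diff below (error paths and the NUMA_NO_NODE special case are simplified; this is not a drop-in function):

	/* one order-0 page per slot -- the vm_struct page_shift field is gone */
	for (i = 0; i < area->nr_pages; i++) {
		struct page *page = alloc_pages_node(node,
						     alloc_mask|highmem_mask, 0);

		if (unlikely(!page))
			goto fail;	/* pages allocated so far are freed in __vunmap() */
		area->pages[i] = page;
	}

	/* map the individual pages with small PTEs; no vmap_hpages_range() */
	if (map_vm_area(area, prot, pages))
		goto fail;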
-rw-r--r--	include/linux/vmalloc.h	|   1 -
-rw-r--r--	mm/vmalloc.c		| 132 ++++++++--------------------
2 files changed, 28 insertions(+), 105 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4c92dc608928..812bea5866d6 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -42,7 +42,6 @@ struct vm_struct {
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
-	unsigned int		page_shift;
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
 	const void		*caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6370e0876a2c..5b9232e015fc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -36,7 +36,6 @@
 #include <linux/rbtree_augmented.h>
 
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
@@ -440,41 +439,6 @@ static int vmap_pages_range(unsigned long start, unsigned long end,
 	return ret;
 }
 
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int vmap_hpages_range(unsigned long start, unsigned long end,
-			     pgprot_t prot, struct page **pages,
-			     unsigned int page_shift)
-{
-	unsigned long addr = start;
-	unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
-
-	for (i = 0; i < nr; i++) {
-		int err;
-
-		err = vmap_range_noflush(addr,
-					 addr + (PAGE_SIZE << page_shift),
-					 __pa(page_address(pages[i])), prot,
-					 page_shift);
-		if (err)
-			return err;
-
-		addr += PAGE_SIZE << page_shift;
-	}
-	flush_cache_vmap(start, end);
-
-	return nr;
-}
-#else
-static int vmap_hpages_range(unsigned long start, unsigned long end,
-			     pgprot_t prot, struct page **pages,
-			     unsigned int page_shift)
-{
-	BUG_ON(page_shift != PAGE_SIZE);
-	return vmap_pages_range(start, end, prot, pages);
-}
-#endif
-
-
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -497,7 +461,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
 	struct page *page = NULL;
-	pgd_t *pgd;
+	pgd_t *pgd = pgd_offset_k(addr);
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -509,38 +473,27 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	 */
 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 
-	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd))
 		return NULL;
-
 	p4d = p4d_offset(pgd, addr);
 	if (p4d_none(*p4d))
 		return NULL;
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-	if (p4d_large(*p4d))
-		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
-#endif
-	if (WARN_ON_ONCE(p4d_bad(*p4d)))
-		return NULL;
-
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
-		return NULL;
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-	if (pud_large(*pud))
-		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-#endif
-	if (WARN_ON_ONCE(pud_bad(*pud)))
-		return NULL;
-
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-	if (pmd_large(*pmd))
-		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-#endif
-	if (WARN_ON_ONCE(pmd_bad(*pmd)))
+	pmd = pmd_offset(pud, addr);
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
@@ -548,7 +501,6 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (pte_present(pte))
 		page = pte_page(pte);
 	pte_unmap(ptep);
-
 	return page;
 }
 EXPORT_SYMBOL(vmalloc_to_page);
@@ -2295,9 +2247,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 
 	if (flags & VM_IOREMAP)
-		align = max(align,
-			    1ul << clamp_t(int, get_count_order_long(size),
-			    PAGE_SHIFT, IOREMAP_MAX_ORDER));
+		align = 1ul << clamp_t(int, get_count_order_long(size),
+				PAGE_SHIFT, IOREMAP_MAX_ORDER);
 
 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!area))
@@ -2509,7 +2460,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			struct page *page = area->pages[i];
 
 			BUG_ON(!page);
-			__free_pages(page, area->page_shift);
+			__free_pages(page, 0);
 		}
 
 		kvfree(area->pages);
@@ -2652,17 +2603,14 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
 	struct page **pages;
-	unsigned long addr = (unsigned long)area->addr;
-	unsigned long size = get_vm_area_size(area);
-	unsigned int page_shift = area->page_shift;
-	unsigned int shift = page_shift + PAGE_SHIFT;
 	unsigned int nr_pages, array_size, i;
 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
 	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
-					0 : __GFP_HIGHMEM;
+					0 :
+					__GFP_HIGHMEM;
 
-	nr_pages = size >> shift;
+	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
 
 	area->nr_pages = nr_pages;
@@ -2683,8 +2631,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
 
-		page = alloc_pages_node(node,
-			alloc_mask|highmem_mask, page_shift);
+		if (node == NUMA_NO_NODE)
+			page = alloc_page(alloc_mask|highmem_mask);
+		else
+			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -2696,9 +2646,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		cond_resched();
 	}
 
-	if (vmap_hpages_range(addr, addr + size, prot, pages, page_shift) < 0)
+	if (map_vm_area(area, prot, pages))
 		goto fail;
-
 	return area->addr;
 
 fail:
@@ -2732,39 +2681,22 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
 {
-	struct vm_struct *area = NULL;
+	struct vm_struct *area;
 	void *addr;
 	unsigned long real_size = size;
-	unsigned long real_align = align;
-	unsigned int shift = PAGE_SHIFT;
 
 	size = PAGE_ALIGN(size);
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
 		goto fail;
 
-	if (IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) {
-		unsigned long size_per_node;
-
-		size_per_node = size;
-		if (node == NUMA_NO_NODE)
-			size_per_node /= num_online_nodes();
-		if (size_per_node >= PMD_SIZE)
-			shift = PMD_SHIFT;
-	}
-again:
-	align = max(real_align, 1UL << shift);
-	size = ALIGN(real_size, align);
-
 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
 				vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
 
-	area->page_shift = shift - PAGE_SHIFT;
-
 	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
 	if (!addr)
-		goto fail;
+		return NULL;
 
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -2778,16 +2710,8 @@ again:
 	return addr;
 
 fail:
-	if (shift == PMD_SHIFT) {
-		shift = PAGE_SHIFT;
-		goto again;
-	}
-
-	if (!area) {
-		/* Warn for area allocation, page allocations already warn */
-		warn_alloc(gfp_mask, NULL,
+	warn_alloc(gfp_mask, NULL,
 			  "vmalloc: allocation failure: %lu bytes", real_size);
-	}
 	return NULL;
 }
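For illustration, the property that the restored comment in vmalloc_to_page() relies on: with this revert applied, every vmalloc mapping is again composed of order-0 pages, so vmalloc_to_page() can resolve the backing struct page for any offset into a vmalloc'ed buffer. A hypothetical helper (nth_backing_page() is an example name, not part of this commit) might look like:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical example: return the struct page backing the n-th
	 * small page of a vmalloc()/vmap() buffer. Valid only for
	 * page-backed vmalloc addresses, which is all of them after
	 * this revert. */
	static struct page *nth_backing_page(void *buf, unsigned int n)
	{
		return vmalloc_to_page(buf + n * PAGE_SIZE);
	}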