author     Stephen Rothwell <sfr@canb.auug.org.au>   2022-04-11 12:37:59 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>   2022-04-11 12:37:59 +1000
commit     4dca00818aa4f90b9a2501633fe608c05ab767db (patch)
tree       790088e4919e613b542ce107d75cd5ae95313bff
parent     33cc90a057c155c7e6445762f455a64785b5ef56 (diff)
parent     f4fe9fc39c7131fe482bc44d68cb0eff96105935 (diff)
download   linux-4dca00818aa4f90b9a2501633fe608c05ab767db.tar.gz
           linux-4dca00818aa4f90b9a2501633fe608c05ab767db.tar.xz
Merge branch 'for-next/kspp' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git
-rw-r--r--  arch/x86/include/asm/highmem.h    |  1
-rw-r--r--  include/linux/highmem-internal.h  | 10
-rw-r--r--  include/uapi/linux/stddef.h       |  4
-rw-r--r--  mm/usercopy.c                     | 97
-rw-r--r--  security/Kconfig                  | 13
5 files changed, 43 insertions, 82 deletions
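
The mm/usercopy.c change below replaces the old page-span heuristic with explicit per-address-class bounds checks (kmap page, vmalloc area, large folio). As a rough illustration only, not the kernel code itself, the decision can be modeled in plain C; the names here (struct region, copy_allowed()) are invented for the sketch, and only the bounds arithmetic mirrors the patch:

/*
 * Illustrative sketch, not the kernel implementation: each address class
 * gets an explicit "does [ptr, ptr + n) stay inside its backing object?"
 * test instead of the removed page-span heuristic.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

enum kind { KMAP, VMALLOC, FOLIO };

struct region {
	enum kind kind;
	unsigned long start;	/* start of the backing object */
	unsigned long size;	/* size of the backing object */
};

/* Return false if copying n bytes at ptr would overrun its region. */
static bool copy_allowed(const struct region *r, unsigned long ptr,
			 unsigned long n)
{
	unsigned long offset;

	switch (r->kind) {
	case KMAP:
		/* kmap: the object may not cross its page boundary. */
		return ptr + n - 1 <= (ptr | (PAGE_SIZE - 1));
	case VMALLOC:
	case FOLIO:
		/* vmalloc/folio: offset + n must fit in the mapped size. */
		offset = ptr - r->start;
		return offset + n <= r->size;
	}
	return false;
}

int main(void)
{
	struct region page = { KMAP, 0x10000, PAGE_SIZE };
	struct region area = { VMALLOC, 0x20000, 3 * PAGE_SIZE };

	/* 0x10f80 + 0x100 ends in the next page: rejected (prints 1). */
	printf("kmap overrun rejected: %d\n", !copy_allowed(&page, 0x10f80, 0x100));
	/* Fits inside the 3-page vmalloc area: allowed (prints 1). */
	printf("vmalloc copy allowed:  %d\n", copy_allowed(&area, 0x20800, 0x1000));
	return 0;
}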
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 032e020853aa..731ee7cc40a5 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable_areas.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index a77be5630209..337bd9f32921 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -149,6 +149,11 @@ static inline void totalhigh_pages_add(long count)
 	atomic_long_add(count, &_totalhigh_pages);
 }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	unsigned long addr = (unsigned long)x;
+	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
 #else /* CONFIG_HIGHMEM */
 
 static inline struct page *kmap_to_page(void *addr)
@@ -234,6 +239,11 @@ static inline void __kunmap_atomic(void *addr)
 static inline unsigned int nr_free_highpages(void) { return 0; }
 static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
+static inline bool is_kmap_addr(const void *x)
+{
+	return false;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
 /*
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 3021ea25a284..7837ba4fe728 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
 #include <linux/compiler_types.h>
 
 #ifndef __always_inline
@@ -41,3 +44,4 @@
 		struct { } __empty_ ## NAME; \
 		TYPE NAME[]; \
 	}
+#endif
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2c235d5c2364..ac8a093e90c1 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
+#include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
@@ -157,91 +158,47 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
-/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline void check_page_span(const void *ptr, unsigned long n,
-				   struct page *page, bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+				     bool to_user)
 {
-#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
-	const void *end = ptr + n - 1;
-	struct page *endpage;
-	bool is_reserved, is_cma;
-
-	/*
-	 * Sometimes the kernel data regions are not marked Reserved (see
-	 * check below). And sometimes [_sdata,_edata) does not cover
-	 * rodata and/or bss, so check each range explicitly.
-	 */
-
-	/* Allow reads of kernel rodata region (if not marked as Reserved). */
-	if (ptr >= (const void *)__start_rodata &&
-	    end <= (const void *)__end_rodata) {
-		if (!to_user)
-			usercopy_abort("rodata", NULL, to_user, 0, n);
-		return;
-	}
-
-	/* Allow kernel data region (if not marked as Reserved). */
-	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return;
+	struct folio *folio;
 
-	/* Allow kernel bss region (if not marked as Reserved). */
-	if (ptr >= (const void *)__bss_start &&
-	    end <= (const void *)__bss_stop)
+	if (!virt_addr_valid(ptr))
 		return;
 
-	/* Is the object wholly within one base page? */
-	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
-		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return;
+	if (is_kmap_addr(ptr)) {
+		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
+		if ((unsigned long)ptr + n - 1 > page_end)
+			usercopy_abort("kmap", NULL, to_user,
+				       offset_in_page(ptr), n);
 		return;
-
-	/*
-	 * Reject if range is entirely either Reserved (i.e. special or
-	 * device memory), or CMA. Otherwise, reject since the object spans
-	 * several independently allocated pages.
-	 */
-	is_reserved = PageReserved(page);
-	is_cma = is_migrate_cma_page(page);
-	if (!is_reserved && !is_cma)
-		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
-
-	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
-		page = virt_to_head_page(ptr);
-		if (is_reserved && !PageReserved(page))
-			usercopy_abort("spans Reserved and non-Reserved pages",
-				       NULL, to_user, 0, n);
-		if (is_cma && !is_migrate_cma_page(page))
-			usercopy_abort("spans CMA and non-CMA pages", NULL,
-				       to_user, 0, n);
 	}
-#endif
-}
 
-static inline void check_heap_object(const void *ptr, unsigned long n,
-				     bool to_user)
-{
-	struct folio *folio;
+	if (is_vmalloc_addr(ptr)) {
+		struct vm_struct *area = find_vm_area(ptr);
+		unsigned long offset;
 
-	if (!virt_addr_valid(ptr))
+		if (!area) {
+			usercopy_abort("vmalloc", "no area", to_user, 0, n);
+			return;
+		}
+
+		offset = ptr - area->addr;
+		if (offset + n > get_vm_area_size(area))
+			usercopy_abort("vmalloc", NULL, to_user, offset, n);
 		return;
+	}
 
-	/*
-	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_slab().
-	 */
-	folio = page_folio(kmap_to_page((void *)ptr));
+	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
-	} else {
-		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, folio_page(folio, 0), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
 
diff --git a/security/Kconfig b/security/Kconfig
index 9b2c4925585a..f29e4c656983 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -160,20 +160,9 @@ config HARDENED_USERCOPY
 	  copy_from_user() functions) by rejecting memory ranges that
 	  are larger than the specified heap object, span multiple
 	  separately allocated pages, are not on the process stack,
-	  or are part of the kernel text. This kills entire classes
+	  or are part of the kernel text. This prevents entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
-config HARDENED_USERCOPY_PAGESPAN
-	bool "Refuse to copy allocations that span multiple pages"
-	depends on HARDENED_USERCOPY
-	depends on BROKEN
-	help
-	  When a multi-page allocation is done without __GFP_COMP,
-	  hardened usercopy will reject attempts to copy it. There are,
-	  however, several cases of this in the kernel that have not all
-	  been removed. This config is intended to be used only while
-	  trying to find such users.
-
 config FORTIFY_SOURCE
 	bool "Harden common str/mem functions against buffer overflows"
 	depends on ARCH_HAS_FORTIFY_SOURCE
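
For the is_kmap_addr() helper added in highmem-internal.h above, the following minimal sketch shows the same half-open range test that decides whether the single-page kmap bound applies; PKMAP_BASE and LAST_PKMAP here are made-up stand-ins for the kernel's PKMAP_ADDR() window, not real values:

/*
 * Minimal model of the is_kmap_addr() range test, with invented constants
 * in place of PKMAP_ADDR(0)/PKMAP_ADDR(LAST_PKMAP).
 */
#include <stdbool.h>
#include <stdio.h>

#define PKMAP_BASE	0xff800000UL	/* hypothetical window start */
#define PAGE_SIZE	4096UL
#define LAST_PKMAP	1024UL		/* hypothetical number of kmap slots */

static bool is_kmap_addr(unsigned long addr)
{
	/* Inside [window start, window end): treated as a kmap mapping. */
	return addr >= PKMAP_BASE &&
	       addr < PKMAP_BASE + LAST_PKMAP * PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", is_kmap_addr(0xff801000UL));	/* 1: inside the window */
	printf("%d\n", is_kmap_addr(0xc0000000UL));	/* 0: lowmem, not kmap */
	return 0;
}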