author		Linus Torvalds <torvalds@linux-foundation.org>	2016-09-07 14:03:49 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-09-07 14:03:49 -0700
commit		80a77045daacc660659093b312ca0708b53ed558 (patch)
tree		c0c4147c449b493946be39e2147f9732594956c2
parent		ab29b33a84f6910ebf01a32f69a370886a4283dd (diff)
parent		8e1f74ea02cf4562404c48c6882214821552c13f (diff)
Merge tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull more hardened usercopy fixes from Kees Cook:

 - force check_object_size() to be inline too

 - move page-spanning check behind a CONFIG since it's triggering
   false positives

[ Changed the page-spanning config option to depend on EXPERT in the
  merge. That way it still gets build testing, and you can enable it
  if you want to, but is never enabled for "normal" configurations ]

* tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: remove page-spanning test for now
  usercopy: force check_object_size() inline
-rw-r--r--  include/linux/thread_info.h   4
-rw-r--r--  mm/usercopy.c                61
-rw-r--r--  security/Kconfig             11
3 files changed, 48 insertions(+), 28 deletions(-)
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 10c9e601398b..2b5b10eed74f 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -118,8 +118,8 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
 					bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-				     bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+					      bool to_user)
 {
 	if (!__builtin_constant_p(n))
 		__check_object_size(ptr, n, to_user);
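
[ Editorial context, not part of the commit: __builtin_constant_p(n) can
  only fold to true when the function body is expanded at the call site.
  If gcc declined a plain `inline` hint and emitted the function out of
  line, the test would be false for every caller, and even constant-size
  copies would take the __check_object_size() slow path; __always_inline
  removes that possibility. A minimal sketch, with foo_copy_example() as
  a hypothetical caller (in the kernel these calls normally live inside
  the arch copy_*_user() wrappers):

	static void foo_copy_example(unsigned long runtime_len)
	{
		char kbuf[64] = { 0 };

		/*
		 * Constant size: once check_object_size() is inlined at
		 * this call site, __builtin_constant_p(sizeof(kbuf)) is
		 * true and the whole check compiles away.
		 */
		check_object_size(kbuf, sizeof(kbuf), true);

		/*
		 * Runtime size: __builtin_constant_p(runtime_len) is
		 * false, so __check_object_size() is actually called.
		 */
		check_object_size(kbuf, runtime_len, true);
	}
]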
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a3cc3052f830..089328f2b920 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
 	 * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
 
-reject:
-	return "<spans multiple pages>";
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
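
[ Editorial context, not part of the commit: the page-span check that the
  #ifdef above disables by default was rejecting legitimate copies out of
  multi-page allocations made without __GFP_COMP. A hedged sketch of the
  pattern, with foo_fill() invented for illustration; it assumes an
  architecture whose copy_to_user() path is wired up to hardened usercopy:

	static int foo_fill(void __user *ubuf, size_t len)
	{
		unsigned long kbuf;
		int ret = 0;

		if (len > 4 * PAGE_SIZE)
			return -EINVAL;

		/*
		 * Order-2 allocation (4 pages) without __GFP_COMP:
		 * nothing marks the four pages as a single allocation.
		 */
		kbuf = __get_free_pages(GFP_KERNEL, 2);
		if (!kbuf)
			return -ENOMEM;

		/*
		 * len is not a compile-time constant, so the usercopy
		 * check runs. For len > PAGE_SIZE the object spans page
		 * boundaries and, with CONFIG_HARDENED_USERCOPY_PAGESPAN=y,
		 * check_page_span() reports "<spans multiple pages>" even
		 * though the copy is perfectly valid.
		 */
		if (copy_to_user(ubuf, (void *)kbuf, len))
			ret = -EFAULT;

		free_pages(kbuf, 2);
		return ret;
	}

  Allocating with GFP_KERNEL | __GFP_COMP instead marks the four pages as
  one compound allocation, so virt_to_head_page() agrees for the start and
  end of the copy and check_page_span() allows it. ]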
diff --git a/security/Kconfig b/security/Kconfig
index da10d9b573a4..118f4549404e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on EXPERT
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
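
[ Usage note, not part of the commit: after this merge the page-spanning
  check is compile-time opt-in. A developer hunting for allocations that
  are missing __GFP_COMP would enable:

	CONFIG_EXPERT=y
	CONFIG_HARDENED_USERCOPY=y
	CONFIG_HARDENED_USERCOPY_PAGESPAN=y

  Normal configurations leave CONFIG_HARDENED_USERCOPY_PAGESPAN unset, in
  which case the #ifdef above reduces check_page_span() to an
  unconditional "return NULL". ]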