From 3fa6c507319c897598512da91c010a4ad2ed682c Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Thu, 28 Jul 2016 15:48:50 -0700
Subject: mm: optimize copy_page_to/from_iter_iovec

copy_page_to_iter_iovec() and copy_page_from_iter_iovec() copy some data
to userspace or from userspace.  These functions have a fast path where
they map a page using kmap_atomic and a slow path where they use kmap.

kmap is slower than kmap_atomic, so the fast path is preferred.

However, on kernels without highmem support, kmap just calls
page_address, so there is no need to avoid kmap.  On kernels without
highmem support, the fast path just increases code size (and cache
footprint) and it doesn't improve copy performance in any way.

This patch enables the fast path only if CONFIG_HIGHMEM is defined.

Code size reduced by this patch:

        x86 (without highmem)    928
        x86-64                   960
        sparc64                  848
        alpha                   1136
        pa-risc                 1200

[akpm@linux-foundation.org: use IS_ENABLED(), per Andi]
Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1607221711410.4818@file01.intranet.prod.int.rdu2.redhat.com
Signed-off-by: Mikulas Patocka
Cc: Hugh Dickins
Cc: Michal Hocko
Cc: Alexander Viro
Cc: Mel Gorman
Cc: Johannes Weiner
Cc: Andi Kleen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/iov_iter.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index d67c8288d95d..9e8c7386b3a0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -144,7 +144,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_writeable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		from = kaddr + offset;
 
@@ -175,6 +175,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	from = kaddr + offset;
 	left = __copy_to_user(buf, from, copy);
@@ -193,6 +194,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
@@ -225,7 +227,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_readable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -256,6 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	to = kaddr + offset;
 	left = __copy_from_user(to, buf, copy);
@@ -274,6 +277,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
--
cgit v1.2.3

From 80a9201a5965f4715d5c09790862e0df84ce0614 Mon Sep 17 00:00:00 2001
From: Alexander Potapenko
Date: Thu, 28 Jul 2016 15:49:07 -0700
Subject: mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB

For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - change the freelist hook so that parts of the freelist can be put
   into the quarantine.
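To make the quarantine idea above concrete, here is a small standalone
userspace C sketch (illustrative only -- every name and constant below is
invented for the example and is not the kernel's KASAN code): instead of
releasing an object at free time, the free hook parks it in a FIFO, and the
oldest entries are only really freed once the quarantine exceeds a byte
budget, so reuse of the memory is delayed and a use-after-free access still
hits "poisoned", not-yet-recycled memory.

/* quarantine_demo.c -- userspace model, illustrative only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define QUARANTINE_MAX_BYTES 4096       /* arbitrary budget for the demo */

struct qnode {
        void *ptr;                      /* the "freed" object, not yet released */
        size_t size;
        struct qnode *next;
};

static struct qnode *q_head, *q_tail;   /* FIFO of quarantined objects */
static size_t q_bytes;

/* Really release the oldest quarantined objects until under budget. */
static void quarantine_reduce_demo(void)
{
        while (q_bytes > QUARANTINE_MAX_BYTES && q_head) {
                struct qnode *n = q_head;

                q_head = n->next;
                if (!q_head)
                        q_tail = NULL;
                q_bytes -= n->size;
                free(n->ptr);           /* reuse becomes possible only now */
                free(n);
        }
}

/* "Free hook": park the object instead of freeing it immediately. */
static void quarantine_put_demo(void *ptr, size_t size)
{
        struct qnode *n = malloc(sizeof(*n));

        if (!n) {                       /* cannot track it: fall back to a real free */
                free(ptr);
                return;
        }
        n->ptr = ptr;
        n->size = size;
        n->next = NULL;
        if (q_tail)
                q_tail->next = n;
        else
                q_head = n;
        q_tail = n;
        q_bytes += size;
        quarantine_reduce_demo();
}

int main(void)
{
        for (int i = 0; i < 64; i++)
                quarantine_put_demo(malloc(128), 128);
        printf("bytes still quarantined: %zu\n", q_bytes);
        return 0;
}

In the real patch the same effect is obtained by having slab_free_hook()
hand the object to kasan_slab_free(), which may put it into the KASAN
quarantine instead of letting it go straight back onto the SLUB freelist;
the quarantined objects are freed for real later via ___cache_free().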
[aryabinin@virtuozzo.com: fixes]
Link: http://lkml.kernel.org/r/1468601423-28676-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1468347165-41906-3-git-send-email-glider@google.com
Signed-off-by: Alexander Potapenko
Cc: Andrey Konovalov
Cc: Christoph Lameter
Cc: Dmitry Vyukov
Cc: Steven Rostedt (Red Hat)
Cc: Joonsoo Kim
Cc: Kostya Serebryany
Cc: Andrey Ryabinin
Cc: Kuthonuzo Luruo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/kasan.h    |  2 ++
 include/linux/slab_def.h |  3 ++-
 include/linux/slub_def.h |  4 +++
 lib/Kconfig.kasan        |  4 +--
 mm/kasan/Makefile        |  3 +--
 mm/kasan/kasan.c         | 63 ++++++++++++++++++++++++------------------------
 mm/kasan/kasan.h         |  3 +--
 mm/kasan/report.c        |  8 +++---
 mm/slab.h                |  2 ++
 mm/slub.c                | 57 +++++++++++++++++++++++++++++++++----------
 10 files changed, 93 insertions(+), 56 deletions(-)

(limited to 'lib')

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ac4b3c46a84d..c9cf374445d8 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -77,6 +77,7 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);
 
 #else /* CONFIG_KASAN */
 
@@ -121,6 +122,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #endif /* CONFIG_KASAN */
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 339ba027ade9..4ad2c5a26399 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -88,7 +88,8 @@ struct kmem_cache {
 };
 
 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
-				void *x) {
+				void *x)
+{
 	void *object = x - (x - page->s_mem) % cache->size;
 	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
 
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cf501cf8e6db..75f56c2ef2d4 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -104,6 +104,10 @@ struct kmem_cache {
 	unsigned int *random_seq;
 #endif
 
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c6838ba9..bd38aab05929 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 1548749a3d45..2976a9ee104f 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6845f9294696..b6f99e81bfeb 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 			KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
-	*flags |= SLAB_KASAN;
+	int orig_size = *size;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
-		    max(*size,
-			cache->object_size +
-			optimal_redzone(cache->object_size)));
+
+	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
+
+	/*
+	 * If the metadata doesn't fit, don't enable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+			*size <= cache->kasan_info.free_meta_offset) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		cache->kasan_info.free_meta_offset = 0;
+		*size = orig_size;
+		return;
+	}
+
+	*flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
 	quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		(cache->kasan_info.free_meta_offset ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -431,16 +442,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +509,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +529,16 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +557,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -576,7 +579,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
@@ -585,7 +587,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb87923552ef..31972cdba433 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -95,7 +95,6 @@ struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 					const void *object);
 
-
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@@ -110,7 +109,7 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b3c122ddd454..861b9776841a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -116,7 +116,6 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
-#ifdef CONFIG_SLAB
 static void print_track(struct kasan_track *track)
 {
 	pr_err("PID = %u\n", track->pid);
@@ -130,8 +129,8 @@ static void print_track(struct kasan_track *track)
 	}
 }
 
-static void object_err(struct kmem_cache *cache, struct page *page,
-			void *object, char *unused_reason)
+static void kasan_object_err(struct kmem_cache *cache, struct page *page,
+			void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	struct kasan_free_meta *free_info;
@@ -162,7 +161,6 @@ static void object_err(struct kmem_cache *cache, struct page *page,
 		break;
 	}
 }
-#endif
 
 static void print_address_description(struct kasan_access_info *info)
 {
@@ -177,7 +175,7 @@ static void print_address_description(struct kasan_access_info *info)
 		struct kmem_cache *cache = page->slab_cache;
 		object = nearest_obj(cache, page,
 				(void *)info->access_addr);
-		object_err(cache, page, object,
+		kasan_object_err(cache, page, object,
 				"kasan: bad access detected");
 		return;
 	}
diff --git a/mm/slab.h b/mm/slab.h
index f33980ab0406..9653f2e2591a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -369,6 +369,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
 # endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
diff --git a/mm/slub.c b/mm/slub.c
index 1cdde1a5ba5f..74e7c8c30db8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -454,8 +454,6 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
 static int slub_debug;
 #endif
@@ -660,6 +658,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
 		print_section("Padding ", p + off, size_from_object(s) - off);
@@ -787,6 +787,8 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (size_from_object(s) == off)
 		return 1;
 
@@ -1322,8 +1324,10 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline void *slab_free_hook(struct kmem_cache *s, void *x)
 {
+	void *freeptr;
+
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1344,7 +1348,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
+	freeptr = get_freepointer(s, x);
+	/*
+	 * kasan_slab_free() may put x into memory quarantine, delaying its
+	 * reuse. In this case the object's freelist pointer is changed.
+	 */
 	kasan_slab_free(s, x);
+	return freeptr;
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
@@ -1362,11 +1372,11 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 
 	void *object = head;
 	void *tail_obj = tail ? : head;
+	void *freeptr;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		freeptr = slab_free_hook(s, object);
+	} while ((object != tail_obj) && (object = freeptr));
 #endif
 }
 
@@ -2878,16 +2888,13 @@ slab_empty:
  * same page) possible by specifying head and tail ptr, plus objects
 * count (cnt). Bulk free indicated by tail pointer being set.
 */
-static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
-				      void *head, void *tail, int cnt,
-				      unsigned long addr)
+static __always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
 {
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2921,6 +2928,27 @@ redo:
 
 }
 
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				void *head, void *tail, int cnt,
+				unsigned long addr)
+{
+	slab_free_freelist_hook(s, head, tail);
+	/*
+	 * slab_free_freelist_hook() could have put the items into quarantine.
+	 * If so, no need to free them.
+	 */
+	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+		return;
+	do_slab_free(s, page, head, tail, cnt, addr);
+}
+
+#ifdef CONFIG_KASAN
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
+{
+	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+}
+#endif
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
@@ -3363,7 +3391,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
@@ -3422,7 +3450,10 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * the object.
 		 */
 		size += 2 * sizeof(struct track);
+#endif
 
+	kasan_cache_create(s, &size, &s->flags);
+#ifdef CONFIG_SLUB_DEBUG
 	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch
--
cgit v1.2.3

From 87cc271d5e4320d705cfdf59f68d4d037b3511b2 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Thu, 28 Jul 2016 15:49:10 -0700
Subject: lib/stackdepot.c: use __GFP_NOWARN for stack allocations

This (large, atomic) allocation attempt can fail.  We expect and handle
that, so avoid the scary warning.

Link: http://lkml.kernel.org/r/20160720151905.GB19146@node.shutemov.name
Cc: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Michal Hocko
Cc: Rik van Riel
Cc: David Rientjes
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/stackdepot.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 53ad6c0831ae..60f77f1d470a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -242,6 +242,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		 */
 		alloc_flags &= ~GFP_ZONEMASK;
 		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags |= __GFP_NOWARN;
 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
 		if (page)
 			prealloc = page_address(page);
--
cgit v1.2.3
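The pattern behind this last one-liner generalizes: when an allocation
failure is anticipated and handled by a fallback path, the failure report is
just noise, so it is suppressed on the optimistic attempt. A small userspace
analogue is sketched below (illustrative only -- the function names and sizes
are invented for the example; in the kernel, __GFP_NOWARN simply tells the
page allocator not to print its allocation-failure warning).

/* nowarn_demo.c -- userspace analogue, illustrative only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

/*
 * Try an optimistic, large preallocation first; its failure is expected
 * under memory pressure and is handled by falling back to a smaller
 * buffer, so nothing is reported on the optimistic path.
 */
static void *alloc_with_fallback(size_t want, size_t fallback, size_t *got)
{
        void *p = malloc(want);         /* may fail quietly; that is fine */

        if (p) {
                *got = want;
                return p;
        }
        p = malloc(fallback);           /* failure here is the real error */
        if (!p) {
                fprintf(stderr, "allocation of %zu bytes failed\n", fallback);
                *got = 0;
                return NULL;
        }
        *got = fallback;
        return p;
}

int main(void)
{
        size_t got;
        void *buf = alloc_with_fallback((size_t)1 << 30, (size_t)1 << 20, &got);

        if (buf) {
                printf("got %zu bytes\n", got);
                free(buf);
        }
        return buf ? 0 : 1;
}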