author		Alexey Dobriyan <adobriyan@gmail.com>	2018-04-05 16:20:37 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-05 21:36:23 -0700
commit		f4957d5bd09165b165df851fbf8c658f7fcd9922 (patch)
tree		021c72366564d4082e2885999640b79d174e5296 /mm
parent		361d575e5c7a39c73a8a9bdd504c1d1274f280aa (diff)
slab: make kmem_cache_create() work with 32-bit sizes
struct kmem_cache::size and ::align were always 32-bit.

Out of curiosity I created a 4GB kmem_cache; it oopsed with a division by 0.
kmem_cache_create(1UL<<32+1) created a 1-byte cache as expected.

size_t doesn't work and never did.

Link: http://lkml.kernel.org/r/20180305200730.15812-6-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
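The failure mode is plain integer truncation: on a 64-bit build a size_t argument wider than 32 bits is silently chopped when it lands in the 32-bit struct kmem_cache fields, so the size_t prototype only pretended to accept large sizes. A minimal user-space sketch of that truncation follows; store_in_32bit_field() is a hypothetical stand-in for the old size_t prototype feeding a 32-bit field, not kernel code.

#include <stdio.h>

/* Hypothetical stand-in: the caller passes a size_t, but the value ends up
 * in a 32-bit field, the way struct kmem_cache::size used to receive it. */
static unsigned int store_in_32bit_field(size_t size)
{
	unsigned int object_size = size;	/* silent truncation on 64-bit */
	return object_size;
}

int main(void)
{
	size_t just_over_4g = (1UL << 32) + 1;

	printf("requested %zu, stored %u\n",
	       just_over_4g, store_in_32bit_field(just_over_4g));	/* stored: 1 */
	printf("requested %zu, stored %u\n",
	       (size_t)(1UL << 32), store_in_32bit_field(1UL << 32));	/* stored: 0 */
	return 0;
}

Switching the public prototypes to unsigned int, as in the diff below, makes the real width visible at the API boundary instead of hiding the narrowing inside the allocator.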
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	2
-rw-r--r--	mm/slab.h	6
-rw-r--r--	mm/slab_common.c	19
-rw-r--r--	mm/slub.c	2
4 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 9095c39454251..ba25d8363eb26 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1877,7 +1877,7 @@ slab_flags_t kmem_cache_flags(unsigned long object_size,
}
struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *cachep;
diff --git a/mm/slab.h b/mm/slab.h
index 2a6d88044a561..0809580428feb 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -101,11 +101,11 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
unsigned int useroffset, unsigned int usersize);
int slab_unmergeable(struct kmem_cache *s);
-struct kmem_cache *find_mergeable(size_t size, size_t align,
+struct kmem_cache *find_mergeable(unsigned size, unsigned align,
slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *));
slab_flags_t kmem_cache_flags(unsigned long object_size,
@@ -113,7 +113,7 @@ slab_flags_t kmem_cache_flags(unsigned long object_size,
void (*ctor)(void *));
#else
static inline struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2a7f09ce7c84c..a4545a61a7c85 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -82,7 +82,7 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(const char *name, size_t size)
+static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
struct kmem_cache *s = NULL;
@@ -113,7 +113,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
return 0;
}
#else
-static inline int kmem_cache_sanity_check(const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
return 0;
}
@@ -280,8 +280,8 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
*/
-static unsigned long calculate_alignment(slab_flags_t flags,
- unsigned long align, unsigned long size)
+static unsigned int calculate_alignment(slab_flags_t flags,
+ unsigned int align, unsigned int size)
{
/*
* If the user wants hardware cache aligned objects then follow that
@@ -291,7 +291,7 @@ static unsigned long calculate_alignment(slab_flags_t flags,
* alignment though. If that is greater then use it.
*/
if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign;
+ unsigned int ralign;
ralign = cache_line_size();
while (size <= ralign / 2)
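As an aside on the hunk above: the SLAB_HWCACHE_ALIGN branch halves the cache-line alignment while the object still fits in half of it, so small objects are not padded out to a full line. A standalone sketch of just that loop, assuming a 64-byte cache line; the real calculate_alignment() additionally applies the caller's align and pointer-size rounding not reproduced here.

#include <stdio.h>

static unsigned int hwcache_align(unsigned int size)
{
	unsigned int ralign = 64;	/* assumed cache_line_size() */

	while (size <= ralign / 2)	/* shrink while the object fits in half */
		ralign /= 2;
	return ralign;
}

int main(void)
{
	printf("%u\n", hwcache_align(20));	/* 32: small object, half-line alignment suffices */
	printf("%u\n", hwcache_align(100));	/* 64: larger object keeps full cache-line alignment */
	return 0;
}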
@@ -331,7 +331,7 @@ int slab_unmergeable(struct kmem_cache *s)
return 0;
}
-struct kmem_cache *find_mergeable(size_t size, size_t align,
+struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
slab_flags_t flags, const char *name, void (*ctor)(void *))
{
struct kmem_cache *s;
@@ -379,7 +379,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
}
static struct kmem_cache *create_cache(const char *name,
- size_t object_size, size_t size, size_t align,
+ unsigned int object_size, unsigned int size, unsigned int align,
slab_flags_t flags, size_t useroffset,
size_t usersize, void (*ctor)(void *),
struct mem_cgroup *memcg, struct kmem_cache *root_cache)
@@ -452,7 +452,8 @@ out_free_cache:
* as davem.
*/
struct kmem_cache *
-kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
+kmem_cache_create_usercopy(const char *name,
+ unsigned int size, unsigned int align,
slab_flags_t flags, size_t useroffset, size_t usersize,
void (*ctor)(void *))
{
@@ -532,7 +533,7 @@ out_unlock:
EXPORT_SYMBOL(kmem_cache_create_usercopy);
struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
diff --git a/mm/slub.c b/mm/slub.c
index d92218ed7f1c7..495c785c8d7e7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4242,7 +4242,7 @@ void __init kmem_cache_init_late(void)
}
struct kmem_cache *
-__kmem_cache_alias(const char *name, size_t size, size_t align,
+__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s, *c;