author    Joonsoo Kim <iamjoonsoo.kim@lge.com>    2013-12-02 17:49:41 +0900
committer Pekka Enberg <penberg@kernel.org>      2014-02-08 12:12:06 +0200
commit    f315e3fa1cf5b3317fc948708645fff889ce1e63 (patch)
tree      2c7e3db6b372504ca115c5dcd2b23a8996e4a06a
parent    e5c58dfdcbd36f6b4c4c92c31cf6753d22da630a (diff)
slab: restrict the number of objects in a slab
To prepare for implementing a byte sized index for managing the freelist of a slab, we should restrict the number of objects in a slab to 256 or fewer, since a byte can only represent 256 different values. Setting the size of an object to a value equal to or greater than the newly introduced SLAB_OBJ_MIN_SIZE ensures that the number of objects in a slab is 256 or fewer for a slab spanning 1 page.

If the page size is larger than 4096, the above assumption no longer holds. In that case, we fall back on a 2 byte sized index.

If the minimum kmalloc size is less than 16, we use it as the minimum object size and give up on this optimization.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
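As an illustration of the arithmetic above (an editor's sketch, not part of the patch): a one byte index can address at most 2^8 = 256 objects, so every object in a one page slab must occupy at least PAGE_SIZE / 256 bytes. The stand-alone C program below assumes a minimum object size of 16 (the 4 KiB page case) and walks a few page sizes to show where the 2 byte fallback kicks in:

#include <stdio.h>

int main(void)
{
	/* Assumed minimum object size; matches the 4 KiB page case. */
	const unsigned long obj_min_size = 16;
	const unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		/* A byte index addresses 256 slots, hence this floor. */
		unsigned long needed = page_sizes[i] / 256;

		printf("page %5lu: byte index needs objects >= %3lu -> %s\n",
		       page_sizes[i], needed,
		       needed <= obj_min_size ? "1 byte index"
					      : "2 byte index fallback");
	}
	return 0;
}

For a 4096 byte page the floor is exactly 16, so the byte index works; for larger pages the floor exceeds SLAB_OBJ_MIN_SIZE and a 2 byte index is needed, matching the commit message.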
-rw-r--r--  include/linux/slab.h  11
-rw-r--r--  mm/slab.c             21
2 files changed, 32 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abdd67df..d015dec02bf3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -201,6 +201,17 @@ struct kmem_cache {
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
+
+/*
+ * This restriction comes from the byte sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * a byte sized index which can represent 2^8 entries, the size of the
+ * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
+ * If the minimum size of kmalloc is less than 16, we use it as the
+ * minimum object size and give up using a byte sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE (KMALLOC_SHIFT_LOW < 4 ? \
+ (1 << KMALLOC_SHIFT_LOW) : 16)
#endif
#ifdef CONFIG_SLUB
diff --git a/mm/slab.c b/mm/slab.c
index 878354b26b72..9d4c7b50dfdc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -157,6 +157,17 @@
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+ <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
+
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
+
+#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+
/*
* true if a page was allocated from pfmemalloc reserves for network-based
* swap
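As a sanity check, the three definitions in the hunk above compile stand-alone; this sketch stubs PAGE_SIZE, BITS_PER_BYTE and SLAB_OBJ_MIN_SIZE to typical values (4096, 8 and 16) rather than taking them from kernel headers:

/* Stand-alone sketch; the three stub values below are assumptions. */
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define BITS_PER_BYTE     8
#define SLAB_OBJ_MIN_SIZE 16

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)

int main(void)
{
	/* 4096 >> 8 = 16 <= 16, so the 1 byte index is selected. */
	printf("index width: %zu byte(s), max objects: %d\n",
	       sizeof(freelist_idx_t), (int)SLAB_OBJ_MAX_NUM);
	return 0;
}

With these values it prints an index width of 1 byte and a cap of 256 objects; with a stubbed 64 KiB page the preprocessor test fails and freelist_idx_t becomes unsigned short, raising the cap to 65536.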
@@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (!num)
continue;
+ /* Can't handle more than SLAB_OBJ_MAX_NUM objects per slab */
+ if (num > SLAB_OBJ_MAX_NUM)
+ break;
+
if (flags & CFLGS_OFF_SLAB) {
/*
* Max number of objs-per-slab for caches which
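For context, here is a heavily simplified model of the loop this hunk patches; the real calculate_slab_order() uses cache_estimate() and accounts for freelist management overhead, so the naive division below is an assumption for illustration only:

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define SLAB_OBJ_MAX_NUM 256	/* byte index case */

int main(void)
{
	unsigned long size = 16;	/* assumed object size */

	/* Try growing slab orders, as calculate_slab_order() does. */
	for (int order = 0; order <= 3; order++) {
		unsigned long num = (PAGE_SIZE << order) / size;

		if (!num)
			continue;

		/* The new check: stop before the index type overflows. */
		if (num > SLAB_OBJ_MAX_NUM) {
			printf("order %d: %lu objects, over the cap\n",
			       order, num);
			break;
		}
		printf("order %d: %lu objects fit\n", order, num);
	}
	return 0;
}

With 16 byte objects, order 0 holds exactly 256 objects and passes; order 1 would hold 512, so the new bound stops the search there.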
@@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
flags |= CFLGS_OFF_SLAB;
size = ALIGN(size, cachep->align);
+ /*
+ * We should restrict the number of objects in a slab to implement
+ * a byte sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE
+ * definition.
+ */
+ if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+ size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
left_over = calculate_slab_order(cachep, size, cachep->align, flags);
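The bump itself is a plain align-up. A small sketch with the kernel's ALIGN() semantics re-declared locally; the 8 byte size and alignment are assumed example values, and the FREELIST_BYTE_INDEX guard from the hunk is folded into a comment:

#include <stdio.h>

#define SLAB_OBJ_MIN_SIZE 16
/* Local stand-in for the kernel's ALIGN(); a must be a power of two. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long size = 8;		/* assumed object size */
	unsigned long align = 8;	/* assumed cachep->align */

	/*
	 * Mirror of the hunk: grow too-small objects to the minimum
	 * (the patch only does this when FREELIST_BYTE_INDEX is set).
	 */
	if (size < SLAB_OBJ_MIN_SIZE)
		size = ALIGN(SLAB_OBJ_MIN_SIZE, align);

	printf("effective object size: %lu\n", size);	/* prints 16 */
	return 0;
}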