author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-14 15:14:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-14 15:14:29 -0700
commit     54be8200198ddfc6cb396720460c19881fac2d5a (patch)
tree       58ccab6e0cfb35b30e7e16804f15fe9c94628f12 /include
parent     41d9884c44237cd66e2bdbc412028b29196b344c (diff)
parent     c25f195e828f847735c7626b5693ddc3b853d245 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
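One of the commits above, "slab: add kmalloc() to kernel API documentation", moves the kmalloc() kerneldoc into slab.h (visible in the diff below). As a reminder of the interface being documented, here is a minimal usage sketch; the struct and helper names are hypothetical and not part of this merge:

	#include <linux/types.h>
	#include <linux/slab.h>

	/* Hypothetical structure, for illustration only. */
	struct sample_buf {
		size_t len;
		u8 data[64];
	};

	static struct sample_buf *sample_buf_alloc(void)
	{
		/* GFP_KERNEL: normal kernel allocation that may sleep. */
		struct sample_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return NULL;	/* kmalloc() returns NULL on failure */

		buf->len = 0;
		return buf;
	}

	static void sample_buf_free(struct sample_buf *buf)
	{
		kfree(buf);	/* kfree(NULL) is a no-op */
	}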
Diffstat (limited to 'include')
-rw-r--r--   include/linux/slab.h       57
-rw-r--r--   include/linux/slob_def.h    8
2 files changed, 42 insertions, 23 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c621752caa66..6c5cc0ea87134 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -169,11 +169,7 @@ struct kmem_cache {
struct list_head list; /* List of all slab caches on the system */
};
-#define KMALLOC_MAX_SIZE (1UL << 30)
-
-#include <linux/slob_def.h>
-
-#else /* CONFIG_SLOB */
+#endif /* CONFIG_SLOB */
/*
* Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
-#else
+#endif
+
+#ifdef CONFIG_SLUB
/*
* SLUB allocates up to order 2 pages directly and otherwise
* passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
#endif
#endif
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all page size and larger requests to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
+
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif
+#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
+#endif /* !CONFIG_SLOB */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
-#elif defined(CONFIG_SLUB)
+#endif
+
+#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
-#else
-#error "Unknown slab allocator"
+#endif
+
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
#endif
/*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
*/
static __always_inline int kmalloc_size(int n)
{
+#ifndef CONFIG_SLOB
if (n > 2)
return 1 << n;
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
if (n == 2 && KMALLOC_MIN_SIZE <= 64)
return 192;
-
+#endif
return 0;
}
-#endif /* !CONFIG_SLOB */
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
/**
- * kmalloc_array - allocate memory for an array.
- * @n: number of elements.
- * @size: element size.
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*
* The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
* There are other flags available as well, but these are not intended
* for general use, and so are not documented here. For a full list of
* potential flags, always refer to linux/gfp.h.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
*/
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
/**
* kmalloc_node - allocate memory from a specific node
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate (see kmalloc).
* @node: node to allocate from.
*
* kmalloc() for non-local nodes, used to allocate from a specific node
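The slab.h hunks above also touch the kerneldoc for kmalloc_array(), kcalloc() and kmalloc_node(). As a hedged sketch of how the array helpers are typically called (the element type and wrapper functions are hypothetical, not from this patch):

	#include <linux/types.h>
	#include <linux/slab.h>

	/* Hypothetical element type, for illustration only. */
	struct entry {
		u64 key;
		u64 value;
	};

	static struct entry *entries_alloc(size_t n)
	{
		/*
		 * kmalloc_array() multiplies n * sizeof(struct entry) with
		 * overflow checking and returns NULL if the product would
		 * overflow, instead of silently wrapping.
		 */
		return kmalloc_array(n, sizeof(struct entry), GFP_KERNEL);
	}

	static struct entry *entries_zalloc(size_t n)
	{
		/* kcalloc() is the same, but the memory is zeroed. */
		return kcalloc(n, sizeof(struct entry), GFP_KERNEL);
	}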
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index f28e14a12e3fc..095a5a4a85165 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __kmalloc_node(size, flags, NUMA_NO_NODE);
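After this change, slob_def.h keeps only the inline definitions: both kmalloc() and kmalloc_node() end up in __kmalloc_node(), with plain kmalloc() passing NUMA_NO_NODE. A caller-side sketch of that distinction, using a hypothetical helper name:

	#include <linux/numa.h>
	#include <linux/slab.h>

	/*
	 * Hypothetical helper: allocate near a given node when one is
	 * requested, otherwise let the allocator choose (NUMA_NO_NODE),
	 * mirroring how SLOB's kmalloc() forwards to __kmalloc_node().
	 */
	static void *alloc_near(size_t size, int node)
	{
		if (node == NUMA_NO_NODE)
			return kmalloc(size, GFP_KERNEL);

		return kmalloc_node(size, GFP_KERNEL, node);
	}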