author      Yang Shi <yang.shi@linaro.org>                  2016-05-19 17:10:41 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 19:12:14 -0700
commit      a3187e438bc6565d6e54a550a19073d1b453f041 (patch)
tree        429048ae9d34159d97fa98407cab6074891cabcb /mm/slab.c
parent      c7ce4f60ac199fb3521c5fcd64da21cee801ec2b (diff)
mm: slab: remove ZONE_DMA_FLAG
Now that we have the IS_ENABLED() helper to check whether a Kconfig option is enabled, ZONE_DMA_FLAG is no longer useful. Moreover, the use of ZONE_DMA_FLAG in slab is pointless according to the comment [1] from Johannes Weiner, so remove it; ORing the passed-in flags with the cache gfp flags is already done in kmem_getpages().

[1] https://lkml.org/lkml/2014/9/25/553

Link: http://lkml.kernel.org/r/1462381297-11009-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
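Two mechanisms the message refers to can be sketched briefly. First, IS_ENABLED() tests a Kconfig symbol directly at compile time, so a separately maintained CONFIG_ZONE_DMA_FLAG is redundant. Second, the cache's gfp flags are ORed into the caller's flags when slab pages are allocated, so a DMA cache already gets GFP_DMA without any per-allocation check. The fragment below is only an illustrative sketch of those two points, not the actual mm/slab.c code; the example_* helper names and the simplified allocation body are invented here.

#include <linux/gfp.h>
#include <linux/kconfig.h>      /* IS_ENABLED() */
#include <linux/slab.h>         /* with CONFIG_SLAB, provides struct kmem_cache (allocflags, gfporder) */

/* Illustration: where a compile-time check is still wanted, test the
 * Kconfig symbol itself instead of keeping a parallel ZONE_DMA_FLAG. */
static bool example_dma_zone_enabled(void)
{
        return IS_ENABLED(CONFIG_ZONE_DMA);
}

/* Illustration: simplified shape of the slab page-allocation step.
 * Because allocflags (which carry GFP_DMA for SLAB_CACHE_DMA caches)
 * are ORed in here, callers never need to pass GFP_DMA themselves. */
static struct page *example_getpages(struct kmem_cache *cachep,
                                     gfp_t flags, int nodeid)
{
        flags |= cachep->allocflags;
        return __alloc_pages_node(nodeid, flags, cachep->gfporder);
}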
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1ee26a0d358fa..d81565a92864b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2236,7 +2236,7 @@ done:
         cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
         cachep->flags = flags;
         cachep->allocflags = __GFP_COMP;
-        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
+        if (flags & SLAB_CACHE_DMA)
                 cachep->allocflags |= GFP_DMA;
         cachep->size = size;
         cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2664,16 +2664,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
         }
 }
 
-static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
-{
-        if (CONFIG_ZONE_DMA_FLAG) {
-                if (flags & GFP_DMA)
-                        BUG_ON(!(cachep->allocflags & GFP_DMA));
-                else
-                        BUG_ON(cachep->allocflags & GFP_DMA);
-        }
-}
-
 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
 {
         void *objp;
@@ -2753,14 +2743,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
                 local_irq_enable();
 
         /*
-         * The test for missing atomic flag is performed here, rather than
-         * the more obvious place, simply to reduce the critical path length
-         * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
-         * will eventually be caught here (where it matters).
-         */
-        kmem_flagcheck(cachep, flags);
-
-        /*
          * Get mem for the objs. Attempt to allocate a physical page from
          * 'nodeid'.
          */
@@ -3145,9 +3127,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
                                                 gfp_t flags)
 {
         might_sleep_if(gfpflags_allow_blocking(flags));
-#if DEBUG
-        kmem_flagcheck(cachep, flags);
-#endif
 }
 
 #if DEBUG
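As a usage-level illustration of the scheme the patch relies on: a caller that needs DMA-capable objects creates its cache with SLAB_CACHE_DMA, and GFP_DMA is then applied per cache rather than per allocation. The sketch below is not part of this patch; the cache name, object type, and helper names are invented for the example.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

struct example_obj {                            /* invented type, for illustration */
        char buf[192];
};

static struct kmem_cache *example_dma_cache;    /* invented name */

static int example_cache_init(void)
{
        /* SLAB_CACHE_DMA makes the cache carry GFP_DMA in its allocflags. */
        example_dma_cache = kmem_cache_create("example_dma_cache",
                                              sizeof(struct example_obj),
                                              0, SLAB_CACHE_DMA, NULL);
        return example_dma_cache ? 0 : -ENOMEM;
}

static struct example_obj *example_alloc(void)
{
        /*
         * No GFP_DMA here: the slab page allocation ORs in the cache's
         * allocflags, which is why the per-allocation kmem_flagcheck()
         * removed by this patch was considered pointless.
         */
        return kmem_cache_alloc(example_dma_cache, GFP_KERNEL);
}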