path: root/mm/slab.c
author    Joonsoo Kim <iamjoonsoo.kim@lge.com>  2016-05-19 17:10:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 19:12:14 -0700
commit    a5aa63a5f7352aa8991f64d46854dcb8d3788d55 (patch)
tree      ccbed2b6746ad69a5b354d46b1f239b1bbb7d84a /mm/slab.c
parent    8888177ea116d4d14ca0a2ba054d02f35b0dae29 (diff)
mm/slab: drain the free slab as much as possible
slabs_tofree() implies freeing all free slabs. We can do the same by simply passing INT_MAX.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
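The removed helper is just a ceiling division: (free_objects + num - 1) / num is an upper bound on the number of completely free slabs on the node, and drain_freelist() stops on its own once no free slab is left, so any bound at or above that number, including INT_MAX, drains exactly the same slabs. A minimal user-space sketch of that equivalence follows; it is illustrative only, and slabs_tofree_model(), drain_model(), free_objects and objs_per_slab are stand-in names, not kernel API.

/*
 * Standalone sketch (not kernel code) of why INT_MAX is equivalent here.
 * slabs_tofree_model() mirrors the removed helper: a ceiling division
 * bounding the number of completely free slabs on a node.
 * drain_model() mirrors the relevant property of drain_freelist():
 * it frees at most 'tofree' slabs but stops once no free slab remains.
 */
#include <limits.h>
#include <stdio.h>

static int slabs_tofree_model(int free_objects, int objs_per_slab)
{
	return (free_objects + objs_per_slab - 1) / objs_per_slab;
}

static int drain_model(int free_slabs, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && free_slabs > 0) {
		free_slabs--;
		nr_freed++;
	}
	return nr_freed;
}

int main(void)
{
	int free_objects = 23, objs_per_slab = 8;
	int free_slabs = 3;	/* 23 free objects span at most 3 slabs */

	/* Both bounds free the same number of slabs: the loop is limited
	 * by the free list becoming empty, not by the count. */
	printf("old bound: %d slabs freed\n",
	       drain_model(free_slabs,
			   slabs_tofree_model(free_objects, objs_per_slab)));
	printf("INT_MAX:   %d slabs freed\n",
	       drain_model(free_slabs, INT_MAX));
	return 0;
}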
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f36d3493f49f2..a998d35599a37 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -895,12 +895,6 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
-static inline int slabs_tofree(struct kmem_cache *cachep,
-				struct kmem_cache_node *n)
-{
-	return (n->free_objects + cachep->num - 1) / cachep->num;
-}
-
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -965,7 +959,7 @@ free_slab:
 		n = get_node(cachep, node);
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 	}
 }
@@ -1117,7 +1111,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2311,7 +2305,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 	check_irq_on();
 	for_each_kmem_cache_node(cachep, node, n) {
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);