author		Michal Hocko <mhocko@suse.com>	2017-07-10 15:48:41 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-10 16:32:31 -0700
commit		7f252f277b66854c61d3abdd4c196d6dc64fa333 (patch)
tree		d83c538b6f8420feb1ce51771da7da13d4cf3a74 /mm
parent		9f123ab544df1c92acd6a029067e8bde44780740 (diff)
mm, memory_hotplug: simplify empty node mask handling in new_node_page
new_node_page() tries to allocate the target page on a different NUMA node
than the source page. This makes sense in most cases during hotplug,
because we are likely to offline the whole NUMA node. But there are cases
where there is no other node to fall back to (e.g. when offlining parts of
the only existing node), and then we have to fall back to allocating from
the source node. The current code does that, but it can be simplified by
checking nmask and updating it before we even try to allocate, rather than
special-casing the fallback. This patch shouldn't introduce any functional
change.

Link: http://lkml.kernel.org/r/20170608074553.22152-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: zhong jiang <zhongjiang@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
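For illustration only, here is a minimal user-space sketch of the nodemask
handling the patch introduces, using a plain unsigned long bitmask in place
of the kernel's nodemask_t. The toy_clear() helper and the mask values are
hypothetical, chosen only to exercise both branches (another node available
vs. the source node being the only one):

/*
 * User-space analogue of the "clear nid, fall back if empty" pattern
 * from new_node_page(). Not kernel code; nodemask_t, node_clear() and
 * nodes_empty() are replaced by a plain bitmask here.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long toy_clear(unsigned long mask, int nid)
{
	return mask & ~(1UL << nid);	/* drop the source node from the mask */
}

int main(void)
{
	int nid = 0;

	/* Case 1: nodes 0 and 1 have memory -- nid is simply excluded. */
	unsigned long nmask = 0x3;
	nmask = toy_clear(nmask, nid);
	if (nmask == 0)			/* no other node left ... */
		nmask |= 1UL << nid;	/* ... so reuse the source node */
	assert(nmask == 0x2);

	/* Case 2: nid is the only node with memory -- we fall back to it. */
	nmask = 0x1;
	nmask = toy_clear(nmask, nid);
	if (nmask == 0)
		nmask |= 1UL << nid;
	assert(nmask == 0x1);

	printf("fallback handled up front, as in new_node_page()\n");
	return 0;
}

Doing the clear-and-fallback once, up front, is what lets the allocation
path below collapse into a single __alloc_pages_nodemask() call.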
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 32abde2e2472d..f42a8ef93ec47 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1436,7 +1436,15 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
-	struct page *new_page = NULL;
+
+	/*
+	 * try to allocate from a different node but reuse this node if there
+	 * are no other online nodes to be used (e.g. we are offlining a part
+	 * of the only existing node)
+	 */
+	node_clear(nid, nmask);
+	if (nodes_empty(nmask))
+		node_set(nid, nmask);
 
 	/*
 	 * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1447,18 +1455,11 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					next_node_in(nid, nmask));
 
-	node_clear(nid, nmask);
-
 	if (PageHighMem(page)
 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
 
-	if (!nodes_empty(nmask))
-		new_page = __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
-	if (!new_page)
-		new_page = __alloc_pages(gfp_mask, 0, nid);
-
-	return new_page;
+	return __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
 }
 
 #define NR_OFFLINE_AT_ONCE_PAGES	(256)