author	Mel Gorman <mgorman@suse.de>	2015-06-30 14:57:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-30 19:44:56 -0700
commit	0e1cc95b4cc7293bb7b39175035e7f7e45c90977 (patch)
tree	61b1173bce0ef2738aec4259956c1ecc52280dfa /mm/internal.h
parent	74033a798f5a5db368126ee6f690111cf019bf7a (diff)
mm: meminit: finish initialisation of struct pages before basic setup
Waiman Long reported that 24TB machines hit OOM during basic setup when struct page initialisation was deferred. One approach is to initialise memory on demand, but that interferes with page allocator paths. This patch instead creates dedicated threads to initialise memory before basic setup. It then blocks on a rw_semaphore until completion, since a wait_queue and counter would be overkill. This may be slower to boot, but it is simpler overall and it also gets rid of the section mangling that existed only so kswapd could do the initialisation.

[akpm@linux-foundation.org: include rwsem.h, use DECLARE_RWSEM, fix comment, remove unneeded cast]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Nathan Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Scott Norton <scott.norton@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
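The blocking scheme the changelog describes — one initialisation thread per memory node, with boot held back by a brief write acquisition of a rw_semaphore that each thread holds for read while it works — looks roughly like the sketch below. This is an illustrative reconstruction, not the patch itself: the identifiers pgdat_init_rwsem, deferred_init_worker and wait_for_deferred_meminit, and the exact include list, are assumptions made for the example.

/*
 * Illustrative sketch of the wait scheme (kernel-style C).  Names other
 * than the generic rwsem/kthread/nodemask APIs are assumptions.
 */
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(pgdat_init_rwsem);

/* One thread per node: hold the semaphore for read for as long as the
 * node's struct pages are being initialised. */
static int deferred_init_worker(void *data)
{
	down_read(&pgdat_init_rwsem);
	/* data is the node's pg_data_t (from NODE_DATA()); the deferred
	 * struct pages for that node would be initialised here. */
	up_read(&pgdat_init_rwsem);
	return 0;
}

/* Start the per-node threads, then block until they have all dropped
 * their read holds by briefly taking the semaphore for write.  This is
 * the "rw_semaphore instead of wait_queue and counter" idea from the
 * changelog; the sketch assumes every worker takes its read hold before
 * the write acquisition is granted. */
void __init wait_for_deferred_meminit(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kthread_run(deferred_init_worker, NODE_DATA(nid),
			    "pgdatinit%d", nid);

	down_write(&pgdat_init_rwsem);
	up_write(&pgdat_init_rwsem);
}

Taking the semaphore for write only succeeds once no reader holds it, so the single down_write()/up_write() pair doubles as a wait-for-all-workers barrier without any explicit counting.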
Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	24
1 file changed, 0 insertions, 24 deletions
diff --git a/mm/internal.h b/mm/internal.h
index a48cbefde8cae..36b23f1e2ca62 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -379,30 +379,6 @@ static inline void mminit_verify_zonelist(void)
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
-/*
- * Deferred struct page initialisation requires init functions that are freed
- * before kswapd is available. Reuse the memory hotplug section annotation
- * to mark the required code.
- *
- * __defermem_init is code that always exists but is annotated __meminit to
- * avoid section warnings.
- * __defer_init code gets marked __meminit when deferring struct page
- * initialisation but is otherwise in the init section.
- */
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-#define __defermem_init __meminit
-#define __defer_init __meminit
-
-void deferred_init_memmap(int nid);
-#else
-#define __defermem_init
-#define __defer_init __init
-
-static inline void deferred_init_memmap(int nid)
-{
-}
-#endif
-
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,