author	Jiang Liu <liuj97@gmail.com>	2013-07-03 15:03:17 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:07:33 -0700
commit	170a5a7eb2bf10161197e5490fbc29ca4561aedb (patch)
tree	20568fb63cc21f6ac6bbbcd83e30d943d1a95e83 /arch/x86
parent	c3d5f5f0c2bc4eabeaf49f1a21e1aeb965246cd2 (diff)
mm: make __free_pages_bootmem() only available at boot time
In order to simplify management of totalram_pages and zone->managed_pages, make __free_pages_bootmem() only available at boot time. With this change applied, __free_pages_bootmem() is only used by bootmem.c and nobootmem.c at boot time, so mark it as __init. Other callers of __free_pages_bootmem() have been converted to use free_reserved_page(), which handles totalram_pages and zone->managed_pages in a safer way.

This patch also fixes a bug in free_pagetable() for x86_64: when freeing reserved pages it must increase zone->managed_pages, not zone->present_pages.

Now that managed_pages_count_lock protects totalram_pages and zone->managed_pages, remove the redundant ppb_lock in put_page_bootmem(). This greatly simplifies the locking rules.

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
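For reference, the accounting that free_reserved_page() takes over from the removed open-coded block can be sketched roughly as follows. This is a minimal sketch of the helpers this series relies on in include/linux/mm.h and mm/page_alloc.c; the lock name follows the commit message, HIGHMEM handling is omitted, and the bodies should be read as an illustration rather than the exact kernel source.

/*
 * Sketch: hand a reserved (bootmem) page back to the buddy allocator
 * and update the global/per-zone counters in one place.
 */
static inline void free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	adjust_managed_page_count(page, 1);
}

/*
 * Sketch: keep zone->managed_pages and totalram_pages consistent under
 * a dedicated spinlock, so callers such as free_pagetable() no longer
 * update the counters (or take zone_span_writelock) by hand.
 */
void adjust_managed_page_count(struct page *page, long count)
{
	spin_lock(&managed_pages_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
	spin_unlock(&managed_pages_count_lock);
}

With this in place, the x86_64 free_pagetable() path in the diff below simply calls free_reserved_page() once per page instead of touching zone->present_pages and totalram_pages under zone_span_writelock.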
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/init_64.c	18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b7bdf7bebf3b..ec312a92b137 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -712,36 +712,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory);
 
 static void __meminit free_pagetable(struct page *page, int order)
 {
-	struct zone *zone;
-	bool bootmem = false;
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
 
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
 		__ClearPageReserved(page);
-		bootmem = true;
 
 		magic = (unsigned long)page->lru.next;
 		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 			while (nr_pages--)
 				put_page_bootmem(page++);
 		} else
-			__free_pages_bootmem(page, order);
+			while (nr_pages--)
+				free_reserved_page(page++);
 	} else
 		free_pages((unsigned long)page_address(page), order);
-
-	/*
-	 * SECTION_INFO pages and MIX_SECTION_INFO pages
-	 * are all allocated by bootmem.
-	 */
-	if (bootmem) {
-		zone = page_zone(page);
-		zone_span_writelock(zone);
-		zone->present_pages += nr_pages;
-		zone_span_writeunlock(zone);
-		totalram_pages += nr_pages;
-	}
 }
 
 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)