From 19fc3f0acde32636529969570055c7e2a744787c Mon Sep 17 00:00:00 2001
From: Adam Litke
Date: Mon, 28 Apr 2008 02:12:20 -0700
Subject: hugetlb: decrease hugetlb_lock cycling in gather_surplus_huge_pages

To reduce hugetlb_lock acquisitions and releases when freeing excess
surplus pages, scan the page list in two parts.  First, transfer the
needed pages to the hugetlb pool.  Then drop the lock and free the
remaining pages back to the buddy allocator.  In the common case there
are zero excess pages and no lock operations are required.

Thanks to Mel Gorman for this improvement.

Signed-off-by: Adam Litke
Cc: Mel Gorman
Cc: Dave Hansen
Cc: William Lee Irwin III
Cc: Andy Whitcroft
Cc: David Gibson
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3737d82f5225b..93ea46a0fba46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,11 +372,19 @@ retry:
 	resv_huge_pages += delta;
 	ret = 0;
 free:
+	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		if ((--needed) < 0)
+			break;
 		list_del(&page->lru);
-		if ((--needed) >= 0)
-			enqueue_huge_page(page);
-		else {
+		enqueue_huge_page(page);
+	}
+
+	/* Free unnecessary surplus pages to the buddy allocator */
+	if (!list_empty(&surplus_list)) {
+		spin_unlock(&hugetlb_lock);
+		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+			list_del(&page->lru);
 			/*
 			 * The page has a reference count of zero already, so
 			 * call free_huge_page directly instead of using
@@ -384,10 +392,9 @@ free:
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
-			spin_unlock(&hugetlb_lock);
 			free_huge_page(page);
-			spin_lock(&hugetlb_lock);
 		}
+		spin_lock(&hugetlb_lock);
 	}
 
 	return ret;
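
For illustration, here is a minimal user-space sketch of the same two-pass
pattern.  It is not the kernel code: a pthread mutex stands in for
hugetlb_lock, a singly linked "pool" stands in for the hugetlb free list,
free() stands in for handing a page back to the buddy allocator, and the
names node, pool_lock, and enqueue_or_free are hypothetical.  As in the
patch, the function is entered and exited with the lock held, and in the
common case (no leftover surplus) the lock is never dropped at all.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool;		/* protected by pool_lock */

/* Called with pool_lock held; returns with pool_lock held. */
static void enqueue_or_free(struct node **surplus, int needed)
{
	struct node *n;

	/* Pass 1: transfer the needed nodes to the pool under the lock. */
	while (needed-- > 0 && (n = *surplus) != NULL) {
		*surplus = n->next;
		n->next = pool;
		pool = n;
	}

	/*
	 * Pass 2: only if leftovers exist, cycle the lock exactly once
	 * and free them all.  In the common case this branch is skipped.
	 */
	if (*surplus != NULL) {
		pthread_mutex_unlock(&pool_lock);
		while ((n = *surplus) != NULL) {
			*surplus = n->next;
			free(n);	/* freeing needs no pool_lock */
		}
		pthread_mutex_lock(&pool_lock);
	}
}

int main(void)
{
	struct node *surplus = NULL;

	/* Build a surplus list of five nodes. */
	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		if (n == NULL)
			abort();
		n->next = surplus;
		surplus = n;
	}

	pthread_mutex_lock(&pool_lock);
	enqueue_or_free(&surplus, 2);	/* keep two, free three */
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

Compared with the pre-patch shape, which unlocked and relocked around
every single free, the lock is cycled at most once no matter how many
surplus nodes are released, which is exactly the saving the commit
message describes.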