author		Jann Horn <jannh@google.com>	2020-09-17 11:56:43 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2020-09-17 11:56:43 +1000
commit		d6b414850a3db97b86fbb426c6b8cea041f448eb (patch)
tree		f3d48baf1653b536daeaf597b60d010afb5f840d
parent		1d08153f09b0fdc8796451ecf2c4fec2bc099e3c (diff)
mm/gup: take mmap_lock in get_dump_page()
Properly take the mmap_lock before calling into the GUP code from
get_dump_page(); and play nice, allowing the GUP code to drop the
mmap_lock if it has to sleep.

As Linus pointed out, we don't actually need the VMA because
__get_user_pages() will flush the dcache for us if necessary.

Link: http://lkml.kernel.org/r/20200827114932.3572699-7-jannh@google.com
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
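For readability, here is get_dump_page() as it reads with this patch applied, reconstructed from the hunk below; the comments are editorial annotations of the locking contract described in the changelog, not part of the patch itself.

#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct page *page;
	int locked = 1;		/* tells GUP that mmap_lock is held on entry */
	int ret;

	/* Take mmap_lock ourselves; bail out if the dumping task is killed. */
	if (mmap_read_lock_killable(mm))
		return NULL;
	/*
	 * GUP is allowed to drop mmap_lock if it has to sleep; if "locked"
	 * is 0 on return, the lock is already released and must not be
	 * dropped again here.
	 */
	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
	if (locked)
		mmap_read_unlock(mm);
	return (ret == 1) ? page : NULL;
}
#endif /* CONFIG_ELF_CORE */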
-rw-r--r--	mm/gup.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 702eebe7a551..2bb04fddd1a5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1537,19 +1537,23 @@ finish_or_fault:
  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
  * allowing a hole to be left in the corefile to save diskspace.
  *
- * Called without mmap_lock, but after all other threads have been killed.
+ * Called without mmap_lock (takes and releases the mmap_lock by itself).
  */
 #ifdef CONFIG_ELF_CORE
 struct page *get_dump_page(unsigned long addr)
 {
-	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
 	struct page *page;
+	int locked = 1;
+	int ret;
 
-	if (__get_user_pages_locked(current->mm, addr, 1, &page, &vma, NULL,
-				    FOLL_FORCE | FOLL_DUMP | FOLL_GET) < 1)
+	if (mmap_read_lock_killable(mm))
 		return NULL;
-	flush_cache_page(vma, addr, page_to_pfn(page));
-	return page;
+	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
+				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
+	if (locked)
+		mmap_read_unlock(mm);
+	return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
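The NULL-means-hole contract mentioned in the comment above is what the coredump side relies on. The helper below, dump_one_page_or_hole(), is a hypothetical sketch of such a caller and is not part of this patch; dump_emit() and dump_skip() are the existing helpers from fs/coredump.c, and the page reference taken via FOLL_GET is dropped with put_page().

#include <linux/coredump.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Hypothetical illustration: write one user page into the core file, or
 * leave a sparse hole when get_dump_page() returns NULL (ZERO_PAGE,
 * pte_none, or an unreadable mapping).
 */
static int dump_one_page_or_hole(struct coredump_params *cprm,
				 unsigned long addr)
{
	struct page *page = get_dump_page(addr);
	int ok;

	if (!page)
		/* Nothing worth storing: skip ahead, leaving a hole. */
		return dump_skip(cprm, PAGE_SIZE);

	/* get_dump_page() returned the page with a reference held. */
	ok = dump_emit(cprm, kmap(page), PAGE_SIZE);
	kunmap(page);
	put_page(page);
	return ok;
}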