author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2017-02-24 14:58:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>         2017-02-24 17:46:55 -0800
commit     6a328a626f98bb856551e506cabc7c8b969aafa3 (patch)
tree       deb7ddcd0228acd7f03c88b64cdc4ec46b8077ce /mm
parent     14fa2daa15887f9246cfedc345e83e8d24cb9058 (diff)
mm: convert page_mapped_in_vma() to use page_vma_mapped_walk()
For consistency, it is worth converting all page_check_address() users to page_vma_mapped_walk(), so we can drop the former.

Link: http://lkml.kernel.org/r/20170129173858.45174-11-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
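The conversion follows the same caller pattern used across this series: instead of asking page_check_address() for a single locked PTE, the caller fills in a struct page_vma_mapped_walk and iterates with page_vma_mapped_walk(). Below is a minimal sketch of that pattern; example_one() and its body are illustrative only and not part of this patch:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch only: the generic page_vma_mapped_walk() caller pattern that
 * replaces page_check_address(); example_one() is hypothetical. */
static bool example_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	/* Each successful iteration returns one mapping (pte or pmd)
	 * of @page in @vma, with the page table lock held. */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* inspect or update *pvmw.pte here */
		}
		/* to stop early, call page_vma_mapped_walk_done(&pvmw)
		 * and break so the lock is dropped */
	}
	return true;
}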
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_vma_mapped.c  30
-rw-r--r--  mm/rmap.c             26
2 files changed, 30 insertions, 26 deletions
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index dc1a54826cf2..a23001a22c15 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -186,3 +186,33 @@ next_pte: do {
		}
	}
}
+
+/**
+ * page_mapped_in_vma - check whether a page is really mapped in a VMA
+ * @page: the page to test
+ * @vma: the VMA to test
+ *
+ * Returns 1 if the page is mapped into the page tables of the VMA, 0
+ * if the page is not mapped into the page tables of this VMA. Only
+ * valid for normal file or anonymous VMAs.
+ */
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+{
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.flags = PVMW_SYNC,
+	};
+	unsigned long start, end;
+
+	start = __vma_address(page, vma);
+	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+
+	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+		return 0;
+	pvmw.address = max(start, vma->vm_start);
+	if (!page_vma_mapped_walk(&pvmw))
+		return 0;
+	page_vma_mapped_walk_done(&pvmw);
+	return 1;
+}
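The rewritten helper keeps its existing contract (returns 1 if the page is mapped in the VMA's page tables, 0 otherwise), so callers such as the memory-failure (hwpoison) code need no change. A hedged sketch of that style of use, with a hypothetical collect_one() standing in for the real caller:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch only: skip VMAs in which the page is not actually mapped;
 * collect_one() and its body are illustrative, not kernel code. */
static void collect_one(struct page *page, struct vm_area_struct *vma)
{
	if (!page_mapped_in_vma(page, vma))
		return;	/* not mapped in this VMA's page tables */

	/* ... otherwise act on the VMA, e.g. record the owning task ... */
}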
diff --git a/mm/rmap.c b/mm/rmap.c
index 11668fb881d8..80525820aada 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -756,32 +756,6 @@ check:
	return NULL;
}
-/**
- * page_mapped_in_vma - check whether a page is really mapped in a VMA
- * @page: the page to test
- * @vma: the VMA to test
- *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA. Only
- * valid for normal file or anonymous VMAs.
- */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
-{
-	unsigned long address;
-	pte_t *pte;
-	spinlock_t *ptl;
-
-	address = __vma_address(page, vma);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-		return 0;
-	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
-	if (!pte)			/* the page is not in this mm */
-		return 0;
-	pte_unmap_unlock(pte, ptl);
-
-	return 1;
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* Check that @page is mapped at @address into @mm. In contrast to