author     Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 09:16:18 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 09:16:18 -0800
commit     a65981109f294ba7e64b33ad3b4575a4636fce66 (patch)
tree       1061a49f11544e18775630938a8bc53920fa0421 /mm
parent     3fed6ae4b027f9c93be18520f87bd06bdffd196b (diff)
parent     b685a7350ae76bc0f388e24b36d06a63776c68ee (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 - procfs updates
 - various misc bits
 - lib/ updates
 - epoll updates
 - autofs
 - fatfs
 - a few more MM bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (58 commits)
  mm/page_io.c: fix polled swap page in
  checkpatch: add Co-developed-by to signature tags
  docs: fix Co-Developed-by docs
  drivers/base/platform.c: kmemleak ignore a known leak
  fs: don't open code lru_to_page()
  fs/: remove caller signal_pending branch predictions
  mm/: remove caller signal_pending branch predictions
  arch/arc/mm/fault.c: remove caller signal_pending_branch predictions
  kernel/sched/: remove caller signal_pending branch predictions
  kernel/locking/mutex.c: remove caller signal_pending branch predictions
  mm: select HAVE_MOVE_PMD on x86 for faster mremap
  mm: speed up mremap by 20x on large regions
  mm: treewide: remove unused address argument from pte_alloc functions
  initramfs: cleanup incomplete rootfs
  scripts/gdb: fix lx-version string output
  kernel/kcov.c: mark write_comp_data() as notrace
  kernel/sysctl: add panic_print into sysctl
  panic: add options to print system info when panic happens
  bfs: extra sanity checking and static inode bitmap
  exec: separate MM_ANONPAGES and RLIMIT_STACK accounting
  ...
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c        2
-rw-r--r--   mm/gup.c            2
-rw-r--r--   mm/huge_memory.c    8
-rw-r--r--   mm/hugetlb.c        2
-rw-r--r--   mm/kasan/init.c     2
-rw-r--r--   mm/memory.c        17
-rw-r--r--   mm/migrate.c        2
-rw-r--r--   mm/mremap.c        66
-rw-r--r--   mm/page_io.c        4
-rw-r--r--   mm/swap.c           2
-rw-r--r--   mm/userfaultfd.c    2
11 files changed, 87 insertions(+), 22 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 29655fb47a2c4..9f5e323e883e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1125,7 +1125,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
break;
}
- if (unlikely(signal_pending_state(state, current))) {
+ if (signal_pending_state(state, current)) {
ret = -EINTR;
break;
}
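This and the following signal_pending hunks drop the caller-side unlikely() because the signal-state helpers carry the branch hint internally. A rough sketch of the helper as it reads in include/linux/sched/signal.h of this era (shown for context, not part of this diff):

	static inline int signal_pending(struct task_struct *p)
	{
		/* the branch hint lives here, so callers need not repeat it */
		return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
	}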
diff --git a/mm/gup.c b/mm/gup.c
index 6f591ccb8eca7..05acd7e2eb22e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -727,7 +727,7 @@ retry:
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
- if (unlikely(fatal_signal_pending(current))) {
+ if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cbd977b1d60d4..faf357eaf0cee 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -568,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}
- pgtable = pte_alloc_one(vma->vm_mm, haddr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable)) {
ret = VM_FAULT_OOM;
goto release;
@@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct page *zero_page;
bool set;
vm_fault_t ret;
- pgtable = pte_alloc_one(vma->vm_mm, haddr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
zero_page = mm_get_huge_zero_page(vma->vm_mm);
@@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
return VM_FAULT_SIGBUS;
if (arch_needs_pgtable_deposit()) {
- pgtable = pte_alloc_one(vma->vm_mm, addr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (!pgtable)
return VM_FAULT_OOM;
}
@@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!vma_is_anonymous(vma))
return 0;
- pgtable = pte_alloc_one(dst_mm, addr);
+ pgtable = pte_alloc_one(dst_mm);
if (unlikely(!pgtable))
goto out;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e37efd5d83183..7450888109651 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4231,7 +4231,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
- if (unlikely(fatal_signal_pending(current))) {
+ if (fatal_signal_pending(current)) {
remainder = 0;
break;
}
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 34afad56497bd..45a1b5e38e1e2 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -123,7 +123,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
pte_t *p;
if (slab_is_available())
- p = pte_alloc_one_kernel(&init_mm, addr);
+ p = pte_alloc_one_kernel(&init_mm);
else
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
if (!p)
diff --git a/mm/memory.c b/mm/memory.c
index 2dd2f9ab57f46..a52663c0612d4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
}
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
- pgtable_t new = pte_alloc_one(mm, address);
+ pgtable_t new = pte_alloc_one(mm);
if (!new)
return -ENOMEM;
@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
return 0;
}
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd)
{
- pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+ pte_t *new = pte_alloc_one_kernel(&init_mm);
if (!new)
return -ENOMEM;
@@ -2896,7 +2896,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*
* Here we only have down_read(mmap_sem).
*/
- if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
+ if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
/* See the comment in pte_alloc_one_map() */
@@ -3043,7 +3043,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
spin_unlock(vmf->ptl);
vmf->prealloc_pte = NULL;
- } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
+ } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
return VM_FAULT_OOM;
}
map_pte:
@@ -3122,7 +3122,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
- vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
+ vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
smp_wmb(); /* See comment in __pte_alloc() */
@@ -3360,8 +3360,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
start_pgoff + nr_pages - 1);
if (pmd_none(*vmf->pmd)) {
- vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
- vmf->address);
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
if (!vmf->prealloc_pte)
goto out;
smp_wmb(); /* See comment in __pte_alloc() */
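For context, the corresponding wrapper macro in include/linux/mm.h loses the address argument in the same series; roughly (a sketch, not part of the mm/ diff shown here):

	/* before the series: pte_alloc(mm, pmd, address); after: */
	#define pte_alloc(mm, pmd) \
		(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))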
diff --git a/mm/migrate.c b/mm/migrate.c
index 5d1839a9148df..ccf8966caf6fe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2636,7 +2636,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
*
* Here we only have down_read(mmap_sem).
*/
- if (pte_alloc(mm, pmdp, addr))
+ if (pte_alloc(mm, pmdp))
goto abort;
/* See the comment in pte_alloc_one_map() */
diff --git a/mm/mremap.c b/mm/mremap.c
index def01d86e36fd..3320616ed93f4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
drop_rmap_locks(vma);
}
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, unsigned long old_end,
+ pmd_t *old_pmd, pmd_t *new_pmd)
+{
+ spinlock_t *old_ptl, *new_ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t pmd;
+
+ if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+ || old_end - old_addr < PMD_SIZE)
+ return false;
+
+ /*
+ * The destination pmd shouldn't be established, free_pgtables()
+ * should have released it.
+ */
+ if (WARN_ON(!pmd_none(*new_pmd)))
+ return false;
+
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_sem prevents deadlock.
+ */
+ old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+ new_ptl = pmd_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+ /* Clear the pmd */
+ pmd = *old_pmd;
+ pmd_clear(old_pmd);
+
+ VM_BUG_ON(!pmd_none(*new_pmd));
+
+ /* Set the new pmd */
+ set_pmd_at(mm, new_addr, new_pmd, pmd);
+ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+
+ return true;
+}
+#endif
+
unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
@@ -235,8 +281,26 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
split_huge_pmd(vma, old_pmd, old_addr);
if (pmd_trans_unstable(old_pmd))
continue;
+ } else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+ /*
+ * If the extent is PMD-sized, try to speed the move by
+ * moving at the PMD level if possible.
+ */
+ bool moved;
+
+ if (need_rmap_locks)
+ take_rmap_locks(vma);
+ moved = move_normal_pmd(vma, old_addr, new_addr,
+ old_end, old_pmd, new_pmd);
+ if (need_rmap_locks)
+ drop_rmap_locks(vma);
+ if (moved)
+ continue;
+#endif
}
- if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+
+ if (pte_alloc(new_vma->vm_mm, new_pmd))
break;
next = (new_addr + PMD_SIZE) & PMD_MASK;
if (extent > next - new_addr)
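The fast path above is taken only when CONFIG_HAVE_MOVE_PMD is set (x86 selects it elsewhere in this merge) and the extent is PMD-sized and aligned. From userspace, the affected operation is an mremap() that has to relocate a large mapping; a minimal illustrative caller (not part of this diff):

	#define _GNU_SOURCE
	#include <stddef.h>
	#include <sys/mman.h>

	/* Relocating a large, PMD-aligned anonymous mapping is the case the
	 * PMD-level copy above speeds up; small or unaligned tails still go
	 * through the per-PTE move_ptes() path. */
	static void *move_mapping(void *old, size_t old_len, size_t new_len)
	{
		return mremap(old, old_len, new_len, MREMAP_MAYMOVE);
	}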
diff --git a/mm/page_io.c b/mm/page_io.c
index d975fa3f02aa5..2e8019d0e048d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -401,6 +401,8 @@ int swap_readpage(struct page *page, bool synchronous)
get_task_struct(current);
bio->bi_private = current;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (synchronous)
+ bio->bi_opf |= REQ_HIPRI;
count_vm_event(PSWPIN);
bio_get(bio);
qc = submit_bio(bio);
@@ -410,7 +412,7 @@ int swap_readpage(struct page *page, bool synchronous)
break;
if (!blk_poll(disk->queue, qc, true))
- break;
+ io_schedule();
}
__set_current_state(TASK_RUNNING);
bio_put(bio);
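With REQ_HIPRI set on synchronous reads, the polling loop in swap_readpage() now sleeps via io_schedule() when blk_poll() finds nothing, instead of dropping out of the loop. A sketch of the resulting loop, reconstructed from the hunk above plus assumed surrounding context (not the verbatim file):

	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;			/* completion handler cleared bi_private */
		if (!blk_poll(disk->queue, qc, true))
			io_schedule();		/* nothing polled; sleep instead of exiting */
	}
	__set_current_state(TASK_RUNNING);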
diff --git a/mm/swap.c b/mm/swap.c
index 4d8a1f1afaab4..4929bc1be60ef 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -126,7 +126,7 @@ void put_pages_list(struct list_head *pages)
while (!list_empty(pages)) {
struct page *victim;
- victim = list_entry(pages->prev, struct page, lru);
+ victim = lru_to_page(pages);
list_del(&victim->lru);
put_page(victim);
}
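lru_to_page() is the generic helper this series stops open-coding; it is equivalent to the expression being replaced, roughly (a sketch of the helper, not part of this diff):

	#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))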
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 48368589f5199..065c1ce191c47 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -550,7 +550,7 @@ retry:
break;
}
if (unlikely(pmd_none(dst_pmdval)) &&
- unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
+ unlikely(__pte_alloc(dst_mm, dst_pmd))) {
err = -ENOMEM;
break;
}