path: root/mm/oom_kill.c
author     Michal Hocko <mhocko@suse.com>                 2016-07-26 15:24:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org> 2016-07-26 16:19:19 -0700
commit     e5e3f4c4f0e95ecbad2f8d2f4f6a29bb8a90226b (patch)
tree       2925a64ce37477aad18eec13c20ba8fe3ad31ea9 /mm/oom_kill.c
parent     ba6c19fd113a3965f8cf4c183a813d528008d03e (diff)
mm, oom_reaper: make sure that mmput_async is called only when memory was reaped
Tetsuo is worried that mmput_async might still lead to a premature new
oom victim selection due to the following race:

__oom_reap_task                         exit_mm
  find_lock_task_mm
  atomic_inc(mm->mm_users) # = 2
  task_unlock
                                        task_lock
                                        task->mm = NULL
                                        up_read(&mm->mmap_sem)
                < somebody write locks mmap_sem >
                                        task_unlock
                                        mmput
                                          atomic_dec_and_test # = 1
                                        exit_oom_victim
  down_read_trylock # failed - no reclaim
  mmput_async # Takes unpredictable amount of time
                < new OOM situation >

the final __mmput will be executed in the delayed context, which might
happen far in the future.  Such a race is highly unlikely because the
write holder of mmap_sem would have to be an external task (all direct
holders are already killed or exiting) and it usually has to pin
mm_users in order to do anything reasonable.

We can, however, make sure that mmput_async is only called when we do
not back off and actually reap some memory.  That would reduce the
impact of the delayed __mmput because the real content would already be
freed.  Pin mm_count to keep the mm_struct alive after we drop
task_lock and before we try to get mmap_sem.  If the mmap_sem trylock
succeeds we can try to grab the mm_users reference and then go on with
unmapping the address space.

It is not clear whether this race is possible at all, but it is better
to be more robust and not pin mm_users unless we are sure we are
actually doing some real work during __oom_reap_task.

Link: http://lkml.kernel.org/r/1465306987-30297-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
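As an aside on the refcount ordering described above: mm_count pins the mm_struct itself (released with mmdrop) while mm_users pins the address space contents (released here via mmput_async). The standalone userspace sketch below only models that ordering and its error paths; struct mm_model, reap_one and the plain integer counters are illustrative stand-ins, not kernel code.

/* Illustrative model only: plain ints stand in for the kernel's atomic
 * mm_count/mm_users, and a flag stands in for down_read_trylock(). */
#include <stdbool.h>
#include <stdio.h>

struct mm_model {
        int mm_count;        /* pins the struct itself  -> mmdrop()      */
        int mm_users;        /* pins the address space  -> mmput_async() */
        bool mmap_sem_free;  /* would down_read_trylock() succeed?       */
};

static bool reap_one(struct mm_model *mm)
{
        bool ret = true;

        mm->mm_count++;              /* atomic_inc(&mm->mm_count) under task_lock */

        if (!mm->mmap_sem_free) {    /* down_read_trylock() failed                */
                ret = false;
                goto mm_drop;        /* mm_users never taken: no delayed __mmput  */
        }

        if (mm->mm_users == 0)       /* mmget_not_zero() failed, mm already gone  */
                goto mm_drop;        /* (the real code also drops mmap_sem here)  */
        mm->mm_users++;

        printf("reaping: unmap_page_range() would run here\n");

        mm->mm_users--;              /* mmput_async(): may complete late, but only
                                        after memory was actually reaped          */
mm_drop:
        mm->mm_count--;              /* mmdrop() */
        return ret;
}

int main(void)
{
        struct mm_model contended = { .mm_count = 1, .mm_users = 2, .mmap_sem_free = false };
        struct mm_model reapable  = { .mm_count = 1, .mm_users = 2, .mmap_sem_free = true  };

        printf("contended mmap_sem -> %s\n", reap_one(&contended) ? "done" : "retry");
        printf("free mmap_sem      -> %s\n", reap_one(&reapable)  ? "done" : "retry");
        return 0;
}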
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c11f8bdd0c12..d4a929d79470 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -452,7 +452,7 @@ static bool __oom_reap_task(struct task_struct *tsk)
          * We have to make sure to not race with the victim exit path
          * and cause premature new oom victim selection:
          * __oom_reap_task                 exit_mm
-         *   atomic_inc_not_zero
+         *   mmget_not_zero
          *                                 mmput
          *                                   atomic_dec_and_test
          *                                 exit_oom_victim
@@ -474,12 +474,22 @@ static bool __oom_reap_task(struct task_struct *tsk)
         if (!p)
                 goto unlock_oom;
         mm = p->mm;
-        atomic_inc(&mm->mm_users);
+        atomic_inc(&mm->mm_count);
         task_unlock(p);
 
         if (!down_read_trylock(&mm->mmap_sem)) {
                 ret = false;
-                goto unlock_oom;
+                goto mm_drop;
+        }
+
+        /*
+         * increase mm_users only after we know we will reap something so
+         * that the mmput_async is called only when we have reaped something
+         * and delayed __mmput doesn't matter that much
+         */
+        if (!mmget_not_zero(mm)) {
+                up_read(&mm->mmap_sem);
+                goto mm_drop;
         }
 
         tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -521,15 +531,16 @@ static bool __oom_reap_task(struct task_struct *tsk)
          * to release its memory.
          */
         set_bit(MMF_OOM_REAPED, &mm->flags);
-unlock_oom:
-        mutex_unlock(&oom_lock);
         /*
          * Drop our reference but make sure the mmput slow path is called from a
          * different context because we shouldn't risk we get stuck there and
          * put the oom_reaper out of the way.
          */
-        if (mm)
-                mmput_async(mm);
+        mmput_async(mm);
+mm_drop:
+        mmdrop(mm);
+unlock_oom:
+        mutex_unlock(&oom_lock);
         return ret;
 }
 
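For reference, the mmget_not_zero() used in the second hunk (and named in the updated comment of the first) is, in kernels of this vintage, a thin inline wrapper around the previously open-coded atomic_inc_not_zero() on mm_users; its definition is approximately:

/* Approximate in-kernel definition (include/linux/sched.h at the time):
 * take an mm_users reference only if the count has not already hit zero. */
static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}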