author	Michael S. Tsirkin <mst@redhat.com>	2013-05-26 17:32:23 +0300
committer	Ingo Molnar <mingo@kernel.org>	2013-05-28 09:41:11 +0200
commit	662bbcb2747c2422cf98d3d97619509379eee466 (patch)
tree	d733c7257e59bde654d963cce0e26454a134d787
parent	114276ac0a3beb9c391a410349bd770653e185ce (diff)
download	linux-662bbcb2747c2422cf98d3d97619509379eee466.tar.gz
	linux-662bbcb2747c2422cf98d3d97619509379eee466.tar.xz
mm, sched: Allow uaccess in atomic with pagefault_disable()
This changes might_fault() so that it does not trigger a false positive
diagnostic for e.g. the following sequence:

	spin_lock_irqsave()
	pagefault_disable()
	copy_to_user()
	pagefault_enable()
	spin_unlock_irqrestore()

In particular vhost wants to do this, to call socket ops from under a lock.

There are 3 cases to consider:

 - CONFIG_PROVE_LOCKING - might_fault() is non-inline, so it's easy to move
   the in_atomic() test into it to fix up the false positive warning.

 - CONFIG_DEBUG_ATOMIC_SLEEP - might_fault() is currently inline, but we are
   calling a non-inline __might_sleep() anyway, so let's use the non-inline
   version of might_fault() that does the right thing.

 - !CONFIG_DEBUG_ATOMIC_SLEEP && !CONFIG_PROVE_LOCKING - __might_sleep() is
   a nop, so might_fault() is a nop.

Make this explicit.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-11-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
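As an illustration of the pattern this patch is meant to allow, a vhost-style
caller could look roughly like the sketch below once the change is in. The
struct, lock and function names (demo_dev, demo_copy_under_lock) are
hypothetical, invented for the example and not taken from the patch; the point
is only that copy_to_user() between pagefault_disable()/pagefault_enable()
never sleeps (it fails instead of faulting pages in), so calling it under a
spinlock is legitimate and should no longer trip might_fault():

	/* Sketch only; assumes the usual uaccess/spinlock/errno headers. */
	struct demo_dev {			/* hypothetical device state */
		spinlock_t	lock;
	};

	static int demo_copy_under_lock(struct demo_dev *d, void __user *dst,
					const void *src, size_t len)
	{
		unsigned long flags;
		unsigned long left;

		spin_lock_irqsave(&d->lock, flags);
		pagefault_disable();
		/* Cannot sleep here: returns the number of bytes left uncopied. */
		left = copy_to_user(dst, src, len);
		pagefault_enable();
		spin_unlock_irqrestore(&d->lock, flags);

		return left ? -EFAULT : 0;
	}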
-rw-r--r--	include/linux/kernel.h	7
-rw-r--r--	mm/memory.c	11
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 24719eaa1209..4c7e2e508766 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,13 +193,10 @@ extern int _cond_resched(void);
		(__x < 0) ? -__x : __x;		\
	})
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void);
#else
-static inline void might_fault(void)
-{
-	__might_sleep(__FILE__, __LINE__, 0);
-}
+static inline void might_fault(void) { }
#endif
extern struct atomic_notifier_head panic_notifier_list;
diff --git a/mm/memory.c b/mm/memory.c
index c1f190f51f6f..d7d54a114773 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4210,7 +4210,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
	up_read(&mm->mmap_sem);
}
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void)
{
	/*
@@ -4222,14 +4222,17 @@ void might_fault(void)
	if (segment_eq(get_fs(), KERNEL_DS))
		return;
-	__might_sleep(__FILE__, __LINE__, 0);
-
	/*
	 * it would be nicer only to annotate paths which are not under
	 * pagefault_disable, however that requires a larger audit and
	 * providing helpers like get_user_atomic.
	 */
-	if (!in_atomic() && current->mm)
+	if (in_atomic())
+		return;
+
+	__might_sleep(__FILE__, __LINE__, 0);
+
+	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
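Putting the two mm/memory.c hunks together, might_fault() after this patch
reads roughly as follows. This is a reconstruction from the diff above for
readability only; the comment before the KERNEL_DS check is truncated by the
hunk context and merely summarized here. Since pagefault_disable() works by
bumping the preempt count, in_atomic() is true between pagefault_disable()
and pagefault_enable(), so the function now returns before __might_sleep()
and the false positive goes away:

	void might_fault(void)
	{
		/* ... KERNEL_DS comment elided by the hunk context above ... */
		if (segment_eq(get_fs(), KERNEL_DS))
			return;

		/*
		 * it would be nicer only to annotate paths which are not under
		 * pagefault_disable, however that requires a larger audit and
		 * providing helpers like get_user_atomic.
		 */
		if (in_atomic())
			return;

		__might_sleep(__FILE__, __LINE__, 0);

		if (current->mm)
			might_lock_read(&current->mm->mmap_sem);
	}
	EXPORT_SYMBOL(might_fault);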