author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-11-07 14:01:00 +0100
committer	Ingo Molnar <mingo@kernel.org>	2016-11-16 10:29:04 +0100
commit		f285144f81e814f39342dbf5321d6ba939890b1b (patch)
tree		d9194b57593b5767efab27870bfda51e730b4798	/arch/x86/include/asm/preempt.h
parent		527b0a76f41d062381adbb55c8eb61e32cb0bfc9 (diff)
sched/x86: Do not clear PREEMPT_NEED_RESCHED on preempt count reset
The per-cpu preempt count of x86 contains two values, the actual preempt
count and the inverted PREEMPT_NEED_RESCHED bit. If a corrupted preempt
count is detected the preempt_count_set() function is used to reset the
preempt count.

In case the inverted PREEMPT_NEED_RESCHED bit is zero at the time of the
reset, the preemption indication is lost. Use raw_cpu_cmpxchg_4() to reset
only the count part and leave the PREEMPT_NEED_RESCHED bit as it is.

This improves the kernel's behavior when it runs into preempt count leaks
and tries to fix them up.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1478523660-733-1-git-send-email-schwidefsky@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/include/asm/preempt.h')
-rw-r--r--	arch/x86/include/asm/preempt.h	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 17f2186457012..ec1f3c6511506 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -24,7 +24,13 @@ static __always_inline int preempt_count(void)
 
 static __always_inline void preempt_count_set(int pc)
 {
-	raw_cpu_write_4(__preempt_count, pc);
+	int old, new;
+
+	do {
+		old = raw_cpu_read_4(__preempt_count);
+		new = (old & PREEMPT_NEED_RESCHED) |
+			(pc & ~PREEMPT_NEED_RESCHED);
+	} while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
 }
 
 /*
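
For illustration, here is a minimal stand-alone C sketch of the same masking idea, with C11 atomics standing in for the per-cpu raw_cpu_read_4()/raw_cpu_cmpxchg_4() accessors. The PREEMPT_NEED_RESCHED value mirrors the x86 definition (the top bit of the count word), but the helper name, the atomic wrapper, and the test values are assumptions made for this example; it is not kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* Assumed to match the x86 definition: the bit is stored inverted. */
#define PREEMPT_NEED_RESCHED	0x80000000u

static _Atomic unsigned int preempt_count_word;

/*
 * Reset the count part of the word while leaving the inverted
 * PREEMPT_NEED_RESCHED bit exactly as it was, mirroring the
 * cmpxchg loop in the hunk above.
 */
static void preempt_count_set_sketch(unsigned int pc)
{
	unsigned int old, new;

	do {
		old = atomic_load(&preempt_count_word);
		new = (old & PREEMPT_NEED_RESCHED) |
		      (pc & ~PREEMPT_NEED_RESCHED);
	} while (!atomic_compare_exchange_weak(&preempt_count_word, &old, new));
}

int main(void)
{
	/*
	 * Simulate a corrupted count with the resched bit already cleared;
	 * because the bit is inverted, a cleared bit means "preemption
	 * needed" and must survive the reset.
	 */
	atomic_store(&preempt_count_word, 0x00000005u);
	preempt_count_set_sketch(2);
	printf("count word after reset: %#x\n",
	       atomic_load(&preempt_count_word));	/* prints 0x2 */
	return 0;
}

A plain store (as the old raw_cpu_write_4() did) would overwrite the resched bit along with the count; the compare-and-swap loop is what lets the reset touch only the count bits.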