path: root/kernel/rcu/tree.c
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2015-12-11 13:48:43 -0800
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2016-02-23 19:59:51 -0800
commit     4914950aaa12de8a2004b8031b45480526caf42b (patch)
tree       9675c74cb3de70f93da8ed9b6648c0550e37f135 /kernel/rcu/tree.c
parent     8994515cf0030e5020d67bb837a9d9a92441d0f7 (diff)
download   linux-0-day-4914950aaa12de8a2004b8031b45480526caf42b.tar.gz
           linux-0-day-4914950aaa12de8a2004b8031b45480526caf42b.tar.xz
rcu: Stop treating in-kernel CPU-bound workloads as errors
Commit 4a81e8328d379 ("Reduce overhead of cond_resched() checks for RCU") handles the error case in which a nohz_full CPU loops indefinitely in the kernel with the scheduling-clock interrupt disabled. However, this handling includes IPIing the CPU running the offending loop, which is not what we want for real-time workloads. Real-time CPU-bound in-kernel workloads are now starting to appear, and these must be handled without IPIing the CPU, at least not in the common case.

This situation can therefore no longer be dismissed as an error case. This commit splits the handling out, so that the setting of bits in the per-CPU rcu_sched_qs_mask variable is done relatively early, but if the problem persists, resched_cpu() is eventually used to IPI the CPU containing the offending loop. Assuming that in-kernel CPU-bound loops used by real-time tasks contain frequent calls to cond_resched_rcu_qs() (as in more often than once per few tens of milliseconds), the real-time tasks will never be IPIed.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
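For illustration only, not part of the commit: a minimal sketch of the kind of CPU-bound in-kernel loop the last paragraph assumes, in which cond_resched_rcu_qs() is invoked often enough that the resched_cpu() path added below never fires. The kthread itself and do_unit_of_work() are hypothetical names.

/*
 * Hypothetical example (not from this commit): a CPU-bound kthread that
 * reports RCU quiescent states well more than once per few tens of
 * milliseconds, so rcu_implicit_dynticks_qs() never needs to IPI its CPU.
 */
#include <linux/kthread.h>
#include <linux/rcupdate.h>

static void do_unit_of_work(void)
{
	/* Hypothetical stand-in for a short, bounded chunk of real work. */
}

static int example_cpu_bound_worker(void *unused)
{
	while (!kthread_should_stop()) {
		do_unit_of_work();
		cond_resched_rcu_qs();	/* report a quiescent state to RCU */
	}
	return 0;
}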
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a91836868aded..68f4bee3ecc34 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1173,15 +1173,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
 			WRITE_ONCE(*rcrmp,
 				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-			resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-			/* Time to beat on that CPU again! */
-			resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 		}
+		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 	}
 
+	/* And if it has been a really long time, kick the CPU as well. */
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+		resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
+
 	return 0;
 }
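A side note on the wraparound-safe comparison used in the check added above: ULONG_CMP_GE() compares jiffies-style unsigned long counters by subtraction rather than with a plain ">=". The stand-alone sketch below copies a local version of the macro (its body mirrors RCU's internal definition) so the idea can be tried in userspace; the counter values are arbitrary.

/*
 * Stand-alone illustration, not kernel code: wrap-safe "a is at or after b"
 * for unsigned long time counters, in the style of RCU's ULONG_CMP_GE().
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long gp_start = ULONG_MAX - 10;	/* just before the counter wraps */
	unsigned long now = 5;				/* 16 ticks later, after the wrap */

	assert(!(now >= gp_start));		/* plain >= is fooled by the wrap */
	assert(ULONG_CMP_GE(now, gp_start));	/* subtraction-based check is not */

	printf("now is %lu ticks past gp_start\n", now - gp_start);
	return 0;
}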