author     Prateek Sood <prsood@codeaurora.org>    2017-11-15 19:50:15 +0530
committer  Tejun Heo <tj@kernel.org>               2017-11-27 08:48:10 -0800
commit     1599a185f0e6113be185b9fb809c621c73865829 (patch)
tree       dd2f4646ce247a91b3b531c32f3331f354a024de /kernel
parent     aa24163b2ee5c92120e32e99b5a93143a0f4258e (diff)
cpuset: Make cpuset hotplug synchronous
Convert cpuset_hotplug_workfn() into a synchronous call for the cpu hotplug
path. For the memory hotplug path it is still queued as a work item.

Since cpuset_hotplug_workfn() can be made synchronous for the cpu hotplug
path, it is no longer necessary to wait for cpuset hotplug while thawing
processes.

Signed-off-by: Prateek Sood <prsood@codeaurora.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
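For orientation before the hunks, here is a minimal sketch of the call paths
this patch produces, based only on the diff below. Function bodies are elided,
and cpus_updated stands for the "cpus_allowed changed" result computed earlier
in cpuset_hotplug(); this is not the full kernel code.

static void cpuset_hotplug(bool use_cpu_hp_lock)
{
	/* ... recompute new_cpus/new_mems and propagate to cpusets ... */

	if (cpus_updated) {
		if (use_cpu_hp_lock) {
			/* Work-item path: free to take cpu_hotplug_lock. */
			rebuild_sched_domains();
		} else {
			/*
			 * CPU hotplug path: cpu_hotplug_lock is already
			 * held by the hotplug context, so take only
			 * cpuset_mutex and use the _cpuslocked variant.
			 */
			mutex_lock(&cpuset_mutex);
			rebuild_sched_domains_cpuslocked();
			mutex_unlock(&cpuset_mutex);
		}
	}
}

/* Memory hotplug still bounces through the work item ... */
static void cpuset_hotplug_workfn(struct work_struct *work)
{
	cpuset_hotplug(true);
}

/* ... while the CPU hotplug path now calls in synchronously. */
void cpuset_update_active_cpus(void)
{
	cpuset_hotplug(false);
}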
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cpuset.c   41
-rw-r--r--  kernel/power/process.c    2
-rw-r--r--  kernel/sched/core.c       1
3 files changed, 20 insertions(+), 24 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index cab5fd1ee7671..227bc25d951dd 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2277,15 +2277,8 @@ retry:
mutex_unlock(&cpuset_mutex);
}
-static bool force_rebuild;
-
-void cpuset_force_rebuild(void)
-{
- force_rebuild = true;
-}
-
/**
- * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+ * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
*
* This function is called after either CPU or memory configuration has
* changed and updates cpuset accordingly. The top_cpuset is always
@@ -2300,7 +2293,7 @@ void cpuset_force_rebuild(void)
* Note that CPU offlining during suspend is ignored. We don't modify
* cpusets across suspend/resume cycles at all.
*/
-static void cpuset_hotplug_workfn(struct work_struct *work)
+static void cpuset_hotplug(bool use_cpu_hp_lock)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
@@ -2358,25 +2351,31 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
}
/* rebuild sched domains if cpus_allowed has changed */
- if (cpus_updated || force_rebuild) {
- force_rebuild = false;
- rebuild_sched_domains();
+ if (cpus_updated) {
+ if (use_cpu_hp_lock)
+ rebuild_sched_domains();
+ else {
+ /* Acquiring cpu_hotplug_lock is not required.
+ * When cpuset_hotplug() is called in hotplug path,
+ * cpu_hotplug_lock is held by the hotplug context
+ * which is waiting for cpuhp_thread_fun to indicate
+ * completion of callback.
+ */
+ mutex_lock(&cpuset_mutex);
+ rebuild_sched_domains_cpuslocked();
+ mutex_unlock(&cpuset_mutex);
+ }
}
}
-void cpuset_update_active_cpus(void)
+static void cpuset_hotplug_workfn(struct work_struct *work)
{
- /*
- * We're inside cpu hotplug critical region which usually nests
- * inside cgroup synchronization. Bounce actual hotplug processing
- * to a work item to avoid reverse locking order.
- */
- schedule_work(&cpuset_hotplug_work);
+ cpuset_hotplug(true);
}
-void cpuset_wait_for_hotplug(void)
+void cpuset_update_active_cpus(void)
{
- flush_work(&cpuset_hotplug_work);
+ cpuset_hotplug(false);
}
/*
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7381d49a44db5..c326d7235c5f0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -204,8 +204,6 @@ void thaw_processes(void)
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
- cpuset_wait_for_hotplug();
-
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75554f366fd3a..88b3450b29abf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5624,7 +5624,6 @@ static void cpuset_cpu_active(void)
* restore the original sched domains by considering the
* cpuset configurations.
*/
- cpuset_force_rebuild();
}
cpuset_update_active_cpus();
}