author     Tejun Heo <tj@kernel.org>  2015-07-09 16:39:48 -0400
committer  Jens Axboe <axboe@fb.com>  2015-07-09 14:41:08 -0600
commit     144232b34258c1fc19729e077c6fb161e30da07b (patch)
tree       8f048fd73d864a8f41917068d6e610fc5b0e0f0b /block/blk-cgroup.c
parent     838f13bf4b6737d4aec508558e45f81798fc2677 (diff)
blkcg: blkcg_css_alloc() should grab blkcg_pol_mutex while iterating blkcg_policy[]
An entry in blkcg_policy[] is stable while there are non-bypassing in-flight IOs on a request_queue which has the policy activated. This is why most derefs of blkcg_policy[] don't need explicit locking; however, blkcg_css_alloc() isn't invoked from the IO path, doesn't have this protection, and may race against policies being added and removed.

Fix it by adding explicit blkcg_pol_mutex protection around the blkcg_policy[] iteration in blkcg_css_alloc().

Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: e48453c386f3 ("block, cgroup: implement policy-specific per-blkcg data")
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
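To make the race and the fix concrete, below is a minimal userspace sketch of the same pattern; it is not kernel code. The point it illustrates is the one the patch makes: a reader that is not otherwise serialized must hold the same mutex as the writer while it walks the shared policy table, and must drop it on every exit path, including the allocation-failure one. All names here (policy_table, MAX_POLS, pol_mutex, alloc_all_policy_data, register_policy) are invented stand-ins for blkcg_policy[], BLKCG_MAX_POLS, blkcg_pol_mutex, blkcg_css_alloc() and the policy registration path.

#include <pthread.h>
#include <stdlib.h>

#define MAX_POLS 6					/* stands in for BLKCG_MAX_POLS */

struct policy {
	size_t data_size;				/* like a policy's cpd_size */
};

static struct policy *policy_table[MAX_POLS];		/* like blkcg_policy[] */
static pthread_mutex_t pol_mutex = PTHREAD_MUTEX_INITIALIZER;	/* like blkcg_pol_mutex */

/*
 * Reader, analogous to blkcg_css_alloc(): it runs outside the IO path, so
 * nothing else pins the table entries and it must hold pol_mutex while
 * iterating.
 */
static int alloc_all_policy_data(void *pd[MAX_POLS])
{
	int i;

	pthread_mutex_lock(&pol_mutex);			/* keep policy_table[] stable */

	for (i = 0; i < MAX_POLS; i++) {
		struct policy *pol = policy_table[i];

		if (!pol)
			continue;

		pd[i] = calloc(1, pol->data_size);
		if (!pd[i]) {
			/* every exit path, including errors, must unlock */
			pthread_mutex_unlock(&pol_mutex);
			return -1;
		}
	}

	pthread_mutex_unlock(&pol_mutex);
	return 0;
}

/* Writer, analogous to policy (un)registration: same mutex, so no race. */
static void register_policy(int idx, struct policy *pol)
{
	pthread_mutex_lock(&pol_mutex);
	policy_table[idx] = pol;
	pthread_mutex_unlock(&pol_mutex);
}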
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2ff74ffcbb279..05b893de516bd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -844,6 +844,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 		goto free_blkcg;
 	}
 
+	mutex_lock(&blkcg_pol_mutex);
+
 	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkcg_policy_data *cpd;
@@ -860,6 +862,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 		BUG_ON(blkcg->pd[i]);
 		cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
 		if (!cpd) {
+			mutex_unlock(&blkcg_pol_mutex);
 			ret = ERR_PTR(-ENOMEM);
 			goto free_pd_blkcg;
 		}
@@ -868,6 +871,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 		pol->cpd_init_fn(blkcg);
 	}
 
+	mutex_unlock(&blkcg_pol_mutex);
 done:
 	spin_lock_init(&blkcg->lock);
 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);