author	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-10 18:56:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-10 18:56:14 -0700
commit	b0a1ea51bda4c2bcdde460221e1772f3a4f8c44f (patch)
tree	9684c11b72718cd7e96e5eb93298690269ecf447 /block
parent	33e247c7e58d335d70ecb84fd869091e2e4b8dcb (diff)
parent	69d7fde5909b614114343974cfc52cb8ff30b544 (diff)
Merge branch 'for-4.3/blkcg' of git://git.kernel.dk/linux-block
Pull blk-cg updates from Jens Axboe:
 "A bit later in the cycle, but this has been in the block tree for a
  while.  This is basically four patchsets from Tejun that improve our
  buffered cgroup writeback.  It was dependent on the other cgroup
  changes, but they went in earlier in this cycle.

  Series 1 is a set of 5 patches with cgroup writeback updates:

   - bdi_writeback iteration fix which could lead to some wb's being
     skipped or repeated during e.g. sync under memory pressure.

   - Simplification of wb work wait mechanism.

   - Writeback tracepoints updated to report cgroup.

  Series 2 is a set of updates for the CFQ cgroup writeback handling:

     cfq has always charged all async IOs to the root cgroup.  It
     didn't have much choice as writeback didn't know about cgroups and
     there was no way to tell who to blame for a given writeback IO.
     writeback finally grew support for cgroups and now tags each
     writeback IO with the appropriate cgroup to charge it against.

     This patchset updates cfq so that it follows the blkcg each bio is
     tagged with.  Async cfq_queues are now shared across cfq_group,
     which is per-cgroup, instead of per-request_queue cfq_data.  This
     makes all IOs follow the weight based IO resource distribution
     implemented by cfq.

   - Switched from GFP_ATOMIC to GFP_NOWAIT as suggested by Jeff.

   - Other misc review points addressed, acks added and rebased.

  Series 3 is the blkcg policy cleanup patches:

     This patchset contains assorted cleanups for blkcg_policy methods
     and blk[c]g_policy_data handling.

   - alloc/free added for blkg_policy_data.  exit dropped.

   - alloc/free added for blkcg_policy_data.

   - blk-throttle's async percpu allocation is replaced with direct
     allocation.

   - all methods now take blk[c]g_policy_data instead of blkcg_gq or
     blkcg.

  And finally, series 4 is a set of patches cleaning up the blkcg stats
  handling:

     blkcg's stats have always been somewhat of a mess.  This patchset
     tries to improve the situation a bit.

   - The following patches added to consolidate blkcg entry point and
     blkg creation.  This in itself is an improvement and helps
     collecting common stats on bio issue.

   - per-blkg stats now accounted on bio issue rather than request
     completion so that bio based and request based drivers can behave
     the same way.  The issue was spotted by Vivek.

   - cfq-iosched implements custom recursive stats and blk-throttle
     implements custom per-cpu stats.  This patchset makes blkcg core
     support both by default.

   - cfq-iosched and blk-throttle keep track of the same stats multiple
     times.  Unify them"

* 'for-4.3/blkcg' of git://git.kernel.dk/linux-block: (45 commits)
  blkcg: use CGROUP_WEIGHT_* scale for io.weight on the unified hierarchy
  blkcg: s/CFQ_WEIGHT_*/CFQ_WEIGHT_LEGACY_*/
  blkcg: implement interface for the unified hierarchy
  blkcg: misc preparations for unified hierarchy interface
  blkcg: separate out tg_conf_updated() from tg_set_conf()
  blkcg: move body parsing from blkg_conf_prep() to its callers
  blkcg: mark existing cftypes as legacy
  blkcg: rename subsystem name from blkio to io
  blkcg: refine error codes returned during blkcg configuration
  blkcg: remove unnecessary NULL checks from __cfqg_set_weight_device()
  blkcg: reduce stack usage of blkg_rwstat_recursive_sum()
  blkcg: remove cfqg_stats->sectors
  blkcg: move io_service_bytes and io_serviced stats into blkcg_gq
  blkcg: make blkg_[rw]stat_recursive_sum() to be able to index into blkcg_gq
  blkcg: make blkcg_[rw]stat per-cpu
  blkcg: add blkg_[rw]stat->aux_cnt and replace cfq_group->dead_stats with it
  blkcg: consolidate blkg creation in blkcg_bio_issue_check()
  blk-throttle: improve queue bypass handling
  blkcg: move root blkg lookup optimization from throtl_lookup_tg() to __blkg_lookup()
  blkcg: inline [__]blkg_lookup()
  ...
Diffstat (limited to 'block')
-rw-r--r--   block/bio.c          |   2
-rw-r--r--   block/blk-cgroup.c   | 524
-rw-r--r--   block/blk-core.c     |   4
-rw-r--r--   block/blk-throttle.c | 505
-rw-r--r--   block/blk.h          |   5
-rw-r--r--   block/cfq-iosched.c  | 651
6 files changed, 891 insertions, 800 deletions
diff --git a/block/bio.c b/block/bio.c
index 515b5434fe2de..ad3f276d74bcb 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1990,7 +1990,7 @@ int bio_associate_current(struct bio *bio)
get_io_context_active(ioc);
bio->bi_ioc = ioc;
- bio->bi_css = task_get_css(current, blkio_cgrp_id);
+ bio->bi_css = task_get_css(current, io_cgrp_id);
return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6283b3f5db50..ac8370cb25157 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -24,6 +24,7 @@
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
@@ -68,9 +69,14 @@ static void blkg_free(struct blkcg_gq *blkg)
return;
for (i = 0; i < BLKCG_MAX_POLS; i++)
- kfree(blkg->pd[i]);
+ if (blkg->pd[i])
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- blk_exit_rl(&blkg->rl);
+ if (blkg->blkcg != &blkcg_root)
+ blk_exit_rl(&blkg->rl);
+
+ blkg_rwstat_exit(&blkg->stat_ios);
+ blkg_rwstat_exit(&blkg->stat_bytes);
kfree(blkg);
}
@@ -93,6 +99,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
if (!blkg)
return NULL;
+ if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
+ blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
+ goto err_free;
+
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
@@ -113,7 +123,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
continue;
/* alloc per-policy data and attach it to blkg */
- pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+ pd = pol->pd_alloc_fn(gfp_mask, q->node);
if (!pd)
goto err_free;
@@ -129,26 +139,11 @@ err_free:
return NULL;
}
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
- bool update_hint)
+struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
+ struct request_queue *q, bool update_hint)
{
struct blkcg_gq *blkg;
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
/*
* Hint didn't match. Look up from the radix tree. Note that the
* hint can only be updated under queue_lock as otherwise @blkg
@@ -166,29 +161,11 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
return NULL;
}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
- */
-struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
- return __blkg_lookup(blkcg, q, false);
-}
-EXPORT_SYMBOL_GPL(blkg_lookup);
+EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
/*
* If @new_blkg is %NULL, this function tries to allocate a new one as
- * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
+ * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
*/
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
struct request_queue *q,
@@ -203,12 +180,12 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
- ret = -EINVAL;
+ ret = -ENODEV;
goto err_free_blkg;
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
- blkcg->css.id, GFP_ATOMIC);
+ blkcg->css.id, GFP_NOWAIT);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@@ -216,7 +193,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@@ -229,7 +206,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
if (blkcg_parent(blkcg)) {
blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
if (WARN_ON_ONCE(!blkg->parent)) {
- ret = -EINVAL;
+ ret = -ENODEV;
goto err_put_congested;
}
blkg_get(blkg->parent);
@@ -240,7 +217,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_init_fn)
- pol->pd_init_fn(blkg);
+ pol->pd_init_fn(blkg->pd[i]);
}
/* insert */
@@ -254,7 +231,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_online_fn)
- pol->pd_online_fn(blkg);
+ pol->pd_online_fn(blkg->pd[i]);
}
}
blkg->online = true;
@@ -303,7 +280,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
+ return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -327,11 +304,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
return blkg;
}
}
-EXPORT_SYMBOL_GPL(blkg_lookup_create);
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
+ struct blkcg_gq *parent = blkg->parent;
int i;
lockdep_assert_held(blkg->q->queue_lock);
@@ -345,8 +322,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_offline_fn)
- pol->pd_offline_fn(blkg);
+ pol->pd_offline_fn(blkg->pd[i]);
+ }
+
+ if (parent) {
+ blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
+ blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
}
+
blkg->online = false;
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
@@ -400,15 +383,6 @@ static void blkg_destroy_all(struct request_queue *q)
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
- int i;
-
- /* tell policies that this one is being freed */
- for (i = 0; i < BLKCG_MAX_POLS; i++) {
- struct blkcg_policy *pol = blkcg_policy[i];
-
- if (blkg->pd[i] && pol->pd_exit_fn)
- pol->pd_exit_fn(blkg);
- }
/* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
@@ -472,12 +446,14 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ blkg_rwstat_reset(&blkg->stat_bytes);
+ blkg_rwstat_reset(&blkg->stat_ios);
+
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
- if (blkcg_policy_enabled(blkg->q, pol) &&
- pol->pd_reset_stats_fn)
- pol->pd_reset_stats_fn(blkg);
+ if (blkg->pd[i] && pol->pd_reset_stats_fn)
+ pol->pd_reset_stats_fn(blkg->pd[i]);
}
}
@@ -486,13 +462,14 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
return 0;
}
-static const char *blkg_dev_name(struct blkcg_gq *blkg)
+const char *blkg_dev_name(struct blkcg_gq *blkg)
{
/* some drivers (floppy) instantiate a queue w/o disk registered */
if (blkg->q->backing_dev_info.dev)
return dev_name(blkg->q->backing_dev_info.dev);
return NULL;
}
+EXPORT_SYMBOL_GPL(blkg_dev_name);
/**
* blkcg_print_blkgs - helper for printing per-blkg data
@@ -581,9 +558,10 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
for (i = 0; i < BLKG_RWSTAT_NR; i++)
seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
- (unsigned long long)rwstat->cnt[i]);
+ (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
- v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+ v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
+ atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
return v;
}
@@ -620,31 +598,122 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
+static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
+
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+
+/**
+ * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
+ * @sf: seq_file to print to
+ * @v: unused
+ *
+ * To be used as cftype->seq_show to print blkg->stat_bytes.
+ * cftype->private must be set to the blkcg_policy.
+ */
+int blkg_print_stat_bytes(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
+ offsetof(struct blkcg_gq, stat_bytes), true);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
+
+/**
+ * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
+ * @sf: seq_file to print to
+ * @v: unused
+ *
+ * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
+ * must be set to the blkcg_policy.
+ */
+int blkg_print_stat_ios(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
+ offsetof(struct blkcg_gq, stat_ios), true);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
+
+static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd,
+ int off)
+{
+ struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
+ NULL, off);
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+
+/**
+ * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
+ * @sf: seq_file to print to
+ * @v: unused
+ */
+int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat_field_recursive,
+ (void *)seq_cft(sf)->private,
+ offsetof(struct blkcg_gq, stat_bytes), true);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
+
+/**
+ * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
+ * @sf: seq_file to print to
+ * @v: unused
+ */
+int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat_field_recursive,
+ (void *)seq_cft(sf)->private,
+ offsetof(struct blkcg_gq, stat_ios), true);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
+
/**
* blkg_stat_recursive_sum - collect hierarchical blkg_stat
- * @pd: policy private data of interest
- * @off: offset to the blkg_stat in @pd
+ * @blkg: blkg of interest
+ * @pol: blkcg_policy which contains the blkg_stat
+ * @off: offset to the blkg_stat in blkg_policy_data or @blkg
+ *
+ * Collect the blkg_stat specified by @blkg, @pol and @off and all its
+ * online descendants and their aux counts. The caller must be holding the
+ * queue lock for online tests.
*
- * Collect the blkg_stat specified by @off from @pd and all its online
- * descendants and return the sum. The caller must be holding the queue
- * lock for online tests.
+ * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
+ * at @off bytes into @blkg's blkg_policy_data of the policy.
*/
-u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
+u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off)
{
- struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
struct cgroup_subsys_state *pos_css;
u64 sum = 0;
- lockdep_assert_held(pd->blkg->q->queue_lock);
+ lockdep_assert_held(blkg->q->queue_lock);
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
- struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
- struct blkg_stat *stat = (void *)pos_pd + off;
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct blkg_stat *stat;
+
+ if (!pos_blkg->online)
+ continue;
+
+ if (pol)
+ stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
+ else
+ stat = (void *)blkg + off;
- if (pos_blkg->online)
- sum += blkg_stat_read(stat);
+ sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
}
rcu_read_unlock();
@@ -654,37 +723,43 @@ EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
/**
* blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
- * @pd: policy private data of interest
- * @off: offset to the blkg_stat in @pd
+ * @blkg: blkg of interest
+ * @pol: blkcg_policy which contains the blkg_rwstat
+ * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
+ *
+ * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
+ * online descendants and their aux counts. The caller must be holding the
+ * queue lock for online tests.
*
- * Collect the blkg_rwstat specified by @off from @pd and all its online
- * descendants and return the sum. The caller must be holding the queue
- * lock for online tests.
+ * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
+ * is at @off bytes into @blkg's blkg_policy_data of the policy.
*/
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
- int off)
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off)
{
- struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
struct cgroup_subsys_state *pos_css;
struct blkg_rwstat sum = { };
int i;
- lockdep_assert_held(pd->blkg->q->queue_lock);
+ lockdep_assert_held(blkg->q->queue_lock);
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
- struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
- struct blkg_rwstat *rwstat = (void *)pos_pd + off;
- struct blkg_rwstat tmp;
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct blkg_rwstat *rwstat;
if (!pos_blkg->online)
continue;
- tmp = blkg_rwstat_read(rwstat);
+ if (pol)
+ rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
+ else
+ rwstat = (void *)pos_blkg + off;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum.cnt[i] += tmp.cnt[i];
+ atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
+ &sum.aux_cnt[i]);
}
rcu_read_unlock();
@@ -700,29 +775,34 @@ EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
* @ctx: blkg_conf_ctx to be filled
*
* Parse per-blkg config update from @input and initialize @ctx with the
- * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
- * value. This function returns with RCU read lock and queue lock held and
- * must be paired with blkg_conf_finish().
+ * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
+ * part of @input following MAJ:MIN. This function returns with RCU read
+ * lock and queue lock held and must be paired with blkg_conf_finish().
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- const char *input, struct blkg_conf_ctx *ctx)
+ char *input, struct blkg_conf_ctx *ctx)
__acquires(rcu) __acquires(disk->queue->queue_lock)
{
struct gendisk *disk;
struct blkcg_gq *blkg;
unsigned int major, minor;
- unsigned long long v;
- int part, ret;
+ int key_len, part, ret;
+ char *body;
- if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
+ if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
return -EINVAL;
+ body = input + key_len;
+ if (!isspace(*body))
+ return -EINVAL;
+ body = skip_spaces(body);
+
disk = get_gendisk(MKDEV(major, minor), &part);
if (!disk)
- return -EINVAL;
+ return -ENODEV;
if (part) {
put_disk(disk);
- return -EINVAL;
+ return -ENODEV;
}
rcu_read_lock();
@@ -731,7 +811,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
if (blkcg_policy_enabled(disk->queue, pol))
blkg = blkg_lookup_create(blkcg, disk->queue);
else
- blkg = ERR_PTR(-EINVAL);
+ blkg = ERR_PTR(-EOPNOTSUPP);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
@@ -753,7 +833,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
ctx->disk = disk;
ctx->blkg = blkg;
- ctx->v = v;
+ ctx->body = body;
return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
@@ -774,8 +854,55 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
+static int blkcg_print_stat(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ const char *dname;
+ struct blkg_rwstat rwstat;
+ u64 rbytes, wbytes, rios, wios;
+
+ dname = blkg_dev_name(blkg);
+ if (!dname)
+ continue;
+
+ spin_lock_irq(blkg->q->queue_lock);
+
+ rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+ offsetof(struct blkcg_gq, stat_bytes));
+ rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+ wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+
+ rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+ offsetof(struct blkcg_gq, stat_ios));
+ rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+ wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+
+ spin_unlock_irq(blkg->q->queue_lock);
+
+ if (rbytes || wbytes || rios || wios)
+ seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
+ dname, rbytes, wbytes, rios, wios);
+ }
+
+ rcu_read_unlock();
+ return 0;
+}
+
struct cftype blkcg_files[] = {
{
+ .name = "stat",
+ .seq_show = blkcg_print_stat,
+ },
+ { } /* terminate */
+};
+
+struct cftype blkcg_legacy_files[] = {
+ {
.name = "reset_stats",
.write_u64 = blkcg_reset_stats,
},
@@ -822,18 +949,19 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
static void blkcg_css_free(struct cgroup_subsys_state *css)
{
struct blkcg *blkcg = css_to_blkcg(css);
+ int i;
mutex_lock(&blkcg_pol_mutex);
+
list_del(&blkcg->all_blkcgs_node);
- mutex_unlock(&blkcg_pol_mutex);
- if (blkcg != &blkcg_root) {
- int i;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (blkcg->cpd[i])
+ blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
- for (i = 0; i < BLKCG_MAX_POLS; i++)
- kfree(blkcg->pd[i]);
- kfree(blkcg);
- }
+ mutex_unlock(&blkcg_pol_mutex);
+
+ kfree(blkcg);
}
static struct cgroup_subsys_state *
@@ -847,13 +975,12 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
if (!parent_css) {
blkcg = &blkcg_root;
- goto done;
- }
-
- blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
- if (!blkcg) {
- ret = ERR_PTR(-ENOMEM);
- goto free_blkcg;
+ } else {
+ blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
+ if (!blkcg) {
+ ret = ERR_PTR(-ENOMEM);
+ goto free_blkcg;
+ }
}
for (i = 0; i < BLKCG_MAX_POLS ; i++) {
@@ -866,23 +993,23 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
* check if the policy requires any specific per-cgroup
* data: if it does, allocate and initialize it.
*/
- if (!pol || !pol->cpd_size)
+ if (!pol || !pol->cpd_alloc_fn)
continue;
- BUG_ON(blkcg->pd[i]);
- cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd) {
ret = ERR_PTR(-ENOMEM);
goto free_pd_blkcg;
}
- blkcg->pd[i] = cpd;
+ blkcg->cpd[i] = cpd;
+ cpd->blkcg = blkcg;
cpd->plid = i;
- pol->cpd_init_fn(blkcg);
+ if (pol->cpd_init_fn)
+ pol->cpd_init_fn(cpd);
}
-done:
spin_lock_init(&blkcg->lock);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -894,7 +1021,8 @@ done:
free_pd_blkcg:
for (i--; i >= 0; i--)
- kfree(blkcg->pd[i]);
+ if (blkcg->cpd[i])
+ blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
kfree(blkcg);
mutex_unlock(&blkcg_pol_mutex);
@@ -938,7 +1066,7 @@ int blkcg_init_queue(struct request_queue *q)
radix_tree_preload_end();
if (IS_ERR(blkg)) {
- kfree(new_blkg);
+ blkg_free(new_blkg);
return PTR_ERR(blkg);
}
@@ -1015,12 +1143,35 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
return ret;
}
-struct cgroup_subsys blkio_cgrp_subsys = {
+static void blkcg_bind(struct cgroup_subsys_state *root_css)
+{
+ int i;
+
+ mutex_lock(&blkcg_pol_mutex);
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkcg *blkcg;
+
+ if (!pol || !pol->cpd_bind_fn)
+ continue;
+
+ list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
+ if (blkcg->cpd[pol->plid])
+ pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
+ }
+ mutex_unlock(&blkcg_pol_mutex);
+}
+
+struct cgroup_subsys io_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.can_attach = blkcg_can_attach,
- .legacy_cftypes = blkcg_files,
+ .bind = blkcg_bind,
+ .dfl_cftypes = blkcg_files,
+ .legacy_cftypes = blkcg_legacy_files,
+ .legacy_name = "blkio",
#ifdef CONFIG_MEMCG
/*
* This ensures that, if available, memcg is automatically enabled
@@ -1030,7 +1181,7 @@ struct cgroup_subsys blkio_cgrp_subsys = {
.depends_on = 1 << memory_cgrp_id,
#endif
};
-EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
+EXPORT_SYMBOL_GPL(io_cgrp_subsys);
/**
* blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1051,65 +1202,54 @@ EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
- LIST_HEAD(pds);
+ struct blkg_policy_data *pd_prealloc = NULL;
struct blkcg_gq *blkg;
- struct blkg_policy_data *pd, *nd;
- int cnt = 0, ret;
+ int ret;
if (blkcg_policy_enabled(q, pol))
return 0;
- /* count and allocate policy_data for all existing blkgs */
blk_queue_bypass_start(q);
- spin_lock_irq(q->queue_lock);
- list_for_each_entry(blkg, &q->blkg_list, q_node)
- cnt++;
- spin_unlock_irq(q->queue_lock);
-
- /* allocate per-blkg policy data for all existing blkgs */
- while (cnt--) {
- pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
- if (!pd) {
+pd_prealloc:
+ if (!pd_prealloc) {
+ pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
+ if (!pd_prealloc) {
ret = -ENOMEM;
- goto out_free;
+ goto out_bypass_end;
}
- list_add_tail(&pd->alloc_node, &pds);
}
- /*
- * Install the allocated pds and cpds. With @q bypassing, no new blkg
- * should have been created while the queue lock was dropped.
- */
spin_lock_irq(q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
- if (WARN_ON(list_empty(&pds))) {
- /* umm... this shouldn't happen, just abort */
- ret = -ENOMEM;
- goto out_unlock;
- }
- pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
- list_del_init(&pd->alloc_node);
+ struct blkg_policy_data *pd;
- /* grab blkcg lock too while installing @pd on @blkg */
- spin_lock(&blkg->blkcg->lock);
+ if (blkg->pd[pol->plid])
+ continue;
+
+ pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+ if (!pd)
+ swap(pd, pd_prealloc);
+ if (!pd) {
+ spin_unlock_irq(q->queue_lock);
+ goto pd_prealloc;
+ }
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
- pol->pd_init_fn(blkg);
-
- spin_unlock(&blkg->blkcg->lock);
+ if (pol->pd_init_fn)
+ pol->pd_init_fn(pd);
}
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
-out_unlock:
+
spin_unlock_irq(q->queue_lock);
-out_free:
+out_bypass_end:
blk_queue_bypass_end(q);
- list_for_each_entry_safe(pd, nd, &pds, alloc_node)
- kfree(pd);
+ if (pd_prealloc)
+ pol->pd_free_fn(pd_prealloc);
return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1139,13 +1279,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
/* grab blkcg lock too while removing @pd from @blkg */
spin_lock(&blkg->blkcg->lock);
- if (pol->pd_offline_fn)
- pol->pd_offline_fn(blkg);
- if (pol->pd_exit_fn)
- pol->pd_exit_fn(blkg);
-
- kfree(blkg->pd[pol->plid]);
- blkg->pd[pol->plid] = NULL;
+ if (blkg->pd[pol->plid]) {
+ if (pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg->pd[pol->plid]);
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
spin_unlock(&blkg->blkcg->lock);
}
@@ -1167,9 +1306,6 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg *blkcg;
int i, ret;
- if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
- return -EINVAL;
-
mutex_lock(&blkcg_pol_register_mutex);
mutex_lock(&blkcg_pol_mutex);
@@ -1186,36 +1322,42 @@ int blkcg_policy_register(struct blkcg_policy *pol)
blkcg_policy[pol->plid] = pol;
/* allocate and install cpd's */
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
struct blkcg_policy_data *cpd;
- cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd) {
mutex_unlock(&blkcg_pol_mutex);
goto err_free_cpds;
}
- blkcg->pd[pol->plid] = cpd;
+ blkcg->cpd[pol->plid] = cpd;
+ cpd->blkcg = blkcg;
cpd->plid = pol->plid;
- pol->cpd_init_fn(blkcg);
+ pol->cpd_init_fn(cpd);
}
}
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
- if (pol->cftypes)
- WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
- pol->cftypes));
+ if (pol->dfl_cftypes)
+ WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
+ pol->dfl_cftypes));
+ if (pol->legacy_cftypes)
+ WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
+ pol->legacy_cftypes));
mutex_unlock(&blkcg_pol_register_mutex);
return 0;
err_free_cpds:
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
- kfree(blkcg->pd[pol->plid]);
- blkcg->pd[pol->plid] = NULL;
+ if (blkcg->cpd[pol->plid]) {
+ pol->cpd_free_fn(blkcg->cpd[pol->plid]);
+ blkcg->cpd[pol->plid] = NULL;
+ }
}
}
blkcg_policy[pol->plid] = NULL;
@@ -1242,16 +1384,20 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
goto out_unlock;
/* kill the intf files first */
- if (pol->cftypes)
- cgroup_rm_cftypes(pol->cftypes);
+ if (pol->dfl_cftypes)
+ cgroup_rm_cftypes(pol->dfl_cftypes);
+ if (pol->legacy_cftypes)
+ cgroup_rm_cftypes(pol->legacy_cftypes);
/* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
- kfree(blkcg->pd[pol->plid]);
- blkcg->pd[pol->plid] = NULL;
+ if (blkcg->cpd[pol->plid]) {
+ pol->cpd_free_fn(blkcg->cpd[pol->plid]);
+ blkcg->cpd[pol->plid] = NULL;
+ }
}
}
blkcg_policy[pol->plid] = NULL;
diff --git a/block/blk-core.c b/block/blk-core.c
index 60912e983f16a..2eb722d48773c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1888,8 +1888,8 @@ generic_make_request_checks(struct bio *bio)
*/
create_io_context(GFP_ATOMIC, q->node);
- if (blk_throtl_bio(q, bio))
- return false; /* throttled, will be resubmitted later */
+ if (!blkcg_bio_issue_check(q, bio))
+ return false;
trace_block_bio_queue(q, bio);
return true;
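blkcg_bio_issue_check() itself lives in include/linux/blk-cgroup.h and is outside this block/-only diffstat. Roughly, and reconstructed only from the commit messages and the new blk_throtl_bio() signature below rather than quoted from the header, it looks up (creating if necessary) the blkg for the bio's blkcg, lets blk-throttle decide whether to hold the bio, and charges blkg->stat_bytes/stat_ios at issue time when it does not:

/* approximate shape of the helper, not the verbatim code from blk-cgroup.h */
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* consolidated blkg lookup/creation on the bio issue path */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/* per-blkg stats are now accounted on bio issue */
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}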
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b23193518ac7a..c75a2636dd404 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -83,14 +83,6 @@ enum tg_state_flags {
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
-/* Per-cpu group stats */
-struct tg_stats_cpu {
- /* total bytes transferred */
- struct blkg_rwstat service_bytes;
- /* total IOs serviced, post merge */
- struct blkg_rwstat serviced;
-};
-
struct throtl_grp {
/* must be the first member */
struct blkg_policy_data pd;
@@ -141,12 +133,6 @@ struct throtl_grp {
/* When did we start a new slice */
unsigned long slice_start[2];
unsigned long slice_end[2];
-
- /* Per cpu stats pointer */
- struct tg_stats_cpu __percpu *stats_cpu;
-
- /* List of tgs waiting for per cpu stats memory to be allocated */
- struct list_head stats_alloc_node;
};
struct throtl_data
@@ -168,13 +154,6 @@ struct throtl_data
struct work_struct dispatch_work;
};
-/* list and work item to allocate percpu group stats */
-static DEFINE_SPINLOCK(tg_stats_alloc_lock);
-static LIST_HEAD(tg_stats_alloc_list);
-
-static void tg_stats_alloc_fn(struct work_struct *);
-static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
-
static void throtl_pending_timer_fn(unsigned long arg);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
@@ -192,11 +171,6 @@ static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
return pd_to_blkg(&tg->pd);
}
-static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
-{
- return blkg_to_tg(td->queue->root_blkg);
-}
-
/**
* sq_to_tg - return the throl_grp the specified service queue belongs to
* @sq: the throtl_service_queue of interest
@@ -256,53 +230,6 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
} \
} while (0)
-static void tg_stats_init(struct tg_stats_cpu *tg_stats)
-{
- blkg_rwstat_init(&tg_stats->service_bytes);
- blkg_rwstat_init(&tg_stats->serviced);
-}
-
-/*
- * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_wq once there are some groups on the alloc_list waiting for
- * allocation.
- */
-static void tg_stats_alloc_fn(struct work_struct *work)
-{
- static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
- struct delayed_work *dwork = to_delayed_work(work);
- bool empty = false;
-
-alloc_stats:
- if (!stats_cpu) {
- int cpu;
-
- stats_cpu = alloc_percpu(struct tg_stats_cpu);
- if (!stats_cpu) {
- /* allocation failed, try again after some time */
- schedule_delayed_work(dwork, msecs_to_jiffies(10));
- return;
- }
- for_each_possible_cpu(cpu)
- tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
- }
-
- spin_lock_irq(&tg_stats_alloc_lock);
-
- if (!list_empty(&tg_stats_alloc_list)) {
- struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
- struct throtl_grp,
- stats_alloc_node);
- swap(tg->stats_cpu, stats_cpu);
- list_del_init(&tg->stats_alloc_node);
- }
-
- empty = list_empty(&tg_stats_alloc_list);
- spin_unlock_irq(&tg_stats_alloc_lock);
- if (!empty)
- goto alloc_stats;
-}
-
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
INIT_LIST_HEAD(&qn->node);
@@ -387,29 +314,46 @@ static struct bio *throtl_pop_queued(struct list_head *queued,
}
/* init a service_queue, assumes the caller zeroed it */
-static void throtl_service_queue_init(struct throtl_service_queue *sq,
- struct throtl_service_queue *parent_sq)
+static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
sq->pending_tree = RB_ROOT;
- sq->parent_sq = parent_sq;
setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
(unsigned long)sq);
}
-static void throtl_service_queue_exit(struct throtl_service_queue *sq)
+static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
- del_timer_sync(&sq->pending_timer);
+ struct throtl_grp *tg;
+ int rw;
+
+ tg = kzalloc_node(sizeof(*tg), gfp, node);
+ if (!tg)
+ return NULL;
+
+ throtl_service_queue_init(&tg->service_queue);
+
+ for (rw = READ; rw <= WRITE; rw++) {
+ throtl_qnode_init(&tg->qnode_on_self[rw], tg);
+ throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
+ }
+
+ RB_CLEAR_NODE(&tg->rb_node);
+ tg->bps[READ] = -1;
+ tg->bps[WRITE] = -1;
+ tg->iops[READ] = -1;
+ tg->iops[WRITE] = -1;
+
+ return &tg->pd;
}
-static void throtl_pd_init(struct blkcg_gq *blkg)
+static void throtl_pd_init(struct blkg_policy_data *pd)
{
- struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct throtl_grp *tg = pd_to_tg(pd);
+ struct blkcg_gq *blkg = tg_to_blkg(tg);
struct throtl_data *td = blkg->q->td;
- struct throtl_service_queue *parent_sq;
- unsigned long flags;
- int rw;
+ struct throtl_service_queue *sq = &tg->service_queue;
/*
* If on the default hierarchy, we switch to properly hierarchical
@@ -424,35 +368,10 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
* Limits of a group don't interact with limits of other groups
* regardless of the position of the group in the hierarchy.
*/
- parent_sq = &td->service_queue;
-
+ sq->parent_sq = &td->service_queue;
if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
- parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
-
- throtl_service_queue_init(&tg->service_queue, parent_sq);
-
- for (rw = READ; rw <= WRITE; rw++) {
- throtl_qnode_init(&tg->qnode_on_self[rw], tg);
- throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
- }
-
- RB_CLEAR_NODE(&tg->rb_node);
+ sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
tg->td = td;
-
- tg->bps[READ] = -1;
- tg->bps[WRITE] = -1;
- tg->iops[READ] = -1;
- tg->iops[WRITE] = -1;
-
- /*
- * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
- * but percpu allocator can't be called from IO path. Queue tg on
- * tg_stats_alloc_list and allocate from work item.
- */
- spin_lock_irqsave(&tg_stats_alloc_lock, flags);
- list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
- schedule_delayed_work(&tg_stats_alloc_work, 0);
- spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
/*
@@ -470,83 +389,21 @@ static void tg_update_has_rules(struct throtl_grp *tg)
(tg->bps[rw] != -1 || tg->iops[rw] != -1);
}
-static void throtl_pd_online(struct blkcg_gq *blkg)
+static void throtl_pd_online(struct blkg_policy_data *pd)
{
/*
* We don't want new groups to escape the limits of its ancestors.
* Update has_rules[] after a new group is brought online.
*/
- tg_update_has_rules(blkg_to_tg(blkg));
-}
-
-static void throtl_pd_exit(struct blkcg_gq *blkg)
-{
- struct throtl_grp *tg = blkg_to_tg(blkg);
- unsigned long flags;
-
- spin_lock_irqsave(&tg_stats_alloc_lock, flags);
- list_del_init(&tg->stats_alloc_node);
- spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
-
- free_percpu(tg->stats_cpu);
-
- throtl_service_queue_exit(&tg->service_queue);
-}
-
-static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
-{
- struct throtl_grp *tg = blkg_to_tg(blkg);
- int cpu;
-
- if (tg->stats_cpu == NULL)
- return;
-
- for_each_possible_cpu(cpu) {
- struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
-
- blkg_rwstat_reset(&sc->service_bytes);
- blkg_rwstat_reset(&sc->serviced);
- }
-}
-
-static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
- struct blkcg *blkcg)
-{
- /*
- * This is the common case when there are no blkcgs. Avoid lookup
- * in this case
- */
- if (blkcg == &blkcg_root)
- return td_root_tg(td);
-
- return blkg_to_tg(blkg_lookup(blkcg, td->queue));
+ tg_update_has_rules(pd_to_tg(pd));
}
-static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
- struct blkcg *blkcg)
+static void throtl_pd_free(struct blkg_policy_data *pd)
{
- struct request_queue *q = td->queue;
- struct throtl_grp *tg = NULL;
-
- /*
- * This is the common case when there are no blkcgs. Avoid lookup
- * in this case
- */
- if (blkcg == &blkcg_root) {
- tg = td_root_tg(td);
- } else {
- struct blkcg_gq *blkg;
-
- blkg = blkg_lookup_create(blkcg, q);
-
- /* if %NULL and @q is alive, fall back to root_tg */
- if (!IS_ERR(blkg))
- tg = blkg_to_tg(blkg);
- else if (!blk_queue_dying(q))
- tg = td_root_tg(td);
- }
+ struct throtl_grp *tg = pd_to_tg(pd);
- return tg;
+ del_timer_sync(&tg->service_queue.pending_timer);
+ kfree(tg);
}
static struct throtl_grp *
@@ -956,32 +813,6 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
return 0;
}
-static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
- int rw)
-{
- struct throtl_grp *tg = blkg_to_tg(blkg);
- struct tg_stats_cpu *stats_cpu;
- unsigned long flags;
-
- /* If per cpu stats are not allocated yet, don't do any accounting. */
- if (tg->stats_cpu == NULL)
- return;
-
- /*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
- */
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(tg->stats_cpu);
-
- blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
- blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
-
- local_irq_restore(flags);
-}
-
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
@@ -995,17 +826,9 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
* more than once as a throttled bio will go through blk-throtl the
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
- *
- * Dispatch stats aren't recursive and each @bio should only be
- * accounted by the @tg it was originally associated with. Let's
- * update the stats when setting REQ_THROTTLED for the first time
- * which is guaranteed to be for the @bio's original tg.
*/
- if (!(bio->bi_rw & REQ_THROTTLED)) {
+ if (!(bio->bi_rw & REQ_THROTTLED))
bio->bi_rw |= REQ_THROTTLED;
- throtl_update_dispatch_stats(tg_to_blkg(tg),
- bio->bi_iter.bi_size, bio->bi_rw);
- }
}
/**
@@ -1285,34 +1108,6 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
}
}
-static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct throtl_grp *tg = pd_to_tg(pd);
- struct blkg_rwstat rwstat = { }, tmp;
- int i, cpu;
-
- if (tg->stats_cpu == NULL)
- return 0;
-
- for_each_possible_cpu(cpu) {
- struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
-
- tmp = blkg_rwstat_read((void *)sc + off);
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- rwstat.cnt[i] += tmp.cnt[i];
- }
-
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
- &blkcg_policy_throtl, seq_cft(sf)->private, true);
- return 0;
-}
-
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{
@@ -1349,31 +1144,11 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
return 0;
}
-static ssize_t tg_set_conf(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off, bool is_u64)
+static void tg_conf_updated(struct throtl_grp *tg)
{
- struct blkcg *blkcg = css_to_blkcg(of_css(of));
- struct blkg_conf_ctx ctx;
- struct throtl_grp *tg;
- struct throtl_service_queue *sq;
- struct blkcg_gq *blkg;
+ struct throtl_service_queue *sq = &tg->service_queue;
struct cgroup_subsys_state *pos_css;
- int ret;
-
- ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
- if (ret)
- return ret;
-
- tg = blkg_to_tg(ctx.blkg);
- sq = &tg->service_queue;
-
- if (!ctx.v)
- ctx.v = -1;
-
- if (is_u64)
- *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v;
- else
- *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v;
+ struct blkcg_gq *blkg;
throtl_log(&tg->service_queue,
"limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
@@ -1387,7 +1162,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
* restrictions in the whole hierarchy and allows them to bypass
* blk-throttle.
*/
- blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
+ blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
tg_update_has_rules(blkg_to_tg(blkg));
/*
@@ -1405,9 +1180,39 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
tg_update_disptime(tg);
throtl_schedule_next_dispatch(sq->parent_sq, true);
}
+}
+
+static ssize_t tg_set_conf(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off, bool is_u64)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ int ret;
+ u64 v;
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+ if (sscanf(ctx.body, "%llu", &v) != 1)
+ goto out_finish;
+ if (!v)
+ v = -1;
+
+ tg = blkg_to_tg(ctx.blkg);
+
+ if (is_u64)
+ *(u64 *)((void *)tg + of_cft(of)->private) = v;
+ else
+ *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
+
+ tg_conf_updated(tg);
+ ret = 0;
+out_finish:
blkg_conf_finish(&ctx);
- return nbytes;
+ return ret ?: nbytes;
}
static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
@@ -1422,7 +1227,7 @@ static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
return tg_set_conf(of, buf, nbytes, off, false);
}
-static struct cftype throtl_files[] = {
+static struct cftype throtl_legacy_files[] = {
{
.name = "throttle.read_bps_device",
.private = offsetof(struct throtl_grp, bps[READ]),
@@ -1449,13 +1254,124 @@ static struct cftype throtl_files[] = {
},
{
.name = "throttle.io_service_bytes",
- .private = offsetof(struct tg_stats_cpu, service_bytes),
- .seq_show = tg_print_cpu_rwstat,
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_bytes,
},
{
.name = "throttle.io_serviced",
- .private = offsetof(struct tg_stats_cpu, serviced),
- .seq_show = tg_print_cpu_rwstat,
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_ios,
+ },
+ { } /* terminate */
+};
+
+static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ struct throtl_grp *tg = pd_to_tg(pd);
+ const char *dname = blkg_dev_name(pd->blkg);
+ char bufs[4][21] = { "max", "max", "max", "max" };
+
+ if (!dname)
+ return 0;
+ if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
+ tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
+ return 0;
+
+ if (tg->bps[READ] != -1)
+ snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
+ if (tg->bps[WRITE] != -1)
+ snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
+ if (tg->iops[READ] != -1)
+ snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
+ if (tg->iops[WRITE] != -1)
+ snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);
+
+ seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
+ dname, bufs[0], bufs[1], bufs[2], bufs[3]);
+ return 0;
+}
+
+static int tg_print_max(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
+ &blkcg_policy_throtl, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static ssize_t tg_set_max(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ u64 v[4];
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
+
+ tg = blkg_to_tg(ctx.blkg);
+
+ v[0] = tg->bps[READ];
+ v[1] = tg->bps[WRITE];
+ v[2] = tg->iops[READ];
+ v[3] = tg->iops[WRITE];
+
+ while (true) {
+ char tok[27]; /* wiops=18446744073709551616 */
+ char *p;
+ u64 val = -1;
+ int len;
+
+ if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
+ break;
+ if (tok[0] == '\0')
+ break;
+ ctx.body += len;
+
+ ret = -EINVAL;
+ p = tok;
+ strsep(&p, "=");
+ if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
+ goto out_finish;
+
+ ret = -ERANGE;
+ if (!val)
+ goto out_finish;
+
+ ret = -EINVAL;
+ if (!strcmp(tok, "rbps"))
+ v[0] = val;
+ else if (!strcmp(tok, "wbps"))
+ v[1] = val;
+ else if (!strcmp(tok, "riops"))
+ v[2] = min_t(u64, val, UINT_MAX);
+ else if (!strcmp(tok, "wiops"))
+ v[3] = min_t(u64, val, UINT_MAX);
+ else
+ goto out_finish;
+ }
+
+ tg->bps[READ] = v[0];
+ tg->bps[WRITE] = v[1];
+ tg->iops[READ] = v[2];
+ tg->iops[WRITE] = v[3];
+
+ tg_conf_updated(tg);
+ ret = 0;
+out_finish:
+ blkg_conf_finish(&ctx);
+ return ret ?: nbytes;
+}
+
+static struct cftype throtl_files[] = {
+ {
+ .name = "max",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = tg_print_max,
+ .write = tg_set_max,
},
{ } /* terminate */
};
@@ -1468,52 +1384,33 @@ static void throtl_shutdown_wq(struct request_queue *q)
}
static struct blkcg_policy blkcg_policy_throtl = {
- .pd_size = sizeof(struct throtl_grp),
- .cftypes = throtl_files,
+ .dfl_cftypes = throtl_files,
+ .legacy_cftypes = throtl_legacy_files,
+ .pd_alloc_fn = throtl_pd_alloc,
.pd_init_fn = throtl_pd_init,
.pd_online_fn = throtl_pd_online,
- .pd_exit_fn = throtl_pd_exit,
- .pd_reset_stats_fn = throtl_pd_reset_stats,
+ .pd_free_fn = throtl_pd_free,
};
-bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio)
{
- struct throtl_data *td = q->td;
struct throtl_qnode *qn = NULL;
- struct throtl_grp *tg;
+ struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
struct throtl_service_queue *sq;
bool rw = bio_data_dir(bio);
- struct blkcg *blkcg;
bool throttled = false;
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
/* see throtl_charge_bio() */
- if (bio->bi_rw & REQ_THROTTLED)
+ if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
goto out;
- /*
- * A throtl_grp pointer retrieved under rcu can be used to access
- * basic fields like stats and io rates. If a group has no rules,
- * just update the dispatch stats in lockless manner and return.
- */
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
- tg = throtl_lookup_tg(td, blkcg);
- if (tg) {
- if (!tg->has_rules[rw]) {
- throtl_update_dispatch_stats(tg_to_blkg(tg),
- bio->bi_iter.bi_size, bio->bi_rw);
- goto out_unlock_rcu;
- }
- }
-
- /*
- * Either group has not been allocated yet or it is not an unlimited
- * IO group
- */
spin_lock_irq(q->queue_lock);
- tg = throtl_lookup_create_tg(td, blkcg);
- if (unlikely(!tg))
+
+ if (unlikely(blk_queue_bypass(q)))
goto out_unlock;
sq = &tg->service_queue;
@@ -1580,8 +1477,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
out_unlock:
spin_unlock_irq(q->queue_lock);
-out_unlock_rcu:
- rcu_read_unlock();
out:
/*
* As multiple blk-throtls may stack in the same issue path, we
@@ -1667,7 +1562,7 @@ int blk_throtl_init(struct request_queue *q)
return -ENOMEM;
INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
- throtl_service_queue_init(&td->service_queue, NULL);
+ throtl_service_queue_init(&td->service_queue);
q->td = td;
td->queue = q;
diff --git a/block/blk.h b/block/blk.h
index 838188b35a83f..98614ad37c81f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -272,15 +272,10 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
* Internal throttling interface
*/
#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
-static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
-{
- return false;
-}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c62bb2e650b8c..04de88463a986 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -68,9 +68,9 @@ static struct kmem_cache *cfq_pool;
#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
/* blkio-related constants */
-#define CFQ_WEIGHT_MIN 10
-#define CFQ_WEIGHT_MAX 1000
-#define CFQ_WEIGHT_DEFAULT 500
+#define CFQ_WEIGHT_LEGACY_MIN 10
+#define CFQ_WEIGHT_LEGACY_DFL 500
+#define CFQ_WEIGHT_LEGACY_MAX 1000
struct cfq_ttime {
unsigned long last_end_request;
@@ -177,10 +177,6 @@ enum wl_type_t {
struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* total bytes transferred */
- struct blkg_rwstat service_bytes;
- /* total IOs serviced, post merge */
- struct blkg_rwstat serviced;
/* number of ios merged */
struct blkg_rwstat merged;
/* total time spent on device in ns, may not be accurate w/ queueing */
@@ -189,8 +185,6 @@ struct cfqg_stats {
struct blkg_rwstat wait_time;
/* number of IOs queued up */
struct blkg_rwstat queued;
- /* total sectors transferred */
- struct blkg_stat sectors;
/* total disk time and nr sectors dispatched by this group */
struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
@@ -220,7 +214,7 @@ struct cfqg_stats {
/* Per-cgroup data */
struct cfq_group_data {
/* must be the first member */
- struct blkcg_policy_data pd;
+ struct blkcg_policy_data cpd;
unsigned int weight;
unsigned int leaf_weight;
@@ -304,7 +298,11 @@ struct cfq_group {
int dispatched;
struct cfq_ttime ttime;
struct cfqg_stats stats; /* stats for this cfqg */
- struct cfqg_stats dead_stats; /* stats pushed from dead children */
+
+ /* async queue for each priority case */
+ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+ struct cfq_queue *async_idle_cfqq;
+
};
struct cfq_io_cq {
@@ -370,12 +368,6 @@ struct cfq_data {
struct cfq_queue *active_queue;
struct cfq_io_cq *active_cic;
- /*
- * async queue for each priority case
- */
- struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
- struct cfq_queue *async_idle_cfqq;
-
sector_t last_position;
/*
@@ -401,6 +393,7 @@ struct cfq_data {
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
+static void cfq_put_queue(struct cfq_queue *cfqq);
static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
enum wl_class_t class,
@@ -612,7 +605,7 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
- return cpd ? container_of(cpd, struct cfq_group_data, pd) : NULL;
+ return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}
static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
@@ -693,14 +686,6 @@ static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}
-static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
- uint64_t bytes, int rw)
-{
- blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
- blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
- blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
-}
-
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
uint64_t start_time, uint64_t io_start_time, int rw)
{
@@ -718,8 +703,6 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
/* queued stats shouldn't be cleared */
- blkg_rwstat_reset(&stats->service_bytes);
- blkg_rwstat_reset(&stats->serviced);
blkg_rwstat_reset(&stats->merged);
blkg_rwstat_reset(&stats->service_time);
blkg_rwstat_reset(&stats->wait_time);
@@ -736,28 +719,26 @@ static void cfqg_stats_reset(struct cfqg_stats *stats)
}
/* @to += @from */
-static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
/* queued stats shouldn't be cleared */
- blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
- blkg_rwstat_merge(&to->serviced, &from->serviced);
- blkg_rwstat_merge(&to->merged, &from->merged);
- blkg_rwstat_merge(&to->service_time, &from->service_time);
- blkg_rwstat_merge(&to->wait_time, &from->wait_time);
- blkg_stat_merge(&from->time, &from->time);
+ blkg_rwstat_add_aux(&to->merged, &from->merged);
+ blkg_rwstat_add_aux(&to->service_time, &from->service_time);
+ blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
+ blkg_stat_add_aux(&from->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
- blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
- blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
- blkg_stat_merge(&to->dequeue, &from->dequeue);
- blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
- blkg_stat_merge(&to->idle_time, &from->idle_time);
- blkg_stat_merge(&to->empty_time, &from->empty_time);
+ blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
+ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+ blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+ blkg_stat_add_aux(&to->dequeue, &from->dequeue);
+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_add_aux(&to->idle_time, &from->idle_time);
+ blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
/*
- * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
* recursive stats can still account for the amount used by this cfqg after
* it's gone.
*/
@@ -770,10 +751,8 @@ static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
if (unlikely(!parent))
return;
- cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
- cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+ cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
cfqg_stats_reset(&cfqg->stats);
- cfqg_stats_reset(&cfqg->dead_stats);
}
#else /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -795,8 +774,6 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
-static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
- uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
uint64_t start_time, uint64_t io_start_time, int rw) { }
@@ -883,8 +860,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
- struct cfq_io_cq *cic, struct bio *bio,
- gfp_t gfp_mask);
+ struct cfq_io_cq *cic, struct bio *bio);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
@@ -1546,130 +1522,171 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void cfqg_stats_init(struct cfqg_stats *stats)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
+ bool on_dfl, bool reset_dev, bool is_leaf_weight);
+
+static void cfqg_stats_exit(struct cfqg_stats *stats)
{
- blkg_rwstat_init(&stats->service_bytes);
- blkg_rwstat_init(&stats->serviced);
- blkg_rwstat_init(&stats->merged);
- blkg_rwstat_init(&stats->service_time);
- blkg_rwstat_init(&stats->wait_time);
- blkg_rwstat_init(&stats->queued);
+ blkg_rwstat_exit(&stats->merged);
+ blkg_rwstat_exit(&stats->service_time);
+ blkg_rwstat_exit(&stats->wait_time);
+ blkg_rwstat_exit(&stats->queued);
+ blkg_stat_exit(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_exit(&stats->unaccounted_time);
+ blkg_stat_exit(&stats->avg_queue_size_sum);
+ blkg_stat_exit(&stats->avg_queue_size_samples);
+ blkg_stat_exit(&stats->dequeue);
+ blkg_stat_exit(&stats->group_wait_time);
+ blkg_stat_exit(&stats->idle_time);
+ blkg_stat_exit(&stats->empty_time);
+#endif
+}
- blkg_stat_init(&stats->sectors);
- blkg_stat_init(&stats->time);
+static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
+{
+ if (blkg_rwstat_init(&stats->merged, gfp) ||
+ blkg_rwstat_init(&stats->service_time, gfp) ||
+ blkg_rwstat_init(&stats->wait_time, gfp) ||
+ blkg_rwstat_init(&stats->queued, gfp) ||
+ blkg_stat_init(&stats->time, gfp))
+ goto err;
#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_init(&stats->unaccounted_time);
- blkg_stat_init(&stats->avg_queue_size_sum);
- blkg_stat_init(&stats->avg_queue_size_samples);
- blkg_stat_init(&stats->dequeue);
- blkg_stat_init(&stats->group_wait_time);
- blkg_stat_init(&stats->idle_time);
- blkg_stat_init(&stats->empty_time);
+ if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
+ blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
+ blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
+ blkg_stat_init(&stats->dequeue, gfp) ||
+ blkg_stat_init(&stats->group_wait_time, gfp) ||
+ blkg_stat_init(&stats->idle_time, gfp) ||
+ blkg_stat_init(&stats->empty_time, gfp))
+ goto err;
#endif
+ return 0;
+err:
+ cfqg_stats_exit(stats);
+ return -ENOMEM;
}
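The init/exit pair exists because blkg_stat and blkg_rwstat become per-cpu counters elsewhere in this series, so every counter now needs a real allocation that can fail, hence the gfp argument and the -ENOMEM unwind through cfqg_stats_exit(). Roughly the shape involved, simplified from the blkcg header:

        struct blkg_stat {
                struct percpu_counter   cpu_cnt;   /* hot-path per-cpu increments */
                atomic64_t              aux_cnt;   /* totals folded in from dead children */
        };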
-static void cfq_cpd_init(const struct blkcg *blkcg)
+static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
- struct cfq_group_data *cgd =
- cpd_to_cfqgd(blkcg->pd[blkcg_policy_cfq.plid]);
+ struct cfq_group_data *cgd;
- if (blkcg == &blkcg_root) {
- cgd->weight = 2 * CFQ_WEIGHT_DEFAULT;
- cgd->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
- } else {
- cgd->weight = CFQ_WEIGHT_DEFAULT;
- cgd->leaf_weight = CFQ_WEIGHT_DEFAULT;
- }
+ cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+ if (!cgd)
+ return NULL;
+ return &cgd->cpd;
+}
+
+static void cfq_cpd_init(struct blkcg_policy_data *cpd)
+{
+ struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
+ unsigned int weight = cgroup_on_dfl(blkcg_root.css.cgroup) ?
+ CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
+
+ if (cpd_to_blkcg(cpd) == &blkcg_root)
+ weight *= 2;
+
+ cgd->weight = weight;
+ cgd->leaf_weight = weight;
}
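The default picked here depends on which hierarchy the controller is mounted on, with the root cgroup still getting twice the default. The two scales in play, per the CFQ_WEIGHT_LEGACY_* and CGROUP_WEIGHT_* definitions this series uses:

        /* legacy blkio.weight:  min 10,  default 500, max 1000
         * unified io.weight:    min  1,  default 100, max 10000 */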
-static void cfq_pd_init(struct blkcg_gq *blkg)
+static void cfq_cpd_free(struct blkcg_policy_data *cpd)
{
- struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkg->blkcg);
+ kfree(cpd_to_cfqgd(cpd));
+}
+
+static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
+{
+ struct blkcg *blkcg = cpd_to_blkcg(cpd);
+ bool on_dfl = cgroup_on_dfl(blkcg_root.css.cgroup);
+ unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
+
+ if (blkcg == &blkcg_root)
+ weight *= 2;
+
+ WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
+ WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
+}
+
+static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
+{
+ struct cfq_group *cfqg;
+
+ cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
+ if (!cfqg)
+ return NULL;
cfq_init_cfqg_base(cfqg);
+ if (cfqg_stats_init(&cfqg->stats, gfp)) {
+ kfree(cfqg);
+ return NULL;
+ }
+
+ return &cfqg->pd;
+}
+
+static void cfq_pd_init(struct blkg_policy_data *pd)
+{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
+
cfqg->weight = cgd->weight;
cfqg->leaf_weight = cgd->leaf_weight;
- cfqg_stats_init(&cfqg->stats);
- cfqg_stats_init(&cfqg->dead_stats);
}
-static void cfq_pd_offline(struct blkcg_gq *blkg)
+static void cfq_pd_offline(struct blkg_policy_data *pd)
{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ int i;
+
+ for (i = 0; i < IOPRIO_BE_NR; i++) {
+ if (cfqg->async_cfqq[0][i])
+ cfq_put_queue(cfqg->async_cfqq[0][i]);
+ if (cfqg->async_cfqq[1][i])
+ cfq_put_queue(cfqg->async_cfqq[1][i]);
+ }
+
+ if (cfqg->async_idle_cfqq)
+ cfq_put_queue(cfqg->async_idle_cfqq);
+
/*
* @blkg is going offline and will be ignored by
* blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
* that they don't get lost. If IOs complete after this point, the
* stats for them will be lost. Oh well...
*/
- cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+ cfqg_stats_xfer_dead(cfqg);
}
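The table released above mirrors the indexing in cfq_async_queue_prio() further down; dropping the queues here releases the reference cfq_get_queue() takes when it installs a shared async queue, now that the table is per cfq_group rather than per cfq_data:

        /*   async_cfqq[0][0..7]  IOPRIO_CLASS_RT
         *   async_cfqq[1][0..7]  IOPRIO_CLASS_BE (CLASS_NONE maps here at IOPRIO_NORM)
         *   async_idle_cfqq      IOPRIO_CLASS_IDLE */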
-/* offset delta from cfqg->stats to cfqg->dead_stats */
-static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
- offsetof(struct cfq_group, stats);
-
-/* to be used by recursive prfill, sums live and dead stats recursively */
-static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+static void cfq_pd_free(struct blkg_policy_data *pd)
{
- u64 sum = 0;
-
- sum += blkg_stat_recursive_sum(pd, off);
- sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
- return sum;
-}
-
-/* to be used by recursive prfill, sums live and dead rwstats recursively */
-static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
- int off)
-{
- struct blkg_rwstat a, b;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- a = blkg_rwstat_recursive_sum(pd, off);
- b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
- blkg_rwstat_merge(&a, &b);
- return a;
+ cfqg_stats_exit(&cfqg->stats);
+ return kfree(cfqg);
}
-static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
{
- struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
cfqg_stats_reset(&cfqg->stats);
- cfqg_stats_reset(&cfqg->dead_stats);
}
-/*
- * Search for the cfq group current task belongs to. request_queue lock must
- * be held.
- */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
- struct request_queue *q = cfqd->queue;
- struct cfq_group *cfqg = NULL;
-
- /* avoid lookup for the common case where there's no blkcg */
- if (blkcg == &blkcg_root) {
- cfqg = cfqd->root_group;
- } else {
- struct blkcg_gq *blkg;
-
- blkg = blkg_lookup_create(blkcg, q);
- if (!IS_ERR(blkg))
- cfqg = blkg_to_cfqg(blkg);
- }
+ struct blkcg_gq *blkg;
- return cfqg;
+ blkg = blkg_lookup(blkcg, cfqd->queue);
+ if (likely(blkg))
+ return blkg_to_cfqg(blkg);
+ return NULL;
}
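cfq no longer creates blkgs from its IO path; creation is consolidated on the bio issue side in the blkcg core (blkcg_bio_issue_check()), so a plain lookup is enough here and a missing blkg simply routes the request to the oom_cfqq. A rough sketch of what the consolidated issue path does (simplified, not the exact core function):

        /* once per bio, under rcu_read_lock(), in the blkcg core */
        blkg = blkg_lookup(bio_blkcg(bio), q);
        if (unlikely(!blkg))
                blkg = blkg_lookup_create(bio_blkcg(bio), q); /* needs queue_lock */
        /* the shared stat_bytes/stat_ios counters are bumped here, and policies
         * such as cfq and blk-throttle only ever look blkgs up afterwards */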
static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
- /* Currently, all async queues are mapped to root group */
- if (!cfq_cfqq_sync(cfqq))
- cfqg = cfqq->cfqd->root_group;
-
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
cfqg_get(cfqg);
@@ -1739,36 +1756,48 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off,
- bool is_leaf_weight)
+ bool on_dfl, bool is_leaf_weight)
{
+ unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
+ unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
struct blkcg *blkcg = css_to_blkcg(of_css(of));
struct blkg_conf_ctx ctx;
struct cfq_group *cfqg;
struct cfq_group_data *cfqgd;
int ret;
+ u64 v;
ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
if (ret)
return ret;
- ret = -EINVAL;
+ if (sscanf(ctx.body, "%llu", &v) == 1) {
+ /* require "default" on dfl */
+ ret = -ERANGE;
+ if (!v && on_dfl)
+ goto out_finish;
+ } else if (!strcmp(strim(ctx.body), "default")) {
+ v = 0;
+ } else {
+ ret = -EINVAL;
+ goto out_finish;
+ }
+
cfqg = blkg_to_cfqg(ctx.blkg);
cfqgd = blkcg_to_cfqgd(blkcg);
- if (!cfqg || !cfqgd)
- goto err;
- if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+ ret = -ERANGE;
+ if (!v || (v >= min && v <= max)) {
if (!is_leaf_weight) {
- cfqg->dev_weight = ctx.v;
- cfqg->new_weight = ctx.v ?: cfqgd->weight;
+ cfqg->dev_weight = v;
+ cfqg->new_weight = v ?: cfqgd->weight;
} else {
- cfqg->dev_leaf_weight = ctx.v;
- cfqg->new_leaf_weight = ctx.v ?: cfqgd->leaf_weight;
+ cfqg->dev_leaf_weight = v;
+ cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
}
ret = 0;
}
-
-err:
+out_finish:
blkg_conf_finish(&ctx);
return ret ?: nbytes;
}
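How a per-device override is cleared now differs by interface: the legacy files keep accepting a weight of 0 (otherwise 10..1000), while on the unified hierarchy a bare 0 is rejected with -ERANGE and the word "default" must be written instead, with numeric values checked against the 1..10000 CGROUP_WEIGHT_* range. Example writes (8:16 is just an illustrative device):

        8:16 300        accepted on both interfaces (within the respective range)
        8:16 0          legacy only: drop the per-device override
        8:16 default    drop the per-device override (the only clearing form on the unified hierarchy)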
@@ -1776,25 +1805,27 @@ err:
static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
- return __cfqg_set_weight_device(of, buf, nbytes, off, false);
+ return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
}
static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
- return __cfqg_set_weight_device(of, buf, nbytes, off, true);
+ return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
}
-static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val, bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
+ bool on_dfl, bool reset_dev, bool is_leaf_weight)
{
+ unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
+ unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
struct cfq_group_data *cfqgd;
int ret = 0;
- if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
- return -EINVAL;
+ if (val < min || val > max)
+ return -ERANGE;
spin_lock_irq(&blkcg->lock);
cfqgd = blkcg_to_cfqgd(blkcg);
@@ -1815,9 +1846,13 @@ static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
continue;
if (!is_leaf_weight) {
+ if (reset_dev)
+ cfqg->dev_weight = 0;
if (!cfqg->dev_weight)
cfqg->new_weight = cfqgd->weight;
} else {
+ if (reset_dev)
+ cfqg->dev_leaf_weight = 0;
if (!cfqg->dev_leaf_weight)
cfqg->new_leaf_weight = cfqgd->leaf_weight;
}
@@ -1831,13 +1866,13 @@ out:
static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val)
{
- return __cfq_set_weight(css, cft, val, false);
+ return __cfq_set_weight(css, val, false, false, false);
}
static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
- return __cfq_set_weight(css, cft, val, true);
+ return __cfq_set_weight(css, val, false, false, true);
}
static int cfqg_print_stat(struct seq_file *sf, void *v)
@@ -1857,16 +1892,16 @@ static int cfqg_print_rwstat(struct seq_file *sf, void *v)
static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
-
+ u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
+ &blkcg_policy_cfq, off);
return __blkg_prfill_u64(sf, pd, sum);
}
static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
-
+ struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
+ &blkcg_policy_cfq, off);
return __blkg_prfill_rwstat(sf, pd, &sum);
}
@@ -1886,6 +1921,40 @@ static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
return 0;
}
+static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
+
+ return __blkg_prfill_u64(sf, pd, sum >> 9);
+}
+
+static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
+ return 0;
+}
+
+static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
+ offsetof(struct blkcg_gq, stat_bytes));
+ u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+
+ return __blkg_prfill_u64(sf, pd, sum >> 9);
+}
+
+static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
+ false);
+ return 0;
+}
+
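The sectors and sectors_recursive files therefore no longer need their own counters; they are computed from blkg->stat_bytes, which the blkcg core updates as each bio is issued, shifted down to 512-byte sectors:

        /* matches the >> 9 above: bytes to 512B sectors,
         * e.g. 1 MiB of issued IO reads back as 2048 */
        sectors = (read_bytes + write_bytes) >> 9;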
#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
@@ -1912,7 +1981,7 @@ static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */
-static struct cftype cfq_blkcg_files[] = {
+static struct cftype cfq_blkcg_legacy_files[] = {
/* on root, weight is mapped to leaf_weight */
{
.name = "weight_device",
@@ -1960,18 +2029,17 @@ static struct cftype cfq_blkcg_files[] = {
},
{
.name = "sectors",
- .private = offsetof(struct cfq_group, stats.sectors),
- .seq_show = cfqg_print_stat,
+ .seq_show = cfqg_print_stat_sectors,
},
{
.name = "io_service_bytes",
- .private = offsetof(struct cfq_group, stats.service_bytes),
- .seq_show = cfqg_print_rwstat,
+ .private = (unsigned long)&blkcg_policy_cfq,
+ .seq_show = blkg_print_stat_bytes,
},
{
.name = "io_serviced",
- .private = offsetof(struct cfq_group, stats.serviced),
- .seq_show = cfqg_print_rwstat,
+ .private = (unsigned long)&blkcg_policy_cfq,
+ .seq_show = blkg_print_stat_ios,
},
{
.name = "io_service_time",
@@ -2002,18 +2070,17 @@ static struct cftype cfq_blkcg_files[] = {
},
{
.name = "sectors_recursive",
- .private = offsetof(struct cfq_group, stats.sectors),
- .seq_show = cfqg_print_stat_recursive,
+ .seq_show = cfqg_print_stat_sectors_recursive,
},
{
.name = "io_service_bytes_recursive",
- .private = offsetof(struct cfq_group, stats.service_bytes),
- .seq_show = cfqg_print_rwstat_recursive,
+ .private = (unsigned long)&blkcg_policy_cfq,
+ .seq_show = blkg_print_stat_bytes_recursive,
},
{
.name = "io_serviced_recursive",
- .private = offsetof(struct cfq_group, stats.serviced),
- .seq_show = cfqg_print_rwstat_recursive,
+ .private = (unsigned long)&blkcg_policy_cfq,
+ .seq_show = blkg_print_stat_ios_recursive,
},
{
.name = "io_service_time_recursive",
@@ -2068,9 +2135,51 @@ static struct cftype cfq_blkcg_files[] = {
#endif /* CONFIG_DEBUG_BLK_CGROUP */
{ } /* terminate */
};
+
+static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
+
+ seq_printf(sf, "default %u\n", cgd->weight);
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
+ &blkcg_policy_cfq, 0, false);
+ return 0;
+}
+
+static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ char *endp;
+ int ret;
+ u64 v;
+
+ buf = strim(buf);
+
+ /* "WEIGHT" or "default WEIGHT" sets the default weight */
+ v = simple_strtoull(buf, &endp, 0);
+ if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
+ ret = __cfq_set_weight(of_css(of), v, true, false, false);
+ return ret ?: nbytes;
+ }
+
+ /* "MAJ:MIN WEIGHT" */
+ return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
+}
+
+static struct cftype cfq_blkcg_files[] = {
+ {
+ .name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cfq_print_weight_on_dfl,
+ .write = cfq_set_weight_on_dfl,
+ },
+ { } /* terminate */
+};
+
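This small array is what appears as io.weight when the io controller is enabled on the unified hierarchy (the larger array above stays registered as the legacy blkio files). Reads show the cgroup default followed by any per-device overrides; writes take a bare weight, "default WEIGHT", or a "MAJ:MIN WEIGHT" pair, e.g. with illustrative values:

        reading io.weight:
                default 100
                8:16 200
        writing io.weight:
                300                 set the cgroup default
                default 300         same thing, explicit form
                8:16 500            per-device override
                8:16 default        drop the per-device override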
#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
return cfqd->root_group;
}
@@ -2873,7 +2982,6 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
- cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}
/*
@@ -3506,14 +3614,14 @@ static void cfq_exit_icq(struct io_cq *icq)
struct cfq_io_cq *cic = icq_to_cic(icq);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- if (cic->cfqq[BLK_RW_ASYNC]) {
- cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
- cic->cfqq[BLK_RW_ASYNC] = NULL;
+ if (cic_to_cfqq(cic, false)) {
+ cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
+ cic_set_cfqq(cic, NULL, false);
}
- if (cic->cfqq[BLK_RW_SYNC]) {
- cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
- cic->cfqq[BLK_RW_SYNC] = NULL;
+ if (cic_to_cfqq(cic, true)) {
+ cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
+ cic_set_cfqq(cic, NULL, true);
}
}
@@ -3572,18 +3680,14 @@ static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
return;
- cfqq = cic->cfqq[BLK_RW_ASYNC];
+ cfqq = cic_to_cfqq(cic, false);
if (cfqq) {
- struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
- GFP_ATOMIC);
- if (new_cfqq) {
- cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
- cfq_put_queue(cfqq);
- }
+ cfq_put_queue(cfqq);
+ cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
+ cic_set_cfqq(cic, cfqq, false);
}
- cfqq = cic->cfqq[BLK_RW_SYNC];
+ cfqq = cic_to_cfqq(cic, true);
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
@@ -3614,7 +3718,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct cfq_queue *sync_cfqq;
+ struct cfq_queue *cfqq;
uint64_t serial_nr;
rcu_read_lock();
@@ -3628,15 +3732,22 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
return;
- sync_cfqq = cic_to_cfqq(cic, 1);
- if (sync_cfqq) {
- /*
- * Drop reference to sync queue. A new sync queue will be
- * assigned in new group upon arrival of a fresh request.
- */
- cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
- cic_set_cfqq(cic, NULL, 1);
- cfq_put_queue(sync_cfqq);
+ /*
+ * Drop reference to queues. New queues will be assigned in new
+ * group upon arrival of fresh requests.
+ */
+ cfqq = cic_to_cfqq(cic, false);
+ if (cfqq) {
+ cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
+ cic_set_cfqq(cic, NULL, false);
+ cfq_put_queue(cfqq);
+ }
+
+ cfqq = cic_to_cfqq(cic, true);
+ if (cfqq) {
+ cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
+ cic_set_cfqq(cic, NULL, true);
+ cfq_put_queue(cfqq);
}
cic->blkcg_serial_nr = serial_nr;
@@ -3645,81 +3756,19 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
- struct bio *bio, gfp_t gfp_mask)
-{
- struct blkcg *blkcg;
- struct cfq_queue *cfqq, *new_cfqq = NULL;
- struct cfq_group *cfqg;
-
-retry:
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
- cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
- if (!cfqg) {
- cfqq = &cfqd->oom_cfqq;
- goto out;
- }
-
- cfqq = cic_to_cfqq(cic, is_sync);
-
- /*
- * Always try a new alloc if we fell back to the OOM cfqq
- * originally, since it should just be a temporary situation.
- */
- if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = NULL;
- if (new_cfqq) {
- cfqq = new_cfqq;
- new_cfqq = NULL;
- } else if (gfp_mask & __GFP_WAIT) {
- rcu_read_unlock();
- spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc_node(cfq_pool,
- gfp_mask | __GFP_ZERO,
- cfqd->queue->node);
- spin_lock_irq(cfqd->queue->queue_lock);
- if (new_cfqq)
- goto retry;
- else
- return &cfqd->oom_cfqq;
- } else {
- cfqq = kmem_cache_alloc_node(cfq_pool,
- gfp_mask | __GFP_ZERO,
- cfqd->queue->node);
- }
-
- if (cfqq) {
- cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, cic);
- cfq_link_cfqq_cfqg(cfqq, cfqg);
- cfq_log_cfqq(cfqd, cfqq, "alloced");
- } else
- cfqq = &cfqd->oom_cfqq;
- }
-out:
- if (new_cfqq)
- kmem_cache_free(cfq_pool, new_cfqq);
-
- rcu_read_unlock();
- return cfqq;
-}
-
static struct cfq_queue **
-cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
{
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
- return &cfqd->async_cfqq[0][ioprio];
+ return &cfqg->async_cfqq[0][ioprio];
case IOPRIO_CLASS_NONE:
ioprio = IOPRIO_NORM;
/* fall through */
case IOPRIO_CLASS_BE:
- return &cfqd->async_cfqq[1][ioprio];
+ return &cfqg->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
- return &cfqd->async_idle_cfqq;
+ return &cfqg->async_idle_cfqq;
default:
BUG();
}
@@ -3727,12 +3776,20 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
- struct bio *bio, gfp_t gfp_mask)
+ struct bio *bio)
{
int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
- struct cfq_queue *cfqq = NULL;
+ struct cfq_queue *cfqq;
+ struct cfq_group *cfqg;
+
+ rcu_read_lock();
+ cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
+ if (!cfqg) {
+ cfqq = &cfqd->oom_cfqq;
+ goto out;
+ }
if (!is_sync) {
if (!ioprio_valid(cic->ioprio)) {
@@ -3740,22 +3797,32 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
ioprio = task_nice_ioprio(tsk);
ioprio_class = task_nice_ioclass(tsk);
}
- async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
cfqq = *async_cfqq;
+ if (cfqq)
+ goto out;
}
- if (!cfqq)
- cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
+ cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+ cfqd->queue->node);
+ if (!cfqq) {
+ cfqq = &cfqd->oom_cfqq;
+ goto out;
+ }
- /*
- * pin the queue now that it's allocated, scheduler exit will prune it
- */
- if (!is_sync && !(*async_cfqq)) {
+ cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+ cfq_init_prio_data(cfqq, cic);
+ cfq_link_cfqq_cfqg(cfqq, cfqg);
+ cfq_log_cfqq(cfqd, cfqq, "alloced");
+
+ if (async_cfqq) {
+ /* a new async queue is created, pin and remember */
cfqq->ref++;
*async_cfqq = cfqq;
}
-
+out:
cfqq->ref++;
+ rcu_read_unlock();
return cfqq;
}
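Allocation now happens right here, under the queue_lock and rcu_read_lock(), which is why the gfp_mask argument is gone and GFP_NOWAIT is used: there is no sleep-and-retry path any more, and a failed allocation just degrades to the embedded oom_cfqq. The fallback discipline in miniature (illustrative names):

        obj = kmem_cache_alloc_node(pool, GFP_NOWAIT | __GFP_ZERO, node);
        if (!obj)
                obj = &embedded_fallback; /* preallocated, never freed, always usable */

Note also that the async queue table consulted above lives in the cfq_group (see cfq_async_queue_prio()), so shared async queues are per-cgroup rather than per-request_queue.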
@@ -4289,8 +4356,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
- might_sleep_if(gfp_mask & __GFP_WAIT);
-
spin_lock_irq(q->queue_lock);
check_ioprio_changed(cic, bio);
@@ -4298,7 +4363,9 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
+ if (cfqq)
+ cfq_put_queue(cfqq);
+ cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
@@ -4404,21 +4471,6 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
cancel_work_sync(&cfqd->unplug_work);
}
-static void cfq_put_async_queues(struct cfq_data *cfqd)
-{
- int i;
-
- for (i = 0; i < IOPRIO_BE_NR; i++) {
- if (cfqd->async_cfqq[0][i])
- cfq_put_queue(cfqd->async_cfqq[0][i]);
- if (cfqd->async_cfqq[1][i])
- cfq_put_queue(cfqd->async_cfqq[1][i]);
- }
-
- if (cfqd->async_idle_cfqq)
- cfq_put_queue(cfqd->async_idle_cfqq);
-}
-
static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
@@ -4431,8 +4483,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
if (cfqd->active_queue)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
- cfq_put_async_queues(cfqd);
-
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
@@ -4486,9 +4536,9 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
goto out_free;
cfq_init_cfqg_base(cfqd->root_group);
+ cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
+ cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
#endif
- cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
- cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
/*
* Not strictly needed (since RB_ROOT just clears the node and we
@@ -4499,7 +4549,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
cfqd->prio_trees[i] = RB_ROOT;
/*
- * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+ * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
* will not attempt to free it. oom_cfqq is linked to root_group
* but shouldn't hold a reference as it'll never be unlinked. Lose
@@ -4683,13 +4733,18 @@ static struct elevator_type iosched_cfq = {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
- .pd_size = sizeof(struct cfq_group),
- .cpd_size = sizeof(struct cfq_group_data),
- .cftypes = cfq_blkcg_files,
+ .dfl_cftypes = cfq_blkcg_files,
+ .legacy_cftypes = cfq_blkcg_legacy_files,
+ .cpd_alloc_fn = cfq_cpd_alloc,
.cpd_init_fn = cfq_cpd_init,
+ .cpd_free_fn = cfq_cpd_free,
+ .cpd_bind_fn = cfq_cpd_bind,
+
+ .pd_alloc_fn = cfq_pd_alloc,
.pd_init_fn = cfq_pd_init,
.pd_offline_fn = cfq_pd_offline,
+ .pd_free_fn = cfq_pd_free,
.pd_reset_stats_fn = cfq_pd_reset_stats,
};
#endif
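The policy descriptor ties the preceding hunks together: the two cftype arrays register both the legacy blkio files and the unified-hierarchy io.weight file, and the method pointers spell out the per-blkcg (cpd) and per-blkg (pd) lifecycles that replace the old pd_size/cpd_size sizing. The call ordering as cfq relies on it:

        /* per-blkcg data:         cpd_alloc_fn -> cpd_init_fn -> ... -> cpd_free_fn,
         *                         with cpd_bind_fn re-applying the default weights
         *                         when the controller is bound to a hierarchy
         * per-(blkcg, queue) data: pd_alloc_fn -> pd_init_fn -> pd_offline_fn -> pd_free_fn,
         *                         plus pd_reset_stats_fn on an explicit stat reset */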