path: root/block/blk-mq.c
author    Ming Lei <ming.lei@redhat.com>    2018-04-08 17:48:14 +0800
committer Jens Axboe <axboe@kernel.dk>    2018-04-10 08:38:46 -0600
commit    37c7c6c76d431dd7ef9c29d95f6052bd425f004c (patch)
tree      f67363b026b429b79416d9f171d7df9ecca529d4 /block/blk-mq.c
parent    127276c6ce5a30fcc806b7fe53015f4f89b62956 (diff)
blk-mq: remove code for dealing with remapping queue
Firstly, since commit 4b855ad37194 ("blk-mq: Create hctx for each present CPU"), blk-mq no longer remaps queues after the CPU topology changes.

Secondly, set->nr_hw_queues can't be bigger than nr_cpu_ids, and we now map all possible CPUs to hw queues, so at least one CPU is mapped to each hctx.

So the queue mapping has become static and fixed, just like a percpu variable, and we don't need to handle queue remapping any more.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
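The reasoning above can be checked with a small user-space sketch (a hypothetical illustration, not the actual blk_mq_map_queues() or blk_mq_map_swqueue() code; NR_CPU_IDS, nr_hw_queues and mq_map are stand-ins for the blk-mq fields of the same names): once the hw queue count is capped at the number of possible CPUs and every possible CPU is mapped round-robin, each hw queue index is hit by at least one CPU, which is exactly the invariant the new WARN_ON(!hctx->nr_ctx) in the diff below asserts.

/*
 * Hypothetical user-space illustration only -- not kernel code.
 * Shows that a round-robin map over all possible CPUs leaves no
 * hw queue unmapped once the queue count is capped at the CPU count.
 */
#include <assert.h>
#include <stdio.h>

#define NR_CPU_IDS 8U	/* all possible CPUs, online or not */

int main(void)
{
	unsigned int nr_hw_queues = 3;			/* requested hw queue count */
	unsigned int mq_map[NR_CPU_IDS];		/* cpu -> hw queue index */
	unsigned int nr_ctx[NR_CPU_IDS] = { 0 };	/* sw queues per hw queue */
	unsigned int cpu, q;

	/* blk-mq never allows more hw queues than possible CPUs */
	if (nr_hw_queues > NR_CPU_IDS)
		nr_hw_queues = NR_CPU_IDS;

	/* map every possible CPU, mirroring for_each_possible_cpu() */
	for (cpu = 0; cpu < NR_CPU_IDS; cpu++) {
		mq_map[cpu] = cpu % nr_hw_queues;	/* simplified round-robin */
		nr_ctx[mq_map[cpu]]++;
	}

	/* same invariant as the WARN_ON(!hctx->nr_ctx) added by the patch */
	for (q = 0; q < nr_hw_queues; q++) {
		assert(nr_ctx[q] > 0);
		printf("hw queue %u: %u software queue(s)\n", q, nr_ctx[q]);
	}
	return 0;
}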
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--    block/blk-mq.c    34
1 file changed, 3 insertions, 31 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ee9d8e964b33..0dc9e341c2a72 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2329,7 +2329,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
static void blk_mq_map_swqueue(struct request_queue *q)
{
- unsigned int i, hctx_idx;
+ unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,23 +2346,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Map software to hardware queues.
- *
- * If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = q->mq_map[i];
- /* unmapped hw queue can be remapped after CPU topo changed */
- if (!set->tags[hctx_idx] &&
- !__blk_mq_alloc_rq_map(set, hctx_idx)) {
- /*
- * If tags initialization fail for some hctx,
- * that hctx won't be brought online. In this
- * case, remap the current ctx to hctx[0] which
- * is guaranteed to always have tags allocated
- */
- q->mq_map[i] = 0;
- }
-
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@@ -2374,21 +2359,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
- /*
- * If no software queues are mapped to this hardware queue,
- * disable it and free the request entries.
- */
- if (!hctx->nr_ctx) {
- /* Never unmap queue 0. We need it as a
- * fallback in case of a new remap fails
- * allocation
- */
- if (i && set->tags[i])
- blk_mq_free_map_and_requests(set, i);
-
- hctx->tags = NULL;
- continue;
- }
+ /* every hctx should get mapped by at least one CPU */
+ WARN_ON(!hctx->nr_ctx);
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);