From 32825c45ff8f4cce937ab85b030dc693ceb1aa0a Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Mon, 3 Jul 2017 20:37:14 +0800
Subject: blk-mq-sched: fix performance regression of mq-deadline

When mq-deadline is used, a more than 20% IOPS drop is observed for
sequential read and sequential write on SATA (scsi-mq) devices,
compared with the 'none' scheduler.

The reason is that the default nr_requests for the scheduler is too
big for small queue-depth devices, so latency increases significantly.

Since the principle of taking 256 requests for the mq scheduler is
based on a queue depth of 128, this patch changes the default to
double of min(hw queue_depth, 128).

Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-mq-sched.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 7f0dc48ffb408..4ab69435708c2 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -515,10 +515,12 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	}
 
 	/*
-	 * Default to 256, since we don't split into sync/async like the
-	 * old code did. Additionally, this is a per-hw queue depth.
+	 * Default to double of smaller one between hw queue_depth and 128,
+	 * since we don't split into sync/async like the old code did.
+	 * Additionally, this is a per-hw queue depth.
 	 */
-	q->nr_requests = 2 * BLKDEV_MAX_RQ;
+	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
+				   BLKDEV_MAX_RQ);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_sched_alloc_tags(q, hctx, i);
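
As a rough illustration of the effect of this change, here is a minimal
userspace sketch (not kernel code, and not part of the patch; the sample
queue depths below are made-up examples) of the new nr_requests default
compared with the old fixed value of 2 * BLKDEV_MAX_RQ:

/*
 * Illustrative sketch only: mirrors the arithmetic used in
 * blk_mq_init_sched() after this patch. BLKDEV_MAX_RQ is 128 in the
 * kernel at this point; the queue depths below are example values.
 */
#include <stdio.h>

#define BLKDEV_MAX_RQ 128

static unsigned int sched_default_nr_requests(unsigned int hw_queue_depth)
{
	/* 2 * min(hw queue depth, 128) */
	unsigned int depth = hw_queue_depth < BLKDEV_MAX_RQ ?
			     hw_queue_depth : BLKDEV_MAX_RQ;
	return 2 * depth;
}

int main(void)
{
	/* e.g. a shallow SATA/NCQ-style queue vs. deeper queues */
	unsigned int depths[] = { 31, 64, 128, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("hw queue depth %4u -> nr_requests %3u (old default: %u)\n",
		       depths[i], sched_default_nr_requests(depths[i]),
		       2 * BLKDEV_MAX_RQ);

	return 0;
}

For a shallow queue (depth 31 in this example) the scheduler default
drops from 256 to 62 requests per hardware queue, while devices with a
queue depth of 128 or more keep the previous default of 256.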