path: root/block/blk-mq-sched.c
author     Jens Axboe <axboe@fb.com>    2017-02-03 09:48:28 -0700
committer  Jens Axboe <axboe@fb.com>    2017-02-03 09:48:28 -0700
commit     e4d750c97794ea2bab793d4c518b1f4006364588 (patch)
tree       8b351d08b1d81986402964a243268e70bb31a6a9 /block/blk-mq-sched.c
parent     b973cb7e89fe3dcc2bc72c5b3aa7a3bfd9d0e6d5 (diff)
block: free merged request in the caller
If we end up doing a request-to-request merge when we have completed a bio-to-request merge, we free the request from deep down in that path. For blk-mq-sched, the merge path has to hold the appropriate lock, but we don't need it for freeing the request. And in fact holding the lock is problematic, since we are now calling the mq sched put_rq_private() hook with the lock held. Other call paths do not hold this lock.

Fix this inconsistency by ensuring that the caller frees a merged request. Then we can do it outside of the lock, making it both more efficient and fixing the blk-mq-sched problem of invoking parts of the scheduler with an unknown lock state.

Reported-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 3ec52f4940944..ee455e7cf9d8e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -234,7 +234,8 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+ struct request **merged_request)
{
struct request *rq;
int ret;
@@ -244,7 +245,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
if (bio_attempt_back_merge(q, rq, bio)) {
- if (!attempt_back_merge(q, rq))
+ *merged_request = attempt_back_merge(q, rq);
+ if (!*merged_request)
elv_merged_request(q, rq, ret);
return true;
}
@@ -252,7 +254,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
if (bio_attempt_front_merge(q, rq, bio)) {
- if (!attempt_front_merge(q, rq))
+ *merged_request = attempt_front_merge(q, rq);
+ if (!*merged_request)
elv_merged_request(q, rq, ret);
return true;
}
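To show how the new out-parameter is meant to be consumed, here is a minimal caller-side sketch of an mq scheduler's ->bio_merge hook, following the pattern the commit message describes. The dd_bio_merge name, struct deadline_data, and dd->lock are illustrative stand-ins for a scheduler's private data and lock; they are not part of this patch.

/* Assumed context: built inside the block layer, alongside blk-mq-sched.h. */
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	/* The merge attempt itself still runs under the scheduler's lock. */
	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, &free);
	spin_unlock(&dd->lock);

	/*
	 * A request merged away by attempt_back_merge()/attempt_front_merge()
	 * is handed back through the out-parameter and freed here, outside
	 * the lock, so put_rq_private() is never invoked with the lock held.
	 */
	if (free)
		blk_mq_free_request(free);

	return ret;
}

With this split, the scheduler lock only covers the merge decision; the request teardown always happens in a known lock state, as the commit message intends.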