author     Christoph Hellwig <hch@lst.de>  2014-04-14 10:30:08 +0200
committer  Jens Axboe <axboe@fb.com>       2014-04-15 14:03:02 -0600
commit     8727af4b9d45c7503042e3fbd926c1a173876e9c (patch)
tree       e6f2d8b7d9808cfbfb0b67a088b182776518a797
parent     9d74e25737d73e93ccddeb5a61bcd56b7b8eb57b (diff)
blk-mq: make ->flush_rq fully transparent to drivers
Drivers shouldn't have to care about the block layer setting aside a request to implement the flush state machine. We already override the mq context and tag to make it more transparent, but so far haven't dealt with the driver private data in the request. Make sure to override this as well, and while we're at it add a proper helper sitting in blk-mq.c that implements the full impersonation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
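For context, a minimal sketch of why copying the driver private data (pdu) matters. It assumes a hypothetical blk-mq driver that keeps per-request state in a pdu sized via cmd_size; struct my_cmd, my_complete_rq() and release_hw_slot() are illustrative names, not part of this patch; only blk_mq_rq_to_pdu() is a real helper.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical per-request driver state, allocated behind each request. */
struct my_cmd {
        int hw_slot;            /* e.g. a hardware slot the driver reserved */
};

static void release_hw_slot(int slot); /* hypothetical driver helper */

static void my_complete_rq(struct request *rq)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        /*
         * rq may be q->flush_rq impersonating the original request.
         * Before this patch the flush request's pdu still held whatever
         * was left behind after blk_rq_init(); with
         * blk_mq_clone_flush_request() it mirrors the original request's
         * pdu, so cmd->hw_slot is the value the driver stored when it
         * set up the original request.
         */
        release_hw_slot(cmd->hw_slot);
}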
-rw-r--r--  block/blk-flush.c  12
-rw-r--r--  block/blk-mq.c     20
-rw-r--r--  block/blk-mq.h      2
3 files changed, 24 insertions, 10 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 0e42adcfb55e2..c41fc19f75d12 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -307,16 +307,8 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_pending_idx ^= 1;
 
 	blk_rq_init(q, q->flush_rq);
-	if (q->mq_ops) {
-		/*
-		 * Reuse the tag value from the fist waiting request,
-		 * with blk-mq the tag is generated during request
-		 * allocation and drivers can rely on it being inside
-		 * the range they asked for.
-		 */
-		q->flush_rq->mq_ctx = first_rq->mq_ctx;
-		q->flush_rq->tag = first_rq->tag;
-	}
+	if (q->mq_ops)
+		blk_mq_clone_flush_request(q->flush_rq, first_rq);
 
 	q->flush_rq->cmd_type = REQ_TYPE_FS;
 	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d9d0984d2f010..e644feec068c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -275,6 +275,26 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
+/*
+ * Clone all relevant state from a request that has been put on hold in
+ * the flush state machine into the preallocated flush request that hangs
+ * off the request queue.
+ *
+ * For a driver the flush request should be invisible, that's why we are
+ * impersonating the original request here.
+ */
+void blk_mq_clone_flush_request(struct request *flush_rq,
+		struct request *orig_rq)
+{
+	struct blk_mq_hw_ctx *hctx =
+		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
+
+	flush_rq->mq_ctx = orig_rq->mq_ctx;
+	flush_rq->tag = orig_rq->tag;
+	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
+		hctx->cmd_size);
+}
+
 bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 238379a612e42..7964dadb7d64b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,6 +27,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_clone_flush_request(struct request *flush_rq,
+		struct request *orig_rq);
 
 /*
  * CPU hotplug helpers