author     Jens Axboe <axboe@kernel.dk>  2018-11-06 13:30:55 -0700
committer  Jens Axboe <axboe@kernel.dk>  2018-11-19 08:34:50 -0700
commit     85f4d4b65fdd67f1d6dc9eeb1d91923cef07eb6a (patch)
tree       8a87578fc6753431f9d698703ad0217392916f3e /block
parent     849a370016a5489c49253338507ee6cc4a08df4b (diff)
block: have ->poll_fn() return number of entries polled
We currently only really support sync poll, i.e. poll with one IO in flight. This prepares us for supporting async poll.

Note that the returned value isn't necessarily 100% accurate. If poll races with IRQ completion, we assume that the fact that the task is now runnable means we found at least one entry. In reality it could be more than one, or not even one. This is fine, the caller will just need to take this into account.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
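For illustration, a minimal caller-side sketch of the new contract (hypothetical, not part of this patch; the helper name and the -EINTR bail-out are assumptions):

/*
 * Hypothetical caller sketch: with this change, ->poll_fn() returns
 * the number of entries polled rather than a bool. 0 means nothing
 * was found, a negative value signals an error, and a positive count
 * may be approximate when poll races with IRQ completion.
 */
static int poll_until_found(struct request_queue *q, blk_qc_t cookie)
{
	int found;

	do {
		found = q->poll_fn(q, cookie);
		if (found < 0)
			return found;	/* propagate poll errors */
		if (signal_pending(current))
			return -EINTR;	/* hypothetical bail-out */
		cpu_relax();		/* nothing yet, keep spinning */
	} while (!found);

	return found;	/* entries found, possibly approximate */
}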
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7fc4abb4cc36e..52b1c97cd7c6f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -38,7 +38,7 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -3305,7 +3305,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
return true;
}
-static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct request_queue *q = hctx->queue;
long state;
@@ -3318,7 +3318,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
* straight to the busy poll loop.
*/
if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
- return true;
+ return 1;
hctx->poll_considered++;
@@ -3332,30 +3332,30 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
if (ret > 0) {
hctx->poll_success++;
__set_current_state(TASK_RUNNING);
- return true;
+ return ret;
}
if (signal_pending_state(state, current))
__set_current_state(TASK_RUNNING);
if (current->state == TASK_RUNNING)
- return true;
+ return 1;
if (ret < 0)
break;
cpu_relax();
}
__set_current_state(TASK_RUNNING);
- return false;
+ return 0;
}
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_mq_hw_ctx *hctx;
struct request *rq;
if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
- return false;
+ return 0;
hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
if (!blk_qc_t_is_internal(cookie))
@@ -3369,7 +3369,7 @@ static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
* so we should be safe with just the NULL check.
*/
if (!rq)
- return false;
+ return 0;
}
return __blk_mq_poll(hctx, rq);
}