author	Christoph Hellwig <hch@lst.de>	2018-05-09 09:54:06 +0200
committer	Jens Axboe <axboe@kernel.dk>	2018-05-14 08:55:14 -0600
commit	4accf5fc790b7e17d838265cc58a46b3ddf64721
tree	99d89116c4342a74fe6cdc3b45a740a1b6063c4d	/block/blk-core.c
parent	ff005a066240efb73ae29a2bb9269ae726bc2eae
block: pass an explicit gfp_t to get_request
blk_old_get_request already has it at hand, and in blk_queue_bio, which is
the fast path, it is constant.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
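In short, the patch hoists the allocation-mode decision out of __get_request() and has callers pass it down. A condensed before/after sketch, pieced together from the hunks below (illustrative, not a verbatim excerpt of the file):

	/* Before: __get_request() re-derived the mode from the flags on every call. */
	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
			 __GFP_DIRECT_RECLAIM;

	/* After: callers pass the gfp_t they already have at hand. */
	rq = get_request(q, op, NULL, flags, gfp_mask);	/* blk_old_get_request() */
	req = get_request(q, bio->bi_opf, bio, 0, __GFP_DIRECT_RECLAIM);	/* blk_queue_bio(): constant on the fast path */
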
diff --git a/block/blk-core.c b/block/blk-core.c
index 5da706ae34415..1fe7c7cf42fec 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1332,6 +1332,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @flags: BLK_MQ_REQ_* flags
+ * @gfp_mask: allocator flags
  *
  * Get a free request from @q. This function may fail under memory
  * pressure or if @q is dead.
@@ -1341,7 +1342,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, unsigned int op,
-		struct bio *bio, blk_mq_req_flags_t flags)
+		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
@@ -1350,8 +1351,6 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	struct io_cq *icq = NULL;
 	const bool is_sync = op_is_sync(op);
 	int may_queue;
-	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
-			 __GFP_DIRECT_RECLAIM;
 	req_flags_t rq_flags = RQF_ALLOCED;
 
 	lockdep_assert_held(q->queue_lock);
@@ -1515,6 +1514,7 @@ rq_starved:
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @flags: BLK_MQ_REQ_* flags.
+ * @gfp: allocator flags
  *
  * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is not set in @flags,
  * this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1524,7 +1524,7 @@ rq_starved:
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, unsigned int op,
-		struct bio *bio, blk_mq_req_flags_t flags)
+		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
 {
 	const bool is_sync = op_is_sync(op);
 	DEFINE_WAIT(wait);
@@ -1536,7 +1536,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, op, bio, flags);
+	rq = __get_request(rl, op, bio, flags, gfp);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1590,7 +1590,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	if (ret)
 		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, op, NULL, flags);
+	rq = get_request(q, op, NULL, flags, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		blk_queue_exit(q);
@@ -2056,7 +2056,7 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	blk_queue_enter_live(q);
-	req = get_request(q, bio->bi_opf, bio, 0);
+	req = get_request(q, bio->bi_opf, bio, 0, __GFP_DIRECT_RECLAIM);
 	if (IS_ERR(req)) {
 		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);
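
For context, the two modes the removed ternary chose between have the usual allocator semantics: GFP_ATOMIC never sleeps, which is what BLK_MQ_REQ_NOWAIT callers need, while __GFP_DIRECT_RECLAIM lets the allocation block and reclaim memory, which the bio submission path can tolerate. A hypothetical helper (not part of this patch or the kernel API) that spells out the policy callers now encode themselves:

	/* Hypothetical helper: the policy __get_request() used to hardcode. */
	static inline gfp_t blk_req_gfp(blk_mq_req_flags_t flags)
	{
		/* NOWAIT callers must not sleep: atomic allocation, fails fast. */
		if (flags & BLK_MQ_REQ_NOWAIT)
			return GFP_ATOMIC;
		/* Everyone else may block and enter direct reclaim. */
		return __GFP_DIRECT_RECLAIM;
	}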