author	Dan Williams <dan.j.williams@intel.com>	2015-10-21 13:20:12 -0400
committer	Jens Axboe <axboe@fb.com>	2015-10-21 14:43:41 -0600
commit	3ef28e83ab15799742e55fd13243a5f678b04242 (patch)
tree	e594552aaeaafed8468ae96e54992cf1697a1fc7 /block/blk-core.c
parent	4cfc766e07a5ed709a9d5289c8644fe78e9f24de (diff)
block: generic request_queue reference counting
Allow pmem, and other synchronous/bio-based block drivers, to fall
back on a per-cpu reference count managed by the core for tracking
queue live/dead state.

The existing per-cpu reference count for the blk_mq case is promoted
to be used in all block i/o scenarios.  This involves initializing it
by default, waiting for it to drop to zero at exit, and holding a live
reference over the invocation of q->make_request_fn() in
generic_make_request().  The blk_mq code continues to take its own
reference per blk_mq request and retains the ability to freeze the
queue, but the check that the queue is frozen is moved to
generic_make_request().

This fixes crash signatures like the following:

 BUG: unable to handle kernel paging request at ffff880140000000
 [..]
 Call Trace:
  [<ffffffff8145e8bf>] ? copy_user_handle_tail+0x5f/0x70
  [<ffffffffa004e1e0>] pmem_do_bvec.isra.11+0x70/0xf0 [nd_pmem]
  [<ffffffffa004e331>] pmem_make_request+0xd1/0x200 [nd_pmem]
  [<ffffffff811c3162>] ? mempool_alloc+0x72/0x1a0
  [<ffffffff8141f8b6>] generic_make_request+0xd6/0x110
  [<ffffffff8141f966>] submit_bio+0x76/0x170
  [<ffffffff81286dff>] submit_bh_wbc+0x12f/0x160
  [<ffffffff81286e62>] submit_bh+0x12/0x20
  [<ffffffff813395bd>] jbd2_write_superblock+0x8d/0x170
  [<ffffffff8133974d>] jbd2_mark_journal_empty+0x5d/0x90
  [<ffffffff813399cb>] jbd2_journal_destroy+0x24b/0x270
  [<ffffffff810bc4ca>] ? put_pwq_unlocked+0x2a/0x30
  [<ffffffff810bc6f5>] ? destroy_workqueue+0x225/0x250
  [<ffffffff81303494>] ext4_put_super+0x64/0x360
  [<ffffffff8124ab1a>] generic_shutdown_super+0x6a/0xf0

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
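The freeze half of the protocol lives in blk-mq.c and is therefore not
visible in the blk-core.c diff below. As a hedged sketch reconstructed
from the description above (sketch_freeze_queue is illustrative, not
code from this series), freezing amounts to killing the reference and
waiting for it to drain:

#include <linux/blkdev.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

/*
 * Illustrative sketch only: percpu_ref_kill() marks q_usage_counter
 * dying, so percpu_ref_tryget_live() in blk_queue_enter() starts
 * failing for new submitters; we then sleep until the release
 * callback (added in the diff below) wakes mq_freeze_wq after the
 * last in-flight reference is dropped.
 */
static void sketch_freeze_queue(struct request_queue *q)
{
	percpu_ref_kill(&q->q_usage_counter);
	wait_event(q->mq_freeze_wq,
		   percpu_ref_is_zero(&q->q_usage_counter));
}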
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  71
1 file changed, 62 insertions(+), 9 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773c..9b4d735cb5b8b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,13 +554,10 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
-	if (q->mq_ops) {
-		blk_mq_freeze_queue(q);
-		spin_lock_irq(lock);
-	} else {
-		spin_lock_irq(lock);
+	blk_freeze_queue(q);
+	spin_lock_irq(lock);
+	if (!q->mq_ops)
 		__blk_drain_queue(q, true);
-	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
@@ -570,6 +567,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
+	percpu_ref_exit(&q->q_usage_counter);
 
 	spin_lock_irq(lock);
 	if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +627,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+	while (true) {
+		int ret;
+
+		if (percpu_ref_tryget_live(&q->q_usage_counter))
+			return 0;
+
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
+		ret = wait_event_interruptible(q->mq_freeze_wq,
+				!atomic_read(&q->mq_freeze_depth) ||
+				blk_queue_dying(q));
+		if (blk_queue_dying(q))
+			return -ENODEV;
+		if (ret)
+			return ret;
+	}
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+	percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+	struct request_queue *q =
+		container_of(ref, struct request_queue, q_usage_counter);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -690,11 +722,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
-	if (blkcg_init_queue(q))
+	/*
+	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
+	 * See blk_register_queue() for details.
+	 */
+	if (percpu_ref_init(&q->q_usage_counter,
+				blk_queue_usage_counter_release,
+				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_bdi;
 
+	if (blkcg_init_queue(q))
+		goto fail_ref;
+
 	return q;
 
+fail_ref:
+	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1966,9 +2009,19 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		q->make_request_fn(q, bio);
+		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+			q->make_request_fn(q, bio);
+
+			blk_queue_exit(q);
 
-		bio = bio_list_pop(current->bio_list);
+			bio = bio_list_pop(current->bio_list);
+		} else {
+			struct bio *bio_next = bio_list_pop(current->bio_list);
+
+			bio_io_error(bio);
+			bio = bio_next;
+		}
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
 }
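With generic_make_request() now bracketing q->make_request_fn() with
blk_queue_enter()/blk_queue_exit(), other synchronous paths can pin the
queue the same way. A hedged usage sketch (sketch_touch_device is a
hypothetical caller, not part of this patch):

#include <linux/blkdev.h>

/*
 * Hypothetical caller: hold a q_usage_counter reference so that
 * blk_cleanup_queue() cannot complete while the device behind @q
 * is being accessed.
 */
static int sketch_touch_device(struct request_queue *q)
{
	int ret = blk_queue_enter(q, __GFP_WAIT);

	if (ret)
		return ret;	/* -EBUSY, -ENODEV, or -ERESTARTSYS */

	/* ... safe to touch the device here ... */

	blk_queue_exit(q);
	return 0;
}

On success the reference must be paired with blk_queue_exit(); without
__GFP_WAIT the call fails fast with -EBUSY while the queue is frozen,
and -ENODEV is returned once the queue is dying.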