author    Christoph Hellwig <hch@lst.de>  2016-06-09 16:00:36 +0200
committer Jens Axboe <axboe@fb.com>       2016-06-09 09:52:25 -0600
commit    288dab8a35a0bde426a09870943c8d3ee3a50dab
tree      483fd3eb60ff8f44d149fb28d3b79e5212645104
parent    56332f02a562390a3198525ad78cb4f558805c0f
block: add a separate operation type for secure erase
Instead of overloading the discard support with the REQ_SECURE flag, add a separate REQ_OP_SECURE_ERASE operation type. Use the opportunity to rename the queue flag as well, and remove the dead checks for this flag in the RAID 1 and RAID 10 drivers that don't claim support for secure erase.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
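For illustration, here is a minimal caller-side sketch (not part of the patch) of what the change means for users of the discard API: blkdev_issue_discard() keeps its signature, and BLKDEV_DISCARD_SECURE in the flags argument now makes __blkdev_issue_discard() emit REQ_OP_SECURE_ERASE bios instead of REQ_OP_DISCARD bios tagged with REQ_SECURE. The wrapper function below is hypothetical; only the identifiers that appear in this diff are taken from the patch.

#include <linux/blkdev.h>

/* Hypothetical helper: erase a range, optionally using secure erase. */
static int erase_range(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects, bool secure)
{
	unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;

	/*
	 * After this patch the secure case fails with -EOPNOTSUPP unless
	 * blk_queue_secure_erase() is true for the backing queue; otherwise
	 * the bios are issued with op REQ_OP_SECURE_ERASE rather than
	 * REQ_OP_DISCARD.
	 */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, flags);
}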
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  | 27
-rw-r--r--  block/blk-lib.c   | 25
-rw-r--r--  block/blk-merge.c |  6
3 files changed, 33 insertions(+), 25 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 32a283eb7274c..db31a29812232 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1977,16 +1977,21 @@ generic_make_request_checks(struct bio *bio)
}
}
- if ((bio_op(bio) == REQ_OP_DISCARD) &&
- (!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
- err = -EOPNOTSUPP;
- goto end_io;
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ if (!blk_queue_discard(q))
+ goto not_supported;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ if (!blk_queue_secure_erase(q))
+ goto not_supported;
+ break;
+ case REQ_OP_WRITE_SAME:
+ if (!bdev_write_same(bio->bi_bdev))
+ goto not_supported;
+ break;
+ default:
+ break;
}
/*
@@ -2003,6 +2008,8 @@ generic_make_request_checks(struct bio *bio)
trace_block_bio_queue(q, bio);
return true;
+not_supported:
+ err = -EOPNOTSUPP;
end_io:
bio->bi_error = err;
bio_endio(bio);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ff2a7f04af4df..78626c2fde339 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -23,20 +23,27 @@ static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+ sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop)
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
+ enum req_op op;
int alignment;
if (!q)
return -ENXIO;
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
- if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
+
+ if (flags & BLKDEV_DISCARD_SECURE) {
+ if (!blk_queue_secure_erase(q))
+ return -EOPNOTSUPP;
+ op = REQ_OP_SECURE_ERASE;
+ } else {
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+ op = REQ_OP_DISCARD;
+ }
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -66,7 +73,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio = next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
- bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);
+ bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
@@ -100,16 +107,12 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
- int op_flags = 0;
struct bio *bio = NULL;
struct blk_plug plug;
int ret;
- if (flags & BLKDEV_DISCARD_SECURE)
- op_flags |= REQ_SECURE;
-
blk_start_plug(&plug);
- ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
&bio);
if (!ret && bio) {
ret = submit_bio_wait(bio);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c265348b75d1a..9772308a83918 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -649,8 +649,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (!rq_mergeable(req) || !rq_mergeable(next))
return 0;
- if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
- req_op(next)))
+ if (req_op(req) != req_op(next))
return 0;
/*
@@ -752,8 +751,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
- if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
- bio_op(bio)))
+ if (req_op(rq) != bio_op(bio))
return false;
/* different data direction or already started, don't merge */
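As a closing note, a hedged driver-side sketch of how a queue might advertise the new capability and how a bio-based driver might dispatch on it. The foo_* names are placeholders, and QUEUE_FLAG_SECERASE is an assumption based on the queue-flag rename mentioned in the log; only bio_op(), blk_queue_secure_erase() and the REQ_OP_* values appear in this diff.

/* Hypothetical driver sketch; foo_* names are placeholders. */
static void foo_setup_queue(struct request_queue *q, bool can_secure_erase)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	if (can_secure_erase)
		/* assumed new name of the renamed queue flag */
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

static void foo_handle_bio(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		/* plain discard */
		break;
	case REQ_OP_SECURE_ERASE:
		/* its own op now; no REQ_SECURE flag left to test */
		break;
	default:
		break;
	}
	bio_endio(bio);
}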