From e488952b9d04fb0fc7dddd31ec639549d71c76b3 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Fri, 13 Dec 2019 12:16:51 +0100
Subject: block: Implement discard_range

This implements the discard_range hook. When a range of data is
discarded, we do not have to read it from the device and can pass a
zeroed buffer instead.

Signed-off-by: Sascha Hauer
---
 common/block.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'common')

diff --git a/common/block.c b/common/block.c
index 97cf5dc4de..8b43c3c83a 100644
--- a/common/block.c
+++ b/common/block.c
@@ -161,6 +161,14 @@ static int block_cache(struct block_device *blk, int block)
 	dev_dbg(blk->dev, "%s: %d to %d\n", __func__, chunk->block_start,
 		chunk->num);
 
+	if (chunk->block_start * BLOCKSIZE(blk) >= blk->discard_start &&
+	    chunk->block_start * BLOCKSIZE(blk) + writebuffer_io_len(blk, chunk)
+	    <= blk->discard_start + blk->discard_size) {
+		memset(chunk->data, 0, writebuffer_io_len(blk, chunk));
+		list_add(&chunk->list, &blk->buffered_blocks);
+		return 0;
+	}
+
 	ret = blk->ops->read(blk, chunk->data, chunk->block_start,
 			     writebuffer_io_len(blk, chunk));
 	if (ret) {
@@ -337,11 +345,23 @@ static int block_op_flush(struct cdev *cdev)
 {
 	struct block_device *blk = cdev->priv;
 
+	blk->discard_start = blk->discard_size = 0;
+
 	return writebuffer_flush(blk);
 }
 
 static int block_op_close(struct cdev *cdev) __alias(block_op_flush);
 
+static int block_op_discard_range(struct cdev *cdev, loff_t count, loff_t offset)
+{
+	struct block_device *blk = cdev->priv;
+
+	blk->discard_start = offset;
+	blk->discard_size = count;
+
+	return 0;
+}
+
 static struct cdev_operations block_ops = {
 	.read = block_op_read,
 #ifdef CONFIG_BLOCK_WRITE
@@ -349,6 +369,7 @@ static struct cdev_operations block_ops = {
 #endif
 	.close = block_op_close,
 	.flush = block_op_flush,
+	.discard_range = block_op_discard_range,
 };
 
 int blockdevice_register(struct block_device *blk)
--
cgit v1.2.3


From 73290bcfd105cd72e45c4843e8625033cefcc089 Mon Sep 17 00:00:00 2001
From: Ahmad Fatoum
Date: Thu, 27 Feb 2020 17:58:25 +0100
Subject: Revert "block: Adjust cache sizes"

On 12/13/19 2:12 PM, Sascha Hauer wrote:
> On Tue, Dec 10, 2019 at 03:44:52PM +0100, Hubert Feurstein wrote:
>> With v2015.06.0 the indicated progress of the copy command is very
>> smooth. Calling "cp -v /dev/zero /dev/mmc3.root" takes about 80
>> seconds for 256MB. But with v2019.12.0 the progress is very bumpy and
>> the copy takes about 280 seconds.
>>
>> I've tracked this down to this commit which destroys the performance:
>> "block: Adjust cache sizes" (b6fef20c1215c6ef0004f6af4a9c4b77af51dc43)
>
> We could just revert this patch. I can't find any workload that gets
> faster with b6fef20c1215. It's rather the other way round.

Do this by reverting commit b6fef20c1215c6ef0004f6af4a9c4b77af51dc43.
Reported-by: Hubert Feurstein
Suggested-by: Sascha Hauer
Signed-off-by: Ahmad Fatoum
Signed-off-by: Sascha Hauer
---
 common/block.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'common')

diff --git a/common/block.c b/common/block.c
index 8b43c3c83a..02be80d7cc 100644
--- a/common/block.c
+++ b/common/block.c
@@ -36,7 +36,7 @@ struct chunk {
 	struct list_head list;
 };
 
-#define BUFSIZE (PAGE_SIZE * 4)
+#define BUFSIZE (PAGE_SIZE * 16)
 
 static int writebuffer_io_len(struct block_device *blk, struct chunk *chunk)
 {
@@ -391,7 +391,7 @@ int blockdevice_register(struct block_device *blk)
 	dev_dbg(blk->dev, "rdbufsize: %d blockbits: %d blkmask: 0x%08x\n",
 		blk->rdbufsize, blk->blockbits, blk->blkmask);
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < 8; i++) {
 		struct chunk *chunk = xzalloc(sizeof(*chunk));
 		chunk->data = dma_alloc(BUFSIZE);
 		chunk->num = i;
--
cgit v1.2.3
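
For context, a minimal sketch of how a caller could make use of the new discard_range hook before rewriting a region of a block device. Only the cdev->ops->discard_range() operation and its (cdev, count, offset) signature come from the patch above; the overwrite_region() wrapper is hypothetical and the cdev_write() call path is an assumption for illustration.

/*
 * Illustrative sketch only, not part of either patch: hint that a range
 * is about to be overwritten, so the block write buffer can hand out
 * zeroed chunks instead of reading stale data back from the device.
 * overwrite_region() is a hypothetical helper; the cdev_write() call
 * path is assumed for illustration.
 */
#include <common.h>
#include <driver.h>

static ssize_t overwrite_region(struct cdev *cdev, const void *buf,
				size_t count, loff_t offset)
{
	/* Declare the old contents of [offset, offset + count) dead. */
	if (cdev->ops->discard_range)
		cdev->ops->discard_range(cdev, count, offset);

	/* Writes into the discarded window can now be cached without a
	 * prior read from the underlying device. */
	return cdev_write(cdev, buf, count, offset, 0);
}

Note that block_op_flush() clears the discard window, so the hint only covers writes issued before the next flush or close of the device.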