diff options
author | Andrey Smirnov <andrew.smirnov@gmail.com> | 2019-01-21 22:06:24 -0800 |
---|---|---|
committer | Sascha Hauer <s.hauer@pengutronix.de> | 2019-01-22 08:57:21 +0100 |
commit | 17c4b989a5d00fe5fc84c528bbd59583c45d80d0 (patch) | |
tree | dbe49644ba01922270c74b85df7f9499cdfe9b40 | |
parent | cee4c2a064796db730c572bf8973e799c1b89b02 (diff) | |
download | barebox-17c4b989a5d00fe5fc84c528bbd59583c45d80d0.tar.gz barebox-17c4b989a5d00fe5fc84c528bbd59583c45d80d0.tar.xz |
block: Do not write past block device boundary during a flush
When calling the I/O functions of the underlying block device driver we
always need to make sure that the transfer length is small enough not
to go past the device's boundary. Not only in get_chunk() and block_cache(), but in
writebuffer_flush() as well. Since the same code is used in three
different places, move it into a subroutine and adjust all of the
calls to ->write()/->read() accordingly.
Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
-rw-r--r-- | common/block.c | 22 |
1 file changed, 13 insertions, 9 deletions
diff --git a/common/block.c b/common/block.c index 35173c65f1..3a031a4fc7 100644 --- a/common/block.c +++ b/common/block.c @@ -38,6 +38,11 @@ struct chunk { #define BUFSIZE (PAGE_SIZE * 4) +static int writebuffer_io_len(struct block_device *blk, struct chunk *chunk) +{ + return min(blk->rdbufsize, blk->num_blocks - chunk->block_start); +} + /* * Write all dirty chunks back to the device */ @@ -51,7 +56,9 @@ static int writebuffer_flush(struct block_device *blk) list_for_each_entry(chunk, &blk->buffered_blocks, list) { if (chunk->dirty) { - ret = blk->ops->write(blk, chunk->data, chunk->block_start, blk->rdbufsize); + ret = blk->ops->write(blk, chunk->data, + chunk->block_start, + writebuffer_io_len(blk, chunk)); if (ret < 0) return ret; @@ -118,10 +125,9 @@ static struct chunk *get_chunk(struct block_device *blk) /* use last entry which is the most unused */ chunk = list_last_entry(&blk->buffered_blocks, struct chunk, list); if (chunk->dirty) { - size_t num_blocks = min(blk->rdbufsize, - blk->num_blocks - chunk->block_start); - ret = blk->ops->write(blk, chunk->data, chunk->block_start, - num_blocks); + ret = blk->ops->write(blk, chunk->data, + chunk->block_start, + writebuffer_io_len(blk, chunk)); if (ret < 0) return ERR_PTR(ret); @@ -145,7 +151,6 @@ static struct chunk *get_chunk(struct block_device *blk) static int block_cache(struct block_device *blk, int block) { struct chunk *chunk; - size_t num_blocks; int ret; chunk = get_chunk(blk); @@ -157,9 +162,8 @@ static int block_cache(struct block_device *blk, int block) dev_dbg(blk->dev, "%s: %d to %d\n", __func__, chunk->block_start, chunk->num); - num_blocks = min(blk->rdbufsize, blk->num_blocks - chunk->block_start); - - ret = blk->ops->read(blk, chunk->data, chunk->block_start, num_blocks); + ret = blk->ops->read(blk, chunk->data, chunk->block_start, + writebuffer_io_len(blk, chunk)); if (ret) { list_add_tail(&chunk->list, &blk->idle_blocks); return ret; |