summaryrefslogtreecommitdiffstats
path: root/common
diff options
context:
space:
mode:
authorSascha Hauer <s.hauer@pengutronix.de>2018-03-29 13:49:45 +0200
committerSascha Hauer <s.hauer@pengutronix.de>2018-07-13 08:56:52 +0200
commitb6fef20c1215c6ef0004f6af4a9c4b77af51dc43 (patch)
treeff23e9eb9505ffc0459da88ef1312058fd1e07f5 /common
parent551219844e92e33c0f41757d225474ebd3bc569c (diff)
downloadbarebox-b6fef20c1215c6ef0004f6af4a9c4b77af51dc43.tar.gz
barebox-b6fef20c1215c6ef0004f6af4a9c4b77af51dc43.tar.xz
block: Adjust cache sizes
Use four times more cache entries and divide the memory for each entry by four. This lowers the linear read throughput somewhat but increases the access speed for filesystems. Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'common')
-rw-r--r-- common/block.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/common/block.c b/common/block.c
index 55d8d1637e..219b943afc 100644
--- a/common/block.c
+++ b/common/block.c
@@ -36,7 +36,7 @@ struct chunk {
struct list_head list;
};
-#define BUFSIZE (PAGE_SIZE * 16)
+#define BUFSIZE (PAGE_SIZE * 4)
/*
* Write all dirty chunks back to the device
@@ -361,7 +361,7 @@ int blockdevice_register(struct block_device *blk)
debug("%s: rdbufsize: %d blockbits: %d blkmask: 0x%08x\n", __func__, blk->rdbufsize, blk->blockbits,
blk->blkmask);
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 32; i++) {
struct chunk *chunk = xzalloc(sizeof(*chunk));
chunk->data = dma_alloc(BUFSIZE);
chunk->num = i;