author		Gao Xiang <hsiangkao@redhat.com>	2021-03-22 02:32:27 +0800
committer	Gao Xiang <hsiangkao@redhat.com>	2021-03-29 10:18:01 +0800
commit		0b964600d3aae56ff9d5bdd710a79f39a44c572c (patch)
tree		550634ce6fb92373b5691fbf2c5b5b7660ca6847 /fs/erofs
parent		30048cdac4b92f39ee50e2a1344f5899f8e70cb6 (diff)
download	linux-0b964600d3aae56ff9d5bdd710a79f39a44c572c.tar.gz
		linux-0b964600d3aae56ff9d5bdd710a79f39a44c572c.tar.xz
erofs: complete a missing case for inplace I/O
Add a missing case which could cause unnecessary page allocation instead of directly using inplace I/O, which increases the runtime memory footprint.

In detail, consider an online file-backed page whose right half is chosen to be cached (e.g. the end page of a readahead request) while some of its data doesn't exist in the managed cache, so the pcluster will definitely be kept in the submission chain (IOWs, it cannot be decompressed without I/O, e.g. via the bypass queue).

Currently, the DELAYEDALLOC/TRYALLOC cases can be downgraded to NOINPLACE, which stops online pages from being used for inplace I/O. After this patch, unneeded page allocations will no longer be observed in pickup_page_for_submission().

Link: https://lore.kernel.org/r/20210321183227.5182-1-hsiangkao@aol.com
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
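To make the fix easier to follow before reading the diff, here is a minimal userspace sketch of the decision being corrected; the names (preload_decision, the bare PRIMARY_FOLLOWED enum, the cached[] array) are illustrative stand-ins, not the kernel API. It models the point of the patch: any managed-cache miss must clear standalone, so the downgrade that forbids inplace I/O only happens when the whole pcluster can already be decompressed without I/O.

/*
 * Minimal userspace sketch (illustrative names, not the kernel code below)
 * of the decision this patch fixes: any managed-cache miss must clear
 * "standalone", so the NOINPLACE downgrade only happens when the whole
 * pcluster can be decompressed without any I/O.
 */
#include <stdbool.h>
#include <stdio.h>

enum collectmode { PRIMARY_FOLLOWED, PRIMARY_FOLLOWED_NOINPLACE };

static enum collectmode preload_decision(const bool *cached, int nrpages)
{
	bool standalone = true;
	int i;

	for (i = 0; i < nrpages; ++i) {
		if (cached[i])
			continue;	/* already up to date in the managed cache */
		/*
		 * I/O is needed for this page, so the pcluster stays in the
		 * submission chain; after the fix this holds for DELAYEDALLOC,
		 * TRYALLOC and DONTALLOC alike.
		 */
		standalone = false;
	}
	/* only fully cached pclusters go to the bypass queue: no inplace I/O */
	return standalone ? PRIMARY_FOLLOWED_NOINPLACE : PRIMARY_FOLLOWED;
}

int main(void)
{
	bool partly_cached[2] = { true, false };	/* e.g. end page of a readahead */
	bool fully_cached[2] = { true, true };

	printf("partly cached: %s\n",
	       preload_decision(partly_cached, 2) == PRIMARY_FOLLOWED_NOINPLACE ?
	       "NOINPLACE (bypass queue)" : "inplace I/O stays possible");
	printf("fully cached:  %s\n",
	       preload_decision(fully_cached, 2) == PRIMARY_FOLLOWED_NOINPLACE ?
	       "NOINPLACE (bypass queue)" : "inplace I/O stays possible");
	return 0;
}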
Diffstat (limited to 'fs/erofs')
-rw-r--r--	fs/erofs/zdata.c	44
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 7ab8a4e3dfcb..cd9b76216925 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -104,6 +104,12 @@ enum z_erofs_collectmode {
 	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
 	 */
 	COLLECT_PRIMARY_HOOKED,
+	/*
+	 * a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it
+	 * could be dispatched into bypass queue later due to uptodated managed
+	 * pages. All related online pages cannot be reused for inplace I/O (or
+	 * pagevec) since it can be directly decoded without I/O submission.
+	 */
 	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
 	/*
 	 * The current collection has been linked with the owned chain, and
@@ -186,21 +192,25 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 
 		if (page) {
 			t = tag_compressed_page_justfound(page);
-		} else if (type == DELAYEDALLOC) {
-			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
-		} else if (type == TRYALLOC) {
-			newpage = erofs_allocpage(pagepool, gfp);
-			if (!newpage)
-				goto dontalloc;
-
-			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
-			t = tag_compressed_page_justfound(newpage);
-		} else {	/* DONTALLOC */
-dontalloc:
-			if (standalone)
-				clt->compressedpages = pages;
+		} else {
+			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
-			continue;
+			switch (type) {
+			case DELAYEDALLOC:
+				t = tagptr_init(compressed_page_t,
+						PAGE_UNALLOCATED);
+				break;
+			case TRYALLOC:
+				newpage = erofs_allocpage(pagepool, gfp);
+				if (!newpage)
+					continue;
+				set_page_private(newpage,
+						 Z_EROFS_PREALLOCATED_PAGE);
+				t = tag_compressed_page_justfound(newpage);
+				break;
+			default:	/* DONTALLOC */
+				continue;
+			}
 		}
 
 		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
@@ -214,7 +224,11 @@ dontalloc:
 		}
 	}
 
-	if (standalone)		/* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
+	/*
+	 * don't do inplace I/O if all compressed pages are available in
+	 * managed cache since it can be moved to the bypass queue instead.
+	 */
+	if (standalone)
 		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 }
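For contrast, here is a simplified before/after sketch of the per-slot behaviour changed by the second hunk above; the helpers and enum are illustrative userspace stand-ins, not kernel code. Before the patch, a managed-cache miss under DELAYEDALLOC or TRYALLOC left standalone untouched, which wrongly triggered the NOINPLACE downgrade; after the patch every miss clears it, so online pages stay eligible for inplace I/O.

/*
 * Illustrative before/after comparison (userspace, simplified names) of the
 * per-slot handling on a managed-cache miss under DELAYEDALLOC or TRYALLOC.
 */
#include <stdbool.h>
#include <stdio.h>

enum alloctype { DONTALLOC, DELAYEDALLOC, TRYALLOC };

/* old shape: only the DONTALLOC branch cleared "standalone" */
static bool old_slot_standalone(bool cached, enum alloctype type)
{
	if (cached)
		return true;	/* found in the managed cache */
	if (type == DELAYEDALLOC || type == TRYALLOC)
		return true;	/* miss, yet standalone stayed true: the missing case */
	return false;		/* DONTALLOC */
}

/* new shape: every miss clears "standalone" before the allocation policy is chosen */
static bool new_slot_standalone(bool cached, enum alloctype type)
{
	(void)type;	/* DELAYEDALLOC, TRYALLOC and DONTALLOC behave the same here */
	if (cached)
		return true;
	/* I/O is needed, so the pcluster can no longer be decompressed directly */
	return false;
}

int main(void)
{
	printf("old: miss under DELAYEDALLOC keeps standalone=%d -> wrong NOINPLACE downgrade\n",
	       old_slot_standalone(false, DELAYEDALLOC));
	printf("new: miss under DELAYEDALLOC keeps standalone=%d -> inplace I/O remains usable\n",
	       new_slot_standalone(false, DELAYEDALLOC));
	return 0;
}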