author		Sayali Lokhande <sayalil@codeaurora.org>	2020-04-30 16:28:29 +0530
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2020-05-11 20:36:47 -0700
commit		34c061ad85a2f5d5e9e3b045d72f3b211db6e282 (patch)
tree		82291cc1ba147c8eaa395da771ccd4ea65807519 /fs/f2fs/node.c
parent		baaa7ebf25c78c5cb712fac16b7f549100beddd3 (diff)
f2fs: Avoid double lock for cp_rwsem during checkpoint
There could be a scenario where f2fs_sync_node_pages gets called
during checkpoint, which in turn tries to flush inline data and
calls iput(). This results in deadlock as iput() tries to hold
cp_rwsem, which is already held at the beginning by
checkpoint->block_operations().

Call stack:

Thread A                                Thread B
f2fs_write_checkpoint()
- block_operations(sbi)
 - f2fs_lock_all(sbi);
  - down_write(&sbi->cp_rwsem);
                                        - open()
                                         - igrab()
                                        - write() write inline data
                                        - unlink()
- f2fs_sync_node_pages()
 - if (is_inline_node(page))
  - flush_inline_data()
   - ilookup()
     page = f2fs_pagecache_get_page()
     if (!page)
      goto iput_out;
     iput_out:
                                        - close()
                                        - iput()
       iput(inode);
        - f2fs_evict_inode()
         - f2fs_truncate_blocks()
          - f2fs_lock_op()
           - down_read(&sbi->cp_rwsem);

Fixes: 2049d4fcb057 ("f2fs: avoid multiple node page writes due to inline_data")
Signed-off-by: Sayali Lokhande <sayalil@codeaurora.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
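To make the double lock concrete, here is a condensed, illustrative sketch of Thread A's path (not code from this patch), written against the plain kernel rwsem API. A kernel rwsem is not recursive, so once the task holds cp_rwsem for write, a later down_read() on the same semaphore from the same task never returns:

/* Illustrative only: condenses Thread A's path from the call stack above. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(cp_rwsem);		/* stands in for sbi->cp_rwsem */

static void checkpoint_path(void)
{
	/* block_operations() -> f2fs_lock_all() */
	down_write(&cp_rwsem);

	/*
	 * f2fs_sync_node_pages() finds an inline node page, and
	 * flush_inline_data() ends in iput() -> f2fs_evict_inode() ->
	 * f2fs_truncate_blocks() -> f2fs_lock_op(), i.e.:
	 */
	down_read(&cp_rwsem);		/* same task, not recursive: deadlocks here */

	up_read(&cp_rwsem);
	up_write(&cp_rwsem);		/* never reached */
}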
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--	fs/f2fs/node.c	51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 1db8cabf727e..e632de10aeda 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1807,6 +1807,53 @@ static bool flush_dirty_inode(struct page *page)
 	return true;
 }
 
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+{
+	pgoff_t index = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	int ret = 0;
+
+	pagevec_init(&pvec);
+
+	while ((nr_pages = pagevec_lookup_tag(&pvec,
+			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (!IS_DNODE(page))
+				continue;
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+				unlock_page(page);
+				continue;
+			}
+
+			if (!PageDirty(page)) {
+				/* someone wrote it for us */
+				goto continue_unlock;
+			}
+
+			/* flush inline_data, if it's async context. */
+			if (is_inline_node(page)) {
+				clear_inline_node(page);
+				unlock_page(page);
+				flush_inline_data(sbi, ino_of_node(page));
+				continue;
+			}
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	return ret;
+}
+
 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 				struct writeback_control *wbc,
 				bool do_balance, enum iostat_type io_type)
@@ -1870,8 +1917,8 @@ continue_unlock:
 				goto continue_unlock;
 			}
 
-			/* flush inline_data */
-			if (is_inline_node(page)) {
+			/* flush inline_data, if it's async context. */
+			if (do_balance && is_inline_node(page)) {
 				clear_inline_node(page);
 				unlock_page(page);
 				flush_inline_data(sbi, ino_of_node(page));
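The node.c hunks above add the new helper and gate the inline_data flush in f2fs_sync_node_pages() on do_balance, so the checkpoint-time call (which passes do_balance == false) no longer reaches iput() while cp_rwsem is held. The other half of the patch lives in fs/f2fs/checkpoint.c and is outside this node.c-limited view; roughly, block_operations() calls the helper before taking cp_rwsem for write. A sketch of that usage, with the caveat that the exact placement in the real hunk may differ:

/*
 * Sketch of the companion use in block_operations(), fs/f2fs/checkpoint.c
 * (not shown in this node.c-limited diffstat); exact placement may differ.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	/* ... local setup ... */

	/* flush inline_data out of dirty node pages while cp_rwsem is still free */
	f2fs_flush_inline_data(sbi);

	f2fs_lock_all(sbi);		/* down_write(&sbi->cp_rwsem) */

	/* ... flush dents, quotas, imeta and nodes, then build the checkpoint ... */
	return 0;
}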