author		Jens Axboe <axboe@kernel.dk>	2020-08-03 16:43:59 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-08-03 17:48:15 -0600
commit		c1dd91d16246b168b80af9b64c5cc35a66410455 (patch)
tree		f04e90419bba116503b9a881fff548e7b18b093a
parent		cbd287c09351f1d3a4b3cb9167a2616a11390d32 (diff)
io_uring: add comments on how the async buffered read retry works
The retry based logic here isn't easy to follow unless you're already
familiar with how io_uring does task_work based retries. Add some
comments explaining the flow a little better.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	23
1 file changed, 22 insertions(+), 1 deletion(-)
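
The task_work based retry flow the message refers to can be pictured with a
small userspace model. This is only a sketch; fake_page, try_copy() and
task_work_retry() are invented names for illustration, not the kernel's
implementation. It shows the shape of the flow: the first nonblocking attempt
fails because the data isn't ready, and IO completion triggers a deferred
retry in the submitting task.

/*
 * Minimal userspace model of a task_work based retry. All names below
 * are hypothetical; this is not the kernel's actual code.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool uptodate;		/* has the "IO" filled this page yet? */
};

/* first, nonblocking attempt: only succeeds if data is already there */
static bool try_copy(struct fake_page *p)
{
	return p->uptodate;
}

/* models the retry queued via task_work in the submitting task */
static void task_work_retry(struct fake_page *p)
{
	if (try_copy(p))
		printf("retry copied the data\n");
	else
		printf("page still not uptodate, punt to a worker thread\n");
}

int main(void)
{
	struct fake_page page = { .uptodate = false };

	if (!try_copy(&page)) {
		/* pretend the page IO completed and unlocked the page */
		page.uptodate = true;
		task_work_retry(&page);
	}
	return 0;
}
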
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bb4f0b2d5138..5e1c08e22990 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2952,6 +2952,16 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return io_rw_prep_async(req, READ, force_nonblock);
 }
 
+/*
+ * This is our waitqueue callback handler, registered through lock_page_async()
+ * when we initially tried to do the IO with the iocb that armed our waitqueue.
+ * This gets called when the page is unlocked, and we generally expect that to
+ * happen when the page IO is completed and the page is now uptodate. This will
+ * queue a task_work based retry of the operation, attempting to copy the data
+ * again. If the latter fails because the page was NOT uptodate, then we will
+ * do a thread based blocking retry of the operation. That's the unexpected
+ * slow path.
+ */
 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 			     int sync, void *arg)
 {
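
To make the callback flow in the comment above concrete, here is a hedged
userspace sketch of the same shape: match the wakeup key against the page we
armed on, disarm so the entry fires at most once, then defer the retry to the
submitting task. struct wait_entry, buf_wake_func() and the queue_retry hook
are all invented names, not the kernel's wait_queue_entry machinery.

#include <stdio.h>

struct wake_key {
	const void *page;		/* page that was just unlocked */
};

struct wait_entry {
	const void *page;		/* page this waiter armed on */
	void (*queue_retry)(void *req);	/* task_work-style deferral */
	void *req;
	int armed;			/* still registered on the queue? */
};

/* called on page unlock; returns 1 if this waiter consumed the wakeup */
static int buf_wake_func(struct wait_entry *w, struct wake_key *key)
{
	if (!w->armed || w->page != key->page)
		return 0;		/* wakeup was for another page */
	w->armed = 0;			/* fire at most once */
	w->queue_retry(w->req);		/* retry runs in the original task */
	return 1;
}

static void retry_read(void *req)
{
	printf("queuing buffered read retry for req %p\n", req);
}

int main(void)
{
	char page;
	struct wait_entry w = {
		.page = &page, .queue_retry = retry_read,
		.req = &page, .armed = 1,
	};
	struct wake_key key = { .page = &page };

	buf_wake_func(&w, &key);
	return 0;
}
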
@@ -3004,7 +3014,18 @@ static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
 	return -EOPNOTSUPP;
 }
 
-
+/*
+ * This controls whether a given IO request should be armed for async page
+ * based retry. If we return false here, the request is handed to the async
+ * worker threads for retry. If we're doing buffered reads on a regular file,
+ * we prepare a private wait_page_queue entry and retry the operation. This
+ * will either succeed because the page is now uptodate and unlocked, or it
+ * will register a callback when the page is unlocked at IO completion. Through
+ * that callback, io_uring uses task_work to set up a retry of the operation.
+ * That retry will attempt the buffered read again. The retry will generally
+ * succeed, or in rare cases where it fails, we then fall back to using the
+ * async worker threads for a blocking retry.
+ */
 static bool io_rw_should_retry(struct io_kiocb *req)
 {
 	struct kiocb *kiocb = &req->rw.kiocb;
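
The decision the second comment describes is a series of cheap gates before
arming. Below is a sketch of that shape only; the RQ_* flags and
arm_page_wait() are invented for illustration and merely stand in for the
kernel's own checks on the request and the file.

#include <stdbool.h>
#include <stdio.h>

#define RQ_NOWAIT	(1u << 0)	/* caller wants pure nonblocking IO */
#define RQ_DIRECT	(1u << 1)	/* direct IO: no page cache to wait on */
#define RQ_CAN_POLL	(1u << 2)	/* pollable file: poll retry is better */
#define RQ_BUF_ASYNC	(1u << 3)	/* fs supports unlock callbacks */

struct sketch_req {
	unsigned int flags;
};

/* stand-in for preparing the private wait_page_queue entry */
static bool arm_page_wait(struct sketch_req *req)
{
	(void)req;
	return true;	/* assume registration succeeds */
}

/* true: armed for page-based retry; false: hand off to worker threads */
static bool rw_should_retry(struct sketch_req *req)
{
	if (req->flags & RQ_NOWAIT)
		return false;		/* just complete with -EAGAIN */
	if (req->flags & RQ_DIRECT)
		return false;		/* only buffered IO waits on pages */
	if (req->flags & RQ_CAN_POLL)
		return false;		/* poll-based retry handles this */
	if (!(req->flags & RQ_BUF_ASYNC))
		return false;		/* fs can't call us back on unlock */

	return arm_page_wait(req);
}

int main(void)
{
	struct sketch_req req = { .flags = RQ_BUF_ASYNC };

	printf("arm for page-based retry: %d\n", rw_should_retry(&req));
	return 0;
}
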