io_uring/rw: avoid punting to io-wq directly
kiocb_done() shouldn't need to care about specifically redirecting requests to io-wq. Remove the hop to task work that then queues io-wq; instead return -EAGAIN and let the core io_uring code handle the offloading. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Tested-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/413564e550fe23744a970e1783dfa566291b0e6f.1710799188.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
1afdb76038
commit
6e6b8c6212
@ -492,7 +492,7 @@ static void io_prep_async_link(struct io_kiocb *req)
|
||||
}
|
||||
}
|
||||
|
||||
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
|
||||
static void io_queue_iowq(struct io_kiocb *req)
|
||||
{
|
||||
struct io_kiocb *link = io_prep_linked_timeout(req);
|
||||
struct io_uring_task *tctx = req->task->io_uring;
|
||||
@ -1499,7 +1499,7 @@ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
if (unlikely(req->task->flags & PF_EXITING))
|
||||
io_req_defer_failed(req, -EFAULT);
|
||||
else if (req->flags & REQ_F_FORCE_ASYNC)
|
||||
io_queue_iowq(req, ts);
|
||||
io_queue_iowq(req);
|
||||
else
|
||||
io_queue_sqe(req);
|
||||
}
|
||||
@ -2087,7 +2087,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
|
||||
break;
|
||||
case IO_APOLL_ABORTED:
|
||||
io_kbuf_recycle(req, 0);
|
||||
io_queue_iowq(req, NULL);
|
||||
io_queue_iowq(req);
|
||||
break;
|
||||
case IO_APOLL_OK:
|
||||
break;
|
||||
@ -2134,7 +2134,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
|
||||
if (unlikely(req->ctx->drain_active))
|
||||
io_drain_req(req);
|
||||
else
|
||||
io_queue_iowq(req, NULL);
|
||||
io_queue_iowq(req);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,7 +79,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
|
||||
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
|
||||
bool io_alloc_async_data(struct io_kiocb *req);
|
||||
void io_req_task_queue(struct io_kiocb *req);
|
||||
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
|
||||
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
|
||||
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
|
||||
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
|
||||
|
@ -187,12 +187,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void io_req_task_queue_reissue(struct io_kiocb *req)
|
||||
{
|
||||
req->io_task_work.func = io_queue_iowq;
|
||||
io_req_task_work_add(req);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
static bool io_resubmit_prep(struct io_kiocb *req)
|
||||
{
|
||||
@ -405,7 +399,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
|
||||
if (req->flags & REQ_F_REISSUE) {
|
||||
req->flags &= ~REQ_F_REISSUE;
|
||||
if (io_resubmit_prep(req))
|
||||
io_req_task_queue_reissue(req);
|
||||
return -EAGAIN;
|
||||
else
|
||||
io_req_task_queue_fail(req, final_ret);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user