io_uring-6.8-2024-02-01
Merge tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix for a missing retry for multishot reads. If we trigger its
   execution and there's more than one buffer to be read, we don't
   always read more than the first one. As it's edge triggered, this
   can lead to stalls.

 - Limit inline multishot receive retries for fairness. If one socket
   is receiving a very bursty stream of data, we still need to ensure
   that other requests get processed too. This is two small cleanups,
   a way for poll reissue to trigger a requeue, and finally having
   multishot receive make use of that.

 - Fix for an odd corner case: non-multishot receive with MSG_WAITALL,
   using provided buffers, with the length set to zero (to let the
   buffer dictate the receive size).

* tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux:
  io_uring/net: fix sr->len for IORING_OP_RECV with MSG_WAITALL and buffers
  io_uring/net: limit inline multishot retries
  io_uring/poll: add requeue return code from poll multishot handling
  io_uring/net: un-indent mshot retry path in io_recv_finish()
  io_uring/poll: move poll execution helpers higher up
  io_uring/rw: ensure poll based multishot read retries appropriately
commit 717ca0b8e5
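For context, the userspace side of the multishot receive flow that most of these fixes tune looks roughly like the sketch below: a single SQE keeps producing CQEs flagged IORING_CQE_F_MORE until the request terminates and has to be re-armed. This is an illustrative liburing (2.4+) sketch, not code from the series; the socket fd, buffer-group id, buffer count and sizes are all assumptions. The MULTISHOT_MAX_RETRY cap and IOU_REQUEUE path added in the diff below bound how long the kernel stays in this loop inline before requeueing the work, which userspace only sees as better fairness.

#include <liburing.h>
#include <stdio.h>

#define BUF_GROUP	1	/* illustrative buffer-group id */
#define NR_BUFS		32	/* must be a power of two for a buffer ring */
#define BUF_SIZE	4096

static void recv_multishot_demo(int sockfd)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static char bufs[NR_BUFS][BUF_SIZE];
	int i, ret;

	if (io_uring_queue_init(64, &ring, 0) < 0)
		return;

	/* Register a provided-buffer ring; the kernel picks a buffer per CQE. */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BUF_GROUP, 0, &ret);
	if (!br) {
		io_uring_queue_exit(&ring);
		return;
	}
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* One SQE keeps posting CQEs until it completes without IORING_CQE_F_MORE. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BUF_GROUP;
	io_uring_submit(&ring);

	while (io_uring_wait_cqe(&ring, &cqe) == 0) {
		int res = cqe->res;
		unsigned flags = cqe->flags;

		io_uring_cqe_seen(&ring, cqe);
		if (res <= 0)
			break;		/* error or the peer closed the connection */

		int bid = flags >> IORING_CQE_BUFFER_SHIFT;
		printf("got %d bytes in buffer %d\n", res, bid);

		/* Recycle the buffer so the kernel can keep receiving into it. */
		io_uring_buf_ring_add(br, bufs[bid], BUF_SIZE, bid,
				      io_uring_buf_ring_mask(NR_BUFS), 0);
		io_uring_buf_ring_advance(br, 1);

		if (!(flags & IORING_CQE_F_MORE))
			break;		/* multishot ended (e.g. out of buffers); re-arm if desired */
	}

	io_uring_free_buf_ring(&ring, br, NR_BUFS, BUF_GROUP);
	io_uring_queue_exit(&ring);
}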
io_uring/io_uring.h:

@@ -15,11 +15,17 @@
 #include <trace/events/io_uring.h>
 #endif
 
 enum {
 	IOU_OK = 0,
 	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 
+	/*
+	 * Requeue the task_work to restart operations on this request. The
+	 * actual value isn't important, should just be not an otherwise
+	 * valid error code, yet less than -MAX_ERRNO and valid internally.
+	 */
+	IOU_REQUEUE = -3072,
+
 	/*
 	 * Intended only when both IO_URING_F_MULTISHOT is passed
 	 * to indicate to the poll runner that multishot should be
io_uring/net.c:

@@ -60,6 +60,7 @@ struct io_sr_msg {
 	unsigned len;
 	unsigned done_io;
 	unsigned msg_flags;
+	unsigned nr_multishot_loops;
 	u16 flags;
 	/* initialised and used only by !msg send variants */
 	u16 addr_len;

@@ -70,6 +71,13 @@ struct io_sr_msg {
 	struct io_kiocb *notif;
 };
 
+/*
+ * Number of times we'll try and do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY	32
+
 static inline bool io_check_multishot(struct io_kiocb *req,
 				      unsigned int issue_flags)
 {

@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
 	sr->done_io = 0;
+	sr->nr_multishot_loops = 0;
 	return 0;
 }
 
@@ -645,23 +654,35 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		return true;
 	}
 
-	if (!mshot_finished) {
-		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-					*ret, cflags | IORING_CQE_F_MORE)) {
-			io_recv_prep_retry(req);
-			/* Known not-empty or unknown state, retry */
-			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
-			    msg->msg_inq == -1)
-				return false;
-			if (issue_flags & IO_URING_F_MULTISHOT)
-				*ret = IOU_ISSUE_SKIP_COMPLETE;
-			else
-				*ret = -EAGAIN;
-			return true;
-		}
-		/* Otherwise stop multishot but use the current result. */
-	}
-
+	if (mshot_finished)
+		goto finish;
+
+	/*
+	 * Fill CQE for this receive and see if we should keep trying to
+	 * receive from this socket.
+	 */
+	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+				*ret, cflags | IORING_CQE_F_MORE)) {
+		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
+		io_recv_prep_retry(req);
+		/* Known not-empty or unknown state, retry */
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+				return false;
+			/* mshot retries exceeded, force a requeue */
+			sr->nr_multishot_loops = 0;
+			mshot_retry_ret = IOU_REQUEUE;
+		}
+		if (issue_flags & IO_URING_F_MULTISHOT)
+			*ret = mshot_retry_ret;
+		else
+			*ret = -EAGAIN;
+		return true;
+	}
+	/* Otherwise stop multishot but use the current result. */
+finish:
 	io_req_set_res(req, *ret, cflags);
 
 	if (issue_flags & IO_URING_F_MULTISHOT)

@@ -902,6 +923,7 @@ retry_multishot:
 		if (!buf)
 			return -ENOBUFS;
 		sr->buf = buf;
+		sr->len = len;
 	}
 
 	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
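The sr->len hunk above is about a specific request shape; a minimal sketch of how userspace would issue it is shown here. This is illustrative only: the ring, socket fd and buffer group are assumed to come from a setup like the earlier sketch, and queue_waitall_recv() is a hypothetical helper name.

#include <liburing.h>
#include <sys/socket.h>

static int queue_waitall_recv(struct io_uring *ring, int sockfd, int buf_group)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* len == 0 with a provided buffer: the selected buffer dictates the receive size */
	io_uring_prep_recv(sqe, sockfd, NULL, 0, MSG_WAITALL);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = buf_group;
	return io_uring_submit(ring);
}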
io_uring/poll.c:

@@ -226,8 +226,29 @@ enum {
 	IOU_POLL_NO_ACTION = 1,
 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
 	IOU_POLL_REISSUE = 3,
+	IOU_POLL_REQUEUE = 4,
 };
 
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+	unsigned flags = 0;
+
+	io_req_set_res(req, mask, 0);
+	req->io_task_work.func = io_poll_task_func;
+
+	trace_io_uring_task_add(req, mask);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+	if (io_poll_get_ownership(req))
+		__io_poll_execute(req, res);
+}
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.

@@ -309,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			int ret = io_poll_issue(req, ts);
 			if (ret == IOU_STOP_MULTISHOT)
 				return IOU_POLL_REMOVE_POLL_USE_RES;
+			else if (ret == IOU_REQUEUE)
+				return IOU_POLL_REQUEUE;
 			if (ret < 0)
 				return ret;
 		}

@@ -331,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	int ret;
 
 	ret = io_poll_check_events(req, ts);
-	if (ret == IOU_POLL_NO_ACTION)
+	if (ret == IOU_POLL_NO_ACTION) {
 		return;
+	} else if (ret == IOU_POLL_REQUEUE) {
+		__io_poll_execute(req, 0);
+		return;
+	}
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, ts);
 
@@ -364,26 +391,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	}
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
-	unsigned flags = 0;
-
-	io_req_set_res(req, mask, 0);
-	req->io_task_work.func = io_poll_task_func;
-
-	trace_io_uring_task_add(req, mask);
-
-	if (!(req->flags & REQ_F_POLL_NO_LAZY))
-		flags = IOU_F_TWQ_LAZY_WAKE;
-	__io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
-	if (io_poll_get_ownership(req))
-		__io_poll_execute(req, res);
-}
-
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
 	io_poll_mark_cancelled(req);
io_uring/poll.h:

@@ -24,6 +24,15 @@ struct async_poll {
 	struct io_poll *double_poll;
 };
 
+/*
+ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
+ * potentially other cases where we already "own" this poll request.
+ */
+static inline void io_poll_multishot_retry(struct io_kiocb *req)
+{
+	atomic_inc(&req->poll_refs);
+}
+
 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
 
io_uring/rw.c:

@@ -18,6 +18,7 @@
 #include "opdef.h"
 #include "kbuf.h"
 #include "rsrc.h"
+#include "poll.h"
 #include "rw.h"
 
 struct io_rw {

@@ -962,8 +963,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		if (io_fill_cqe_req_aux(req,
 					issue_flags & IO_URING_F_COMPLETE_DEFER,
 					ret, cflags | IORING_CQE_F_MORE)) {
-			if (issue_flags & IO_URING_F_MULTISHOT)
+			if (issue_flags & IO_URING_F_MULTISHOT) {
+				/*
+				 * Force retry, as we might have more data to
+				 * be read and otherwise it won't get retried
+				 * until (if ever) another poll is triggered.
+				 */
+				io_poll_multishot_retry(req);
 				return IOU_ISSUE_SKIP_COMPLETE;
+			}
 			return -EAGAIN;
 		}
 	}
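The io_read_mshot() change above is what the first bullet of the pull message refers to: without the forced retry, an armed multishot read could stop after the first buffer and wait for a poll event that may never come. From userspace, arming such a request could look like the sketch below; it assumes a liburing 2.5+ build with the io_uring_prep_read_multishot() helper, a kernel with IORING_OP_READ_MULTISHOT (6.7+), a pollable fd such as a pipe, and a buffer ring registered for buf_group as in the first sketch. arm_read_multishot() is a hypothetical helper name.

#include <liburing.h>

static int arm_read_multishot(struct io_uring *ring, int pipefd, int buf_group)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* nbytes == 0: each completion consumes one full provided buffer */
	io_uring_prep_read_multishot(sqe, pipefd, 0, 0, buf_group);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	return io_uring_submit(ring);
}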