
io_uring/net: add IORING_ACCEPT_DONTWAIT flag

This allows the caller to perform a non-blocking accept attempt, similar
to how recvmsg supports MSG_DONTWAIT. If the flag is set and a connection
attempt gets -EAGAIN, propagate that result to userspace rather than
arming poll and waiting for a retry.

Suggested-by: Norman Maurer <norman_maurer@apple.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2024-05-07 14:06:15 -06:00
parent 340f634aa4
commit 7dcc758cca
2 changed files with 10 additions and 6 deletions
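For context, a minimal userspace sketch (not part of this commit) of how the new flag could be driven through liburing's io_uring_prep_accept(). The helper name try_accept_nowait and the listen_fd parameter are illustrative assumptions, and the flag is OR'd into sqe->ioprio because that is where accept flags are carried (see the uapi hunk below); error handling is abbreviated.

/*
 * Illustrative sketch: issue a single non-blocking accept attempt with
 * IORING_ACCEPT_DONTWAIT via liburing. Assumes a ring set up elsewhere
 * and a listening socket listen_fd; requires uapi headers that define
 * the new flag.
 */
#include <liburing.h>
#include <stdio.h>

static int try_accept_nowait(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
	/* accept flags are carried in sqe->ioprio */
	sqe->ioprio |= IORING_ACCEPT_DONTWAIT;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);

	/* with DONTWAIT set, -EAGAIN comes back instead of arming poll */
	if (ret == -EAGAIN)
		fprintf(stderr, "no connection ready\n");
	return ret;
}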

--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h

@@ -379,6 +379,7 @@ enum io_uring_op {
  * accept flags stored in sqe->ioprio
  */
 #define IORING_ACCEPT_MULTISHOT	(1U << 0)
+#define IORING_ACCEPT_DONTWAIT	(1U << 1)
 
 /*
  * IORING_OP_MSG_RING command types, stored in sqe->addr

--- a/io_uring/net.c
+++ b/io_uring/net.c

@@ -28,6 +28,7 @@ struct io_accept {
 	struct sockaddr __user		*addr;
 	int __user			*addr_len;
 	int				flags;
+	int				iou_flags;
 	u32				file_slot;
 	unsigned long			nofile;
 };
@@ -1489,7 +1490,6 @@ void io_sendrecv_fail(struct io_kiocb *req)
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
-	unsigned flags;
 
 	if (sqe->len || sqe->buf_index)
 		return -EINVAL;
@@ -1498,15 +1498,15 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 	accept->flags = READ_ONCE(sqe->accept_flags);
 	accept->nofile = rlimit(RLIMIT_NOFILE);
-	flags = READ_ONCE(sqe->ioprio);
-	if (flags & ~IORING_ACCEPT_MULTISHOT)
+	accept->iou_flags = READ_ONCE(sqe->ioprio);
+	if (accept->iou_flags & ~(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT))
 		return -EINVAL;
 
 	accept->file_slot = READ_ONCE(sqe->file_index);
 	if (accept->file_slot) {
 		if (accept->flags & SOCK_CLOEXEC)
 			return -EINVAL;
-		if (flags & IORING_ACCEPT_MULTISHOT &&
+		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
 			return -EINVAL;
 	}
@@ -1514,8 +1514,10 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
-	if (flags & IORING_ACCEPT_MULTISHOT)
+	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
 		req->flags |= REQ_F_APOLL_MULTISHOT;
+	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
+		req->flags |= REQ_F_NOWAIT;
 	return 0;
 }
@@ -1540,7 +1542,8 @@ retry:
 		if (!fixed)
 			put_unused_fd(fd);
 		ret = PTR_ERR(file);
-		if (ret == -EAGAIN && force_nonblock) {
+		if (ret == -EAGAIN && force_nonblock &&
+		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
 			/*
 			 * if it's multishot and polled, we don't need to
 			 * return EAGAIN to arm the poll infra since it