
io_uring/net: remove dependency on REQ_F_PARTIAL_IO for sr->done_io

Ensure that prep handlers always initialize sr->done_io before any
potential failure conditions, and with that, we know it has always
been set, even in the failure case.

With that, we no longer need the REQ_F_PARTIAL_IO flag to gate on
whether sr->done_io is valid. Additionally, we should not overwrite
req->cqe.res unless sr->done_io is actually positive.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date:   2024-03-07 12:43:22 -07:00
parent: deaef31bc1
commit: 9817ad8589
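
For illustration, here is a minimal standalone sketch of the pattern this
commit applies (not the kernel code; sr_state, prep() and fail() are
hypothetical stand-ins for io_sr_msg, the prep handlers and
io_sendrecv_fail()): zero the progress counter before any validation
that can fail, so the failure handler can test it unconditionally
instead of gating on a flag.

#include <stdio.h>

struct sr_state {
	int done_io;	/* bytes already transferred */
};

static int prep(struct sr_state *sr, int bad_input)
{
	/* Zero the progress counter first, before any early return,
	 * mirroring the sr->done_io = 0 hoist in this commit. */
	sr->done_io = 0;

	if (bad_input)
		return -1;	/* done_io is already valid (0) here */

	return 0;
}

static void fail(const struct sr_state *sr, int *cqe_res)
{
	/* Analogue of io_sendrecv_fail(): only overwrite the result
	 * if some progress was actually made. */
	if (sr->done_io)
		*cqe_res = sr->done_io;
}

int main(void)
{
	struct sr_state sr;
	int res = -1;	/* stand-in for req->cqe.res holding an error */

	if (prep(&sr, 1) < 0)		/* prep fails... */
		fail(&sr, &res);	/* ...but done_io is safely 0 */

	printf("res = %d\n", res);	/* prints -1: error not clobbered */
	return 0;
}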


io_uring/net.c

@@ -387,6 +387,8 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
+	sr->done_io = 0;
+
 	if (req->opcode == IORING_OP_SEND) {
 		if (READ_ONCE(sqe->__pad3[0]))
 			return -EINVAL;
@@ -409,7 +411,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-	sr->done_io = 0;
 	return 0;
 }
 
@@ -631,6 +632,8 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
+	sr->done_io = 0;
+
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
 
@@ -667,7 +670,6 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-	sr->done_io = 0;
 	sr->nr_multishot_loops = 0;
 	return 0;
 }
@@ -1055,6 +1057,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *notif;
 
+	zc->done_io = 0;
+
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 		return -EINVAL;
 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
@@ -1107,8 +1111,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (zc->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
 
-	zc->done_io = 0;
-
 #ifdef CONFIG_COMPAT
 	if (req->ctx->compat)
 		zc->msg_flags |= MSG_CMSG_COMPAT;
@@ -1353,7 +1355,7 @@ void io_sendrecv_fail(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
-	if (req->flags & REQ_F_PARTIAL_IO)
+	if (sr->done_io)
 		req->cqe.res = sr->done_io;
 
 	if ((req->flags & REQ_F_NEED_CLEANUP) &&