io_uring: drop ->prep_async()
It's now unused, drop the code related to it. This includes the io_issue_defs->manual_alloc field. While in there, and since ->async_size is now being used a bit more frequently and in the issue path, move it to io_issue_defs[]. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
5eff57fa9f
commit
e10677a8f6
@ -1712,8 +1712,10 @@ io_req_flags_t io_file_get_flags(struct file *file)
|
||||
|
||||
bool io_alloc_async_data(struct io_kiocb *req)
|
||||
{
|
||||
WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size);
|
||||
req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
|
||||
const struct io_issue_def *def = &io_issue_defs[req->opcode];
|
||||
|
||||
WARN_ON_ONCE(!def->async_size);
|
||||
req->async_data = kmalloc(def->async_size, GFP_KERNEL);
|
||||
if (req->async_data) {
|
||||
req->flags |= REQ_F_ASYNC_DATA;
|
||||
return false;
|
||||
@ -1721,25 +1723,6 @@ bool io_alloc_async_data(struct io_kiocb *req)
|
||||
return true;
|
||||
}
|
||||
|
||||
int io_req_prep_async(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_cold_def *cdef = &io_cold_defs[req->opcode];
|
||||
const struct io_issue_def *def = &io_issue_defs[req->opcode];
|
||||
|
||||
/* assign early for deferred execution for non-fixed file */
|
||||
if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
|
||||
req->file = io_file_get_normal(req, req->cqe.fd);
|
||||
if (!cdef->prep_async)
|
||||
return 0;
|
||||
if (WARN_ON_ONCE(req_has_async_data(req)))
|
||||
return -EFAULT;
|
||||
if (!def->manual_alloc) {
|
||||
if (io_alloc_async_data(req))
|
||||
return -EAGAIN;
|
||||
}
|
||||
return cdef->prep_async(req);
|
||||
}
|
||||
|
||||
static u32 io_get_sequence(struct io_kiocb *req)
|
||||
{
|
||||
u32 seq = req->ctx->cached_sq_head;
|
||||
@ -2057,13 +2040,6 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
|
||||
req->flags |= REQ_F_LINK;
|
||||
io_req_defer_failed(req, req->cqe.res);
|
||||
} else {
|
||||
int ret = io_req_prep_async(req);
|
||||
|
||||
if (unlikely(ret)) {
|
||||
io_req_defer_failed(req, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(req->ctx->drain_active))
|
||||
io_drain_req(req);
|
||||
else
|
||||
@ -2273,10 +2249,6 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
* conditions are true (normal request), then just queue it.
|
||||
*/
|
||||
if (unlikely(link->head)) {
|
||||
ret = io_req_prep_async(req);
|
||||
if (unlikely(ret))
|
||||
return io_submit_fail_init(sqe, req, ret);
|
||||
|
||||
trace_io_uring_link(req, link->head);
|
||||
link->last->link = req;
|
||||
link->last = req;
|
||||
|
@ -95,7 +95,6 @@ int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
|
||||
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
|
||||
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
|
||||
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
|
||||
int io_req_prep_async(struct io_kiocb *req);
|
||||
|
||||
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
|
||||
void io_wq_submit_work(struct io_wq_work *work);
|
||||
|
@ -67,6 +67,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.vectored = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_readv,
|
||||
.issue = io_read,
|
||||
},
|
||||
@ -81,6 +82,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.vectored = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_writev,
|
||||
.issue = io_write,
|
||||
},
|
||||
@ -99,6 +101,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.ioprio = 1,
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_read_fixed,
|
||||
.issue = io_read,
|
||||
},
|
||||
@ -112,6 +115,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.ioprio = 1,
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_write_fixed,
|
||||
.issue = io_write,
|
||||
},
|
||||
@ -138,8 +142,8 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.unbound_nonreg_file = 1,
|
||||
.pollout = 1,
|
||||
.ioprio = 1,
|
||||
.manual_alloc = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_sendmsg_prep,
|
||||
.issue = io_sendmsg,
|
||||
#else
|
||||
@ -152,8 +156,8 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.pollin = 1,
|
||||
.buffer_select = 1,
|
||||
.ioprio = 1,
|
||||
.manual_alloc = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_recvmsg_prep,
|
||||
.issue = io_recvmsg,
|
||||
#else
|
||||
@ -162,6 +166,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
},
|
||||
[IORING_OP_TIMEOUT] = {
|
||||
.audit_skip = 1,
|
||||
.async_size = sizeof(struct io_timeout_data),
|
||||
.prep = io_timeout_prep,
|
||||
.issue = io_timeout,
|
||||
},
|
||||
@ -191,6 +196,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
},
|
||||
[IORING_OP_LINK_TIMEOUT] = {
|
||||
.audit_skip = 1,
|
||||
.async_size = sizeof(struct io_timeout_data),
|
||||
.prep = io_link_timeout_prep,
|
||||
.issue = io_no_issue,
|
||||
},
|
||||
@ -199,6 +205,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.unbound_nonreg_file = 1,
|
||||
.pollout = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_connect_prep,
|
||||
.issue = io_connect,
|
||||
#else
|
||||
@ -239,6 +246,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.ioprio = 1,
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_read,
|
||||
.issue = io_read,
|
||||
},
|
||||
@ -252,6 +260,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.ioprio = 1,
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_prep_write,
|
||||
.issue = io_write,
|
||||
},
|
||||
@ -272,8 +281,8 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.pollout = 1,
|
||||
.audit_skip = 1,
|
||||
.ioprio = 1,
|
||||
.manual_alloc = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_sendmsg_prep,
|
||||
.issue = io_send,
|
||||
#else
|
||||
@ -288,6 +297,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.audit_skip = 1,
|
||||
.ioprio = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_recvmsg_prep,
|
||||
.issue = io_recv,
|
||||
#else
|
||||
@ -403,6 +413,7 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.plug = 1,
|
||||
.iopoll = 1,
|
||||
.iopoll_queue = 1,
|
||||
.async_size = 2 * sizeof(struct io_uring_sqe),
|
||||
.prep = io_uring_cmd_prep,
|
||||
.issue = io_uring_cmd,
|
||||
},
|
||||
@ -412,8 +423,8 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.pollout = 1,
|
||||
.audit_skip = 1,
|
||||
.ioprio = 1,
|
||||
.manual_alloc = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_send_zc_prep,
|
||||
.issue = io_send_zc,
|
||||
#else
|
||||
@ -425,8 +436,8 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.unbound_nonreg_file = 1,
|
||||
.pollout = 1,
|
||||
.ioprio = 1,
|
||||
.manual_alloc = 1,
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.prep = io_send_zc_prep,
|
||||
.issue = io_sendmsg_zc,
|
||||
#else
|
||||
@ -439,10 +450,12 @@ const struct io_issue_def io_issue_defs[] = {
|
||||
.pollin = 1,
|
||||
.buffer_select = 1,
|
||||
.audit_skip = 1,
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.prep = io_read_mshot_prep,
|
||||
.issue = io_read_mshot,
|
||||
},
|
||||
[IORING_OP_WAITID] = {
|
||||
.async_size = sizeof(struct io_waitid_async),
|
||||
.prep = io_waitid_prep,
|
||||
.issue = io_waitid,
|
||||
},
|
||||
@ -488,13 +501,11 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
.name = "NOP",
|
||||
},
|
||||
[IORING_OP_READV] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "READV",
|
||||
.cleanup = io_readv_writev_cleanup,
|
||||
.fail = io_rw_fail,
|
||||
},
|
||||
[IORING_OP_WRITEV] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "WRITEV",
|
||||
.cleanup = io_readv_writev_cleanup,
|
||||
.fail = io_rw_fail,
|
||||
@ -503,12 +514,10 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
.name = "FSYNC",
|
||||
},
|
||||
[IORING_OP_READ_FIXED] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "READ_FIXED",
|
||||
.fail = io_rw_fail,
|
||||
},
|
||||
[IORING_OP_WRITE_FIXED] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "WRITE_FIXED",
|
||||
.fail = io_rw_fail,
|
||||
},
|
||||
@ -524,7 +533,6 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
[IORING_OP_SENDMSG] = {
|
||||
.name = "SENDMSG",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_sendmsg_recvmsg_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
@ -532,13 +540,11 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
[IORING_OP_RECVMSG] = {
|
||||
.name = "RECVMSG",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_sendmsg_recvmsg_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
},
|
||||
[IORING_OP_TIMEOUT] = {
|
||||
.async_size = sizeof(struct io_timeout_data),
|
||||
.name = "TIMEOUT",
|
||||
},
|
||||
[IORING_OP_TIMEOUT_REMOVE] = {
|
||||
@ -551,14 +557,10 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
.name = "ASYNC_CANCEL",
|
||||
},
|
||||
[IORING_OP_LINK_TIMEOUT] = {
|
||||
.async_size = sizeof(struct io_timeout_data),
|
||||
.name = "LINK_TIMEOUT",
|
||||
},
|
||||
[IORING_OP_CONNECT] = {
|
||||
.name = "CONNECT",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
#endif
|
||||
},
|
||||
[IORING_OP_FALLOCATE] = {
|
||||
.name = "FALLOCATE",
|
||||
@ -578,12 +580,10 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
.cleanup = io_statx_cleanup,
|
||||
},
|
||||
[IORING_OP_READ] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "READ",
|
||||
.fail = io_rw_fail,
|
||||
},
|
||||
[IORING_OP_WRITE] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "WRITE",
|
||||
.fail = io_rw_fail,
|
||||
},
|
||||
@ -596,7 +596,6 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
[IORING_OP_SEND] = {
|
||||
.name = "SEND",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_sendmsg_recvmsg_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
@ -604,7 +603,6 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
[IORING_OP_RECV] = {
|
||||
.name = "RECV",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_sendmsg_recvmsg_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
@ -676,12 +674,10 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
},
|
||||
[IORING_OP_URING_CMD] = {
|
||||
.name = "URING_CMD",
|
||||
.async_size = 2 * sizeof(struct io_uring_sqe),
|
||||
},
|
||||
[IORING_OP_SEND_ZC] = {
|
||||
.name = "SEND_ZC",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_send_zc_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
@ -689,18 +685,15 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
[IORING_OP_SENDMSG_ZC] = {
|
||||
.name = "SENDMSG_ZC",
|
||||
#if defined(CONFIG_NET)
|
||||
.async_size = sizeof(struct io_async_msghdr),
|
||||
.cleanup = io_send_zc_cleanup,
|
||||
.fail = io_sendrecv_fail,
|
||||
#endif
|
||||
},
|
||||
[IORING_OP_READ_MULTISHOT] = {
|
||||
.async_size = sizeof(struct io_async_rw),
|
||||
.name = "READ_MULTISHOT",
|
||||
},
|
||||
[IORING_OP_WAITID] = {
|
||||
.name = "WAITID",
|
||||
.async_size = sizeof(struct io_waitid_async),
|
||||
},
|
||||
[IORING_OP_FUTEX_WAIT] = {
|
||||
.name = "FUTEX_WAIT",
|
||||
|
@ -27,22 +27,19 @@ struct io_issue_def {
|
||||
unsigned iopoll : 1;
|
||||
/* have to be put into the iopoll list */
|
||||
unsigned iopoll_queue : 1;
|
||||
/* opcode specific path will handle ->async_data allocation if needed */
|
||||
unsigned manual_alloc : 1;
|
||||
/* vectored opcode, set if 1) vectored, and 2) handler needs to know */
|
||||
unsigned vectored : 1;
|
||||
|
||||
/* size of async data needed, if any */
|
||||
unsigned short async_size;
|
||||
|
||||
int (*issue)(struct io_kiocb *, unsigned int);
|
||||
int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
|
||||
};
|
||||
|
||||
struct io_cold_def {
|
||||
/* size of async data needed, if any */
|
||||
unsigned short async_size;
|
||||
|
||||
const char *name;
|
||||
|
||||
int (*prep_async)(struct io_kiocb *);
|
||||
void (*cleanup)(struct io_kiocb *);
|
||||
void (*fail)(struct io_kiocb *);
|
||||
};
|
||||
|
@ -9,7 +9,6 @@ struct uring_cache {
|
||||
|
||||
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
|
||||
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
||||
int io_uring_cmd_prep_async(struct io_kiocb *req);
|
||||
void io_uring_cache_free(struct io_cache_entry *entry);
|
||||
|
||||
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
|
||||
|
Loading…
Reference in New Issue
Block a user