From f33096a3c99c0149be49fe1e107244a7ed860ecb Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Thu, 6 Jun 2024 10:28:26 -0600
Subject: [PATCH] io_uring: add io_add_aux_cqe() helper

This helper will post a CQE, and can be called from task_work where we
know that the ctx is already properly locked and that deferred
completions will get flushed later on.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 27 +++++++++++++++++++++++----
 io_uring/io_uring.h |  1 +
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 85b2ce54328c..cdeb94d2a26b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -801,19 +801,38 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return false;
 }
 
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
+			      u32 cflags)
+{
+	bool filled;
+
+	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+	if (!filled)
+		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+
+	return filled;
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 		     u32 cflags)
 {
 	bool filled;
 
 	io_cq_lock(ctx);
-	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
-	if (!filled)
-		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
-
+	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
 	io_cq_unlock_post(ctx);
 	return filled;
 }
+/*
+ * Must be called from inline task_work so we know a flush will happen later,
+ * and obviously with ctx->uring_lock held (tw always has that).
+ */
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+	__io_post_aux_cqe(ctx, user_data, res, cflags);
+	ctx->submit_state.cq_flush = true;
+}
+
 /*
  * A helper for multishot requests posting additional CQEs.
  * Should only be used from a task_work including IO_URING_F_MULTISHOT.
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 7a8641214509..e1ce908f0679 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -65,6 +65,7 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
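
For illustration, a minimal sketch of how the new helper would be used from
inline task_work, next to the locked path it avoids. The callback
io_foo_tw() and its request are hypothetical stand-ins, not code from this
patch; only io_add_aux_cqe(), io_post_aux_cqe(), and the task_work callback
signature come from the tree.

/*
 * Hypothetical inline task_work callback (illustration only, not part
 * of this patch). Inline task_work runs with ctx->uring_lock held and
 * flushes deferred completions once the task_work batch is done, which
 * is exactly what io_add_aux_cqe() relies on.
 */
static void io_foo_tw(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * No io_cq_lock()/io_cq_unlock_post() pair needed here: the CQE
	 * is filled via __io_post_aux_cqe(), and setting cq_flush defers
	 * the CQ ring commit to the later task_work flush.
	 */
	io_add_aux_cqe(ctx, req->cqe.user_data, 0, 0);
}

Outside of task_work, where neither the uring_lock nor a deferred flush is
guaranteed, the locked io_post_aux_cqe() variant remains the correct call.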