io_uring/io-wq: make io_wq_work flags atomic
The work flags can be set/accessed from different tasks, both the
originator of the request, and the io-wq workers. While modifications
aren't concurrent, it still makes KMSAN unhappy. There's no real
downside to just making the flag reading/manipulation use proper
atomics here.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3474d1b93f
parent f2a93294ed
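At its core the change is mechanical: every plain read of work->flags becomes atomic_read(), every store becomes atomic_set(), and every |= becomes atomic_or(). A minimal userspace sketch of the same before/after pattern, using C11 stdatomic as a stand-in for the kernel's atomic_t API (the flag name here is illustrative, not a real IO_WQ_WORK_* value):

#include <stdatomic.h>
#include <stdio.h>

#define WORK_CANCEL (1u << 0)	/* illustrative stand-in for IO_WQ_WORK_CANCEL */

struct work {
	atomic_uint flags;	/* was: unsigned flags; */
};

int main(void)
{
	struct work w;

	atomic_store(&w.flags, 0);		/* was: w.flags = 0;            */
	atomic_fetch_or(&w.flags, WORK_CANCEL);	/* was: w.flags |= WORK_CANCEL; */
	if (atomic_load(&w.flags) & WORK_CANCEL)/* was: w.flags & WORK_CANCEL   */
		puts("work is marked for cancellation");
	return 0;
}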
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -50,7 +50,7 @@ struct io_wq_work_list {
 
 struct io_wq_work {
 	struct io_wq_work_node list;
-	unsigned flags;
+	atomic_t flags;
 	/* place it here instead of io_kiocb as it fills padding and saves 4B */
 	int cancel_seq;
 };
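Worth noting about this hunk: the kernel's atomic_t is a 4-byte wrapper around a plain int, so swapping it in for unsigned keeps the struct layout intact and the comment about cancel_seq filling padding still applies. A hedged userspace mirror of that size invariant (atomic_uint standing in for atomic_t, field names copied from the hunk above for illustration only):

#include <assert.h>
#include <stdatomic.h>

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/* mirror of the struct in the hunk above, for illustration only */
struct io_wq_work {
	struct io_wq_work_node list;
	atomic_uint flags;
	int cancel_seq;
};

static_assert(sizeof(atomic_uint) == sizeof(unsigned int),
	      "the flags field keeps its 4-byte footprint");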
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -159,7 +159,7 @@ static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
 static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
 						  struct io_wq_work *work)
 {
-	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
+	return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND));
 }
 
 static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
@@ -451,7 +451,7 @@ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
 
 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 {
-	return work->flags >> IO_WQ_HASH_SHIFT;
+	return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT;
 }
 
 static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
@@ -592,8 +592,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
 
 		next_hashed = wq_next_work(work);
 
-		if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
-			work->flags |= IO_WQ_WORK_CANCEL;
+		if (do_kill &&
+		    (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND))
+			atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		io_assign_current_work(worker, NULL);
 
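The test-then-set above (which also drops the unlikely() hint while splitting the condition) is two separate atomic operations rather than one atomic unit. That is fine for the stated goal: per the commit message the modifications aren't actually concurrent, so the atomics only need to make each individual access well-defined for the sanitizer, not close a race. In stdatomic terms the new code has this shape (flag values illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define WORK_CANCEL  (1u << 0)	/* illustrative stand-ins */
#define WORK_UNBOUND (1u << 2)

/* same shape as the new io-wq code: an atomic load, then an atomic OR */
static void maybe_cancel(atomic_uint *flags, bool do_kill)
{
	if (do_kill && (atomic_load(flags) & WORK_UNBOUND))
		atomic_fetch_or(flags, WORK_CANCEL);
}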
@@ -891,7 +892,7 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
 {
 	do {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		work = wq->free_work(work);
 	} while (work);
@@ -926,7 +927,7 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 {
 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
-	unsigned long work_flags = work->flags;
+	unsigned int work_flags = atomic_read(&work->flags);
 	struct io_cb_cancel_data match = {
 		.fn		= io_wq_work_match_item,
 		.data		= work,
@@ -939,7 +940,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 	 * been marked as one that should not get executed, cancel it here.
 	 */
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
-	    (work->flags & IO_WQ_WORK_CANCEL)) {
+	    (work_flags & IO_WQ_WORK_CANCEL)) {
 		io_run_cancel(work, wq);
 		return;
 	}
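Two details in this pair of hunks: the flags word is now read once into a local work_flags snapshot that the later test reuses, and the local's type shrinks from unsigned long to unsigned int to match the 32-bit atomic_t. Reading once is both cheaper and immune to the value changing between the load and later uses of the snapshot. A hedged sketch of the shape (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define WORK_CANCEL (1u << 0)	/* illustrative stand-in */

static bool should_run_cancel(atomic_uint *flags, bool wq_exiting)
{
	/* one atomic load; every later test uses the stable local copy */
	unsigned int work_flags = atomic_load(flags);

	return wq_exiting || (work_flags & WORK_CANCEL);
}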
@@ -982,7 +983,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	unsigned int bit;
 
 	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
-	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+	atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
 }
 
 static bool __io_wq_worker_cancel(struct io_worker *worker,
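This hunk and the earlier io_get_work_hash() hunk are two halves of one trick: io_wq_hash_work() ORs a bucket number into the upper bits of the same flags word (bit << IO_WQ_HASH_SHIFT, alongside the IO_WQ_WORK_HASHED flag), and io_get_work_hash() recovers it with a plain shift down. atomic_or() suffices for the pack because hashing only ever sets bits. A hedged sketch of the pack/unpack, with made-up shift and flag values:

#include <stdatomic.h>
#include <stdio.h>

#define WORK_HASHED (1u << 1)	/* illustrative stand-ins */
#define HASH_SHIFT  24		/* bucket number lives above the flag bits */

static void hash_work(atomic_uint *flags, unsigned int bucket)
{
	atomic_fetch_or(flags, WORK_HASHED | (bucket << HASH_SHIFT));
}

static unsigned int work_hash(atomic_uint *flags)
{
	return atomic_load(flags) >> HASH_SHIFT;
}

int main(void)
{
	atomic_uint flags = 0;

	hash_work(&flags, 5);
	printf("bucket %u\n", work_hash(&flags));	/* prints: bucket 5 */
	return 0;
}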
@@ -990,7 +991,7 @@ static bool __io_wq_worker_cancel(struct io_worker *worker,
 				  struct io_wq_work *work)
 {
 	if (work && match->fn(work, match->data)) {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		__set_notify_signal(worker->task);
 		return true;
 	}
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -56,7 +56,7 @@ bool io_wq_worker_stopped(void);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
 {
-	return work->flags & IO_WQ_WORK_HASHED;
+	return atomic_read(&work->flags) & IO_WQ_WORK_HASHED;
 }
 
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -462,9 +462,9 @@ static void io_prep_async_work(struct io_kiocb *req)
 	}
 
 	req->work.list.next = NULL;
-	req->work.flags = 0;
+	atomic_set(&req->work.flags, 0);
 	if (req->flags & REQ_F_FORCE_ASYNC)
-		req->work.flags |= IO_WQ_WORK_CONCURRENT;
+		atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
 
 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
 		req->flags |= io_file_get_flags(req->file);
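io_prep_async_work() runs while the request is still private to the submitting task, before any io-wq worker can see it, so the atomic_set()/atomic_or() here are initialization rather than synchronization; they just keep every access to the field type-correct, in line with the commit message's point that modifications aren't concurrent. A hedged stdatomic sketch of the prep-time shape (flag value illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define WORK_CONCURRENT (1u << 3)	/* illustrative stand-in */

/* prep-time (re)initialization: no worker can observe the field yet */
static void prep_work_flags(atomic_uint *flags, bool force_async)
{
	atomic_store(flags, 0);
	if (force_async)
		atomic_fetch_or(flags, WORK_CONCURRENT);
}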
@@ -480,7 +480,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 		io_wq_hash_work(&req->work, file_inode(req->file));
 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
 		if (def->unbound_nonreg_file)
-			req->work.flags |= IO_WQ_WORK_UNBOUND;
+			atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
 	}
 }
 
@@ -520,7 +520,7 @@ static void io_queue_iowq(struct io_kiocb *req)
 	 * worker for it).
 	 */
 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
-		req->work.flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
@@ -1736,14 +1736,14 @@ void io_wq_submit_work(struct io_wq_work *work)
 	io_arm_ltimeout(req);
 
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
-	if (work->flags & IO_WQ_WORK_CANCEL) {
+	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
 fail:
 		io_req_task_queue_fail(req, err);
 		return;
 	}
 	if (!io_assign_file(req, def, issue_flags)) {
 		err = -EBADF;
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		goto fail;
 	}
 