io_uring/eventfd: move refs to refcount_t
An atomic_t is currently used for the struct io_ev_fd references, and there are no issues with it. While the ref getting and putting for the eventfd code is somewhat performance critical for cases where eventfd signaling is used (news flash, you should not...), it probably doesn't warrant using an atomic_t for this. Let's just move it to refcount_t to get the added protection against over/underflows.

Link: https://lore.kernel.org/lkml/202409082039.hnsaIJ3X-lkp@intel.com/
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202409082039.hnsaIJ3X-lkp@intel.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 0e0bcf07ec (parent c9f9ce65c2)
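For reference, the conversion is a one-for-one swap of the refcounting helpers: refcount_set() for atomic_set(), refcount_inc_not_zero() for atomic_inc_not_zero(), and refcount_dec_and_test() for atomic_dec_and_test(). Unlike atomic_t, refcount_t saturates and WARNs on overflow/underflow rather than silently wrapping. Below is a minimal sketch of the same get/put pattern on a hypothetical struct foo (not part of this patch), assuming standard kernel headers:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/rcupdate.h>

/* Hypothetical object standing in for struct io_ev_fd */
struct foo {
	refcount_t	refs;
	struct rcu_head	rcu;
};

static void foo_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* initial owner reference */
	return f;
}

/* Conditional get: fails once the last reference has been dropped */
static bool foo_get(struct foo *f)
{
	return refcount_inc_not_zero(&f->refs);
}

/* Put: defer the free to an RCU grace period, as the patch does */
static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))
		call_rcu(&f->rcu, foo_free);
}

The refcount_inc_not_zero() pattern is what lets io_eventfd_signal() safely take a reference under RCU while the eventfd may be concurrently unregistered.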
@@ -15,7 +15,7 @@ struct io_ev_fd {
 	struct eventfd_ctx	*cq_ev_fd;
 	unsigned int		eventfd_async: 1;
 	struct rcu_head		rcu;
-	atomic_t		refs;
+	refcount_t		refs;
 	atomic_t		ops;
 };
 
@@ -37,7 +37,7 @@ static void io_eventfd_do_signal(struct rcu_head *rcu)
 
 	eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
 
-	if (atomic_dec_and_test(&ev_fd->refs))
+	if (refcount_dec_and_test(&ev_fd->refs))
 		io_eventfd_free(rcu);
 }
 
@@ -63,7 +63,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
 	 */
 	if (unlikely(!ev_fd))
 		return;
-	if (!atomic_inc_not_zero(&ev_fd->refs))
+	if (!refcount_inc_not_zero(&ev_fd->refs))
 		return;
 	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
 		goto out;
@@ -77,7 +77,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
 		}
 	}
 out:
-	if (atomic_dec_and_test(&ev_fd->refs))
+	if (refcount_dec_and_test(&ev_fd->refs))
 		call_rcu(&ev_fd->rcu, io_eventfd_free);
 }
 
@@ -137,7 +137,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 
 	ev_fd->eventfd_async = eventfd_async;
 	ctx->has_evfd = true;
-	atomic_set(&ev_fd->refs, 1);
+	refcount_set(&ev_fd->refs, 1);
 	atomic_set(&ev_fd->ops, 0);
 	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
 	return 0;
@@ -152,7 +152,7 @@ int io_eventfd_unregister(struct io_ring_ctx *ctx)
 	if (ev_fd) {
 		ctx->has_evfd = false;
 		rcu_assign_pointer(ctx->io_ev_fd, NULL);
-		if (atomic_dec_and_test(&ev_fd->refs))
+		if (refcount_dec_and_test(&ev_fd->refs))
 			call_rcu(&ev_fd->rcu, io_eventfd_free);
 		return 0;
 	}