io_uring/kbuf: hold io_buffer_list reference over mmap

If we look up the kbuf, ensure that it doesn't get unregistered until
after we're done with it. Since we're inside mmap, we cannot safely use
the io_uring lock. Rely on the fact that we can now look up the buffer
list under RCU and grab a reference to it, preventing it from being
unregistered until we're done with it. The lookup returns the
io_buffer_list directly, with a reference already held.
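
The fix is an instance of the usual RCU lookup paired with a
conditional reference grab. Below is a minimal sketch of the pattern,
using illustrative names rather than the real io_uring types (the
actual io_buffer_list has a different layout, and the real io_put_bl
also tears down the ring buffers):

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Illustrative stand-in for io_buffer_list; not the real layout. */
struct obj {
	struct rcu_head rcu;
	unsigned long id;
	atomic_t refs;
};

/* Lookup side: runs where the uring_lock cannot be taken (mmap). */
static struct obj *obj_get(struct xarray *xa, unsigned long id)
{
	struct obj *o;
	bool got = false;

	rcu_read_lock();
	o = xa_load(xa, id);	/* stable only inside the RCU window */
	if (o)
		/* fails if the last reference was already dropped */
		got = atomic_inc_not_zero(&o->refs);
	rcu_read_unlock();

	return got ? o : ERR_PTR(-EINVAL);
}

/* Teardown side: unpublish, then defer the free past RCU readers. */
static void obj_put(struct xarray *xa, struct obj *o)
{
	if (atomic_dec_and_test(&o->refs)) {
		xa_erase(xa, o->id);
		kfree_rcu(o, rcu);
	}
}

atomic_inc_not_zero() is what closes the race: once the refcount has
hit zero, the lookup refuses to revive the object, and kfree_rcu()
guarantees the memory outlives any concurrent rcu_read_lock() section.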

Cc: stable@vger.kernel.org # v6.4+
Fixes: 5cf4f52e6d ("io_uring: free io_buffer_list entries via RCU")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 561e4f9451
parent 6b69c4ab4f
Author: Jens Axboe <axboe@kernel.dk>
Date:   2024-04-02 16:16:03 -06:00

3 changed files with 34 additions and 12 deletions

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3447,14 +3447,15 @@ static void *io_uring_validate_mmap_request(struct file *file,
 		ptr = ctx->sq_sqes;
 		break;
 	case IORING_OFF_PBUF_RING: {
+		struct io_buffer_list *bl;
 		unsigned int bgid;
 
 		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
-		rcu_read_lock();
-		ptr = io_pbuf_get_address(ctx, bgid);
-		rcu_read_unlock();
-		if (!ptr)
-			return ERR_PTR(-EINVAL);
+		bl = io_pbuf_get_bl(ctx, bgid);
+		if (IS_ERR(bl))
+			return bl;
+		ptr = bl->buf_ring;
+		io_put_bl(ctx, bl);
 		break;
 		}
 	default:

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -266,7 +266,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
 	return i;
 }
 
-static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
 {
 	if (atomic_dec_and_test(&bl->refs)) {
 		__io_remove_buffers(ctx, bl, -1U);
@@ -719,16 +719,35 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
 	return 0;
 }
 
-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
+struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
+				      unsigned long bgid)
 {
 	struct io_buffer_list *bl;
+	bool ret;
 
-	bl = __io_buffer_get_list(ctx, bgid);
+	/*
+	 * We have to be a bit careful here - we're inside mmap and cannot grab
+	 * the uring_lock. This means the buffer_list could be simultaneously
+	 * going away, if someone is trying to be sneaky. Look it up under rcu
+	 * so we know it's not going away, and attempt to grab a reference to
+	 * it. If the ref is already zero, then fail the mapping. If successful,
+	 * the caller will call io_put_bl() to drop the reference at the end.
+	 * This may then safely free the buffer_list (and drop the pages) at
+	 * that point, vm_insert_pages() would've already grabbed the
+	 * necessary vma references.
+	 */
+	rcu_read_lock();
+	bl = xa_load(&ctx->io_bl_xa, bgid);
+	/* must be a mmap'able buffer ring and have pages */
+	ret = false;
+	if (bl && bl->is_mmap)
+		ret = atomic_inc_not_zero(&bl->refs);
+	rcu_read_unlock();
 
-	if (!bl || !bl->is_mmap)
-		return NULL;
-
-	return bl->buf_ring;
+	if (ret)
+		return bl;
+
+	return ERR_PTR(-EINVAL);
 }
 
 /*

diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -61,7 +61,9 @@ void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
+void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
+struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
+				      unsigned long bgid);
 
 static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 {