io_uring: for requests that require async, force it
Some requests require being run async as they do not support non-blocking. Instead of trying to issue these requests, getting -EAGAIN and then queueing them for async issue, rather just force async upfront.

Add WARN_ON_ONCE to make sure surprising code paths do not come up, however in those cases the bug would end up being a blocking io_uring_enter(2) which should not be critical.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20230127135227.3646353-3-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6bb3085556
commit aebb224fd4
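For context (an editorial sketch, not part of the patch): the change only affects how the kernel issues these requests internally; userspace submits them exactly as before. The minimal liburing program below (assuming liburing is installed; the file name "demo.txt" is only illustrative) submits IORING_OP_FSYNC, one of the opcodes covered here. Previously the first non-blocking issue attempt would fail internally with -EAGAIN and the request would then be queued for async execution; with REQ_F_FORCE_ASYNC set in the prep handler it should be punted to the io-wq worker pool up front. The completion still arrives on the CQ ring as usual.

/*
 * Hypothetical demo, not part of this patch: submit an fsync SQE with
 * liburing. Opcodes like IORING_OP_FSYNC cannot be issued non-blocking,
 * so with this change the kernel forces them to async (io-wq) up front
 * instead of bouncing through an internal -EAGAIN.
 *
 * Build (assuming liburing is installed): gcc demo.c -luring
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* "demo.txt" is just an illustrative target file */
	fd = open("demo.txt", O_WRONLY | O_CREAT, 0644);
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("fsync completed with res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}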
--- a/io_uring/advise.c
+++ b/io_uring/advise.c
@@ -39,6 +39,7 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	ma->addr = READ_ONCE(sqe->addr);
 	ma->len = READ_ONCE(sqe->len);
 	ma->advice = READ_ONCE(sqe->fadvise_advice);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 #else
 	return -EOPNOTSUPP;
@@ -51,8 +52,7 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
 	io_req_set_res(req, ret, 0);
--- a/io_uring/fs.c
+++ b/io_uring/fs.c
@@ -74,6 +74,7 @@ int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -82,8 +83,7 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
 				ren->newpath, ren->flags);
@@ -123,6 +123,7 @@ int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return PTR_ERR(un->filename);
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -131,8 +132,7 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	if (un->flags & AT_REMOVEDIR)
 		ret = do_rmdir(un->dfd, un->filename);
@@ -170,6 +170,7 @@ int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return PTR_ERR(mkd->filename);
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -178,8 +179,7 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
 
@@ -220,6 +220,7 @@ int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -228,8 +229,7 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
 
@@ -265,6 +265,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -273,8 +274,7 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
 			  lnk->newpath, lnk->flags);
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -90,6 +90,7 @@ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 
 	shutdown->how = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -99,8 +100,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
--- a/io_uring/splice.c
+++ b/io_uring/splice.c
@@ -34,6 +34,7 @@ static int __io_splice_prep(struct io_kiocb *req,
 	if (unlikely(sp->flags & ~valid_flags))
 		return -EINVAL;
 	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -52,8 +53,7 @@ int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *in;
 	long ret = 0;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
 		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
@@ -94,8 +94,7 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *in;
 	long ret = 0;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
 		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
--- a/io_uring/statx.c
+++ b/io_uring/statx.c
@@ -48,6 +48,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -56,8 +57,7 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer);
 	io_req_set_res(req, ret, 0);
--- a/io_uring/sync.c
+++ b/io_uring/sync.c
@@ -32,6 +32,8 @@ int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->len);
 	sync->flags = READ_ONCE(sqe->sync_range_flags);
+	req->flags |= REQ_F_FORCE_ASYNC;
+
 	return 0;
 }
 
@@ -41,8 +43,7 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	/* sync_file_range always requires a blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
 	io_req_set_res(req, ret, 0);
@@ -62,6 +63,7 @@ int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -72,8 +74,7 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	/* fsync always requires a blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
 				sync->flags & IORING_FSYNC_DATASYNC);
@@ -91,6 +92,7 @@ int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->addr);
 	sync->mode = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -100,8 +102,8 @@ int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	/* fallocate always requiring blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+
 	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
 	if (ret >= 0)
 		fsnotify_modify(req->file);
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -75,6 +75,7 @@ static int __io_getxattr_prep(struct io_kiocb *req,
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -109,8 +110,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = do_getxattr(mnt_idmap(req->file->f_path.mnt),
 			req->file->f_path.dentry,
@@ -127,8 +127,7 @@ int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct path path;
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 retry:
 	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
@@ -174,6 +173,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
 	}
 
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
 
@@ -222,8 +222,7 @@ int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
 {
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 	ret = __io_setxattr(req, issue_flags, &req->file->f_path);
 	io_xattr_finish(req, ret);
@@ -237,8 +236,7 @@ int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct path path;
 	int ret;
 
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 
 retry:
 	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);