fuse: fix wrong ff->iomode state changes from parallel dio write
There is some confusion with the fuse_file_uncached_io_{start,end} interface.
These helpers do two things when called from passthrough open()/release():
1. Take/drop negative refcount of fi->iocachectr (inode uncached io mode)
2. State change ff->iomode IOM_NONE <-> IOM_UNCACHED (file uncached open)
The calls from the parallel dio write path need to take a reference on
fi->iocachectr, but they should not change the ff->iomode state, because
in that case the fi->iocachectr reference does not stick around until
file release().
Factor out helpers fuse_inode_uncached_io_{start,end}, to be used from
the parallel dio write path, and rename the fuse_file_*cached_io_{start,end}
helpers to fuse_file_*cached_io_{open,release} to clarify the difference.
Fixes: 205c1d8026 ("fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP")
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
commit 4864a6dd83
parent fec50db703
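In outline, the new split of the iomode.c helpers looks like this (a condensed
sketch lifted from the diff below; fi->lock handling and the backing-file
bookkeeping are omitted):

/* Inode io mode refcount only: used by the parallel dio write path, where
 * the fi->iocachectr reference is dropped again in fuse_dio_unlock() and
 * does not stick around until file release(). */
int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb);
void fuse_inode_uncached_io_end(struct fuse_inode *fi);

/* Passthrough open(): takes the inode refcount *and* moves the per-file
 * state IOM_NONE -> IOM_UNCACHED, undone at file release(). */
static int fuse_file_uncached_io_open(struct inode *inode,
				      struct fuse_file *ff,
				      struct fuse_backing *fb)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err;

	err = fuse_inode_uncached_io_start(fi, fb);
	if (err)
		return err;

	WARN_ON(ff->iomode != IOM_NONE);
	ff->iomode = IOM_UNCACHED;
	return 0;
}

The parallel dio write path in fuse_dio_lock()/fuse_dio_unlock() now calls only
the fuse_inode_uncached_io_{start,end} pair, so ff->iomode stays IOM_NONE for
dio writers.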
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1362,7 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
 			  bool *exclusive)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
-	struct fuse_file *ff = iocb->ki_filp->private_data;
+	struct fuse_inode *fi = get_fuse_inode(inode);
 
 	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
 	if (*exclusive) {
@@ -1377,7 +1377,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
 		 * have raced, so check it again.
 		 */
 		if (fuse_io_past_eof(iocb, from) ||
-		    fuse_file_uncached_io_start(inode, ff, NULL) != 0) {
+		    fuse_inode_uncached_io_start(fi, NULL) != 0) {
 			inode_unlock_shared(inode);
 			inode_lock(inode);
 			*exclusive = true;
@@ -1388,13 +1388,13 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
 static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
-	struct fuse_file *ff = iocb->ki_filp->private_data;
+	struct fuse_inode *fi = get_fuse_inode(inode);
 
 	if (exclusive) {
 		inode_unlock(inode);
 	} else {
 		/* Allow opens in caching mode after last parallel dio end */
-		fuse_file_uncached_io_end(inode, ff);
+		fuse_inode_uncached_io_end(fi);
 		inode_unlock_shared(inode);
 	}
 }
@@ -2574,8 +2574,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 		 * First mmap of direct_io file enters caching inode io mode.
 		 * Also waits for parallel dio writers to go into serial mode
 		 * (exclusive instead of shared lock).
+		 * After first mmap, the inode stays in caching io mode until
+		 * the direct_io file release.
 		 */
-		rc = fuse_file_cached_io_start(inode, ff);
+		rc = fuse_file_cached_io_open(inode, ff);
 		if (rc)
 			return rc;
 	}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1394,9 +1394,10 @@ int fuse_fileattr_set(struct mnt_idmap *idmap,
 		      struct dentry *dentry, struct fileattr *fa);
 
 /* iomode.c */
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff);
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb);
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff);
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
+int fuse_inode_uncached_io_start(struct fuse_inode *fi,
+				 struct fuse_backing *fb);
+void fuse_inode_uncached_io_end(struct fuse_inode *fi);
 
 int fuse_file_io_open(struct file *file, struct inode *inode);
 void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -175,6 +175,7 @@ static void fuse_evict_inode(struct inode *inode)
 		}
 	}
 	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+		WARN_ON(fi->iocachectr != 0);
 		WARN_ON(!list_empty(&fi->write_files));
 		WARN_ON(!list_empty(&fi->queued_writes));
 	}
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
--- a/fs/fuse/iomode.c
+++ b/fs/fuse/iomode.c
@@ -21,12 +21,13 @@ static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
 }
 
 /*
- * Start cached io mode.
+ * Called on cached file open() and on first mmap() of direct_io file.
+ * Takes cached_io inode mode reference to be dropped on file release.
  *
  * Blocks new parallel dio writes and waits for the in-progress parallel dio
  * writes to complete.
  */
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
 {
 	struct fuse_inode *fi = get_fuse_inode(inode);
 
@@ -67,10 +68,9 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
 	return 0;
 }
 
-static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
+static void fuse_file_cached_io_release(struct fuse_file *ff,
+					struct fuse_inode *fi)
 {
-	struct fuse_inode *fi = get_fuse_inode(inode);
-
 	spin_lock(&fi->lock);
 	WARN_ON(fi->iocachectr <= 0);
 	WARN_ON(ff->iomode != IOM_CACHED);
@@ -82,9 +82,8 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
 }
 
 /* Start strictly uncached io mode where cache access is not allowed */
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb)
+int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
 {
-	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_backing *oldfb;
 	int err = 0;
 
@@ -99,9 +98,7 @@ int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struc
 		err = -ETXTBSY;
 		goto unlock;
 	}
-	WARN_ON(ff->iomode != IOM_NONE);
 	fi->iocachectr--;
-	ff->iomode = IOM_UNCACHED;
 
 	/* fuse inode holds a single refcount of backing file */
 	if (!oldfb) {
@@ -115,15 +112,29 @@ unlock:
 	return err;
 }
 
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
+/* Takes uncached_io inode mode reference to be dropped on file release */
+static int fuse_file_uncached_io_open(struct inode *inode,
+				      struct fuse_file *ff,
+				      struct fuse_backing *fb)
 {
 	struct fuse_inode *fi = get_fuse_inode(inode);
+	int err;
+
+	err = fuse_inode_uncached_io_start(fi, fb);
+	if (err)
+		return err;
+
+	WARN_ON(ff->iomode != IOM_NONE);
+	ff->iomode = IOM_UNCACHED;
+	return 0;
+}
+
+void fuse_inode_uncached_io_end(struct fuse_inode *fi)
+{
 	struct fuse_backing *oldfb = NULL;
 
 	spin_lock(&fi->lock);
 	WARN_ON(fi->iocachectr >= 0);
-	WARN_ON(ff->iomode != IOM_UNCACHED);
-	ff->iomode = IOM_NONE;
 	fi->iocachectr++;
 	if (!fi->iocachectr) {
 		wake_up(&fi->direct_io_waitq);
@@ -134,6 +145,15 @@ void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
 		fuse_backing_put(oldfb);
 }
 
+/* Drop uncached_io reference from passthrough open */
+static void fuse_file_uncached_io_release(struct fuse_file *ff,
+					  struct fuse_inode *fi)
+{
+	WARN_ON(ff->iomode != IOM_UNCACHED);
+	ff->iomode = IOM_NONE;
+	fuse_inode_uncached_io_end(fi);
+}
+
 /*
  * Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
  * A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
@@ -163,7 +183,7 @@ static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
 		return PTR_ERR(fb);
 
 	/* First passthrough file open denies caching inode io mode */
-	err = fuse_file_uncached_io_start(inode, ff, fb);
+	err = fuse_file_uncached_io_open(inode, ff, fb);
 	if (!err)
 		return 0;
 
@@ -216,7 +236,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
 	if (ff->open_flags & FOPEN_PASSTHROUGH)
 		err = fuse_file_passthrough_open(inode, file);
 	else
-		err = fuse_file_cached_io_start(inode, ff);
+		err = fuse_file_cached_io_open(inode, ff);
 	if (err)
 		goto fail;
 
@@ -236,8 +256,10 @@ fail:
 /* No more pending io and no new io possible to inode via open/mmapped file */
 void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
 {
+	struct fuse_inode *fi = get_fuse_inode(inode);
+
 	/*
-	 * Last parallel dio close allows caching inode io mode.
+	 * Last passthrough file close allows caching inode io mode.
 	 * Last caching file close exits caching inode io mode.
 	 */
 	switch (ff->iomode) {
@@ -245,10 +267,10 @@ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
 		/* Nothing to do */
 		break;
 	case IOM_UNCACHED:
-		fuse_file_uncached_io_end(inode, ff);
+		fuse_file_uncached_io_release(ff, fi);
 		break;
 	case IOM_CACHED:
-		fuse_file_cached_io_end(inode, ff);
+		fuse_file_cached_io_release(ff, fi);
 		break;
 	}
 }