btrfs: fix corruption after buffer fault in during direct IO append write

During an append (O_APPEND write flag) direct IO write, if the input buffer
was not previously faulted in, we can corrupt the file in a way that the
final size is unexpected and it includes an unexpected hole.

The problem happens like this (a condensed sketch of the code path follows
the steps):

1) We have an empty file, with size 0, for example;

2) We do an O_APPEND direct IO with a length of 4096 bytes and the input
   buffer is not currently faulted in;

3) We enter btrfs_direct_write(), lock the inode and call
   generic_write_checks(), which calls generic_write_checks_count(), and
   that function sets the iocb position to 0 with the following code:

	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

4) We call btrfs_dio_write() and enter iomap, which will end up
   calling btrfs_dio_iomap_begin() and that calls
   btrfs_get_blocks_direct_write(), where we update the i_size of the
   inode to 4096 bytes;

5) After btrfs_dio_iomap_begin() returns, iomap will attempt to access
   the page of the write input buffer (at iomap_dio_bio_iter(), with a
   call to bio_iov_iter_get_pages()) and fail with -EFAULT, which gets
   returned to btrfs at btrfs_direct_write() via btrfs_dio_write();

6) At btrfs_direct_write() we get the -EFAULT error, unlock the inode,
   fault in the write buffer and then jump to the label 'relock';

7) We lock the inode again, redo all the necessary checks and call
   generic_write_checks() again, which calls generic_write_checks_count()
   again, and there we set the iocb's position to 4K, the current i_size of
   the inode, with the same code pointed out above:

        if (iocb->ki_flags & IOCB_APPEND)
                iocb->ki_pos = i_size_read(inode);

8) Then we call btrfs_dio_write() again and enter iomap, and this time the
   write succeeds, but it wrote to the file range [4K, 8K), leaving a hole
   in the range [0, 4K) and an i_size of 8K, which goes against the
   expectation of having the data written to the range [0, 4K) and getting
   an i_size of 4K.
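
Putting steps 3) to 8) together, the retry path before this change looked
roughly like the following condensed sketch (not the verbatim kernel code;
error handling and unrelated details are omitted):

   relock:
	btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	/* For O_APPEND, generic_write_checks_count() sets
	 * iocb->ki_pos = i_size_read(inode), re-evaluated on every pass.
	 */
	ret = generic_write_checks(iocb, from);
	...
	from->nofault = true;
	/* May extend i_size before failing with -EFAULT on the unfaulted buffer. */
	dio = btrfs_dio_write(iocb, from, written);
	from->nofault = false;

	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	...
	/* On -EFAULT / short write: fault the buffer in and retry from the top. */
	fault_in_iov_iter_readable(from, left);
	goto relock;	/* repeats the O_APPEND check against the already extended i_size */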

Fix this by not unlocking the inode before faulting in the input buffer,
in case we get -EFAULT or an incomplete write, and by not jumping to the
'relock' label after faulting in the buffer; instead jump to a location
immediately before calling iomap, skipping all the write checks and
relocking. This solves the problem and is fine even in case the input
buffer is memory mapped to the same file range: since only holding the
range locked in the inode's io tree can cause a deadlock, it's safe to
keep the inode lock (VFS lock), as was fixed and described in commit
51bd9563b6 ("btrfs: fix deadlock due to page faults during direct IO
reads and writes").

A sample reproducer provided by a reporter is the following:

   $ cat test.c
   #ifndef _GNU_SOURCE
   #define _GNU_SOURCE
   #endif

   #include <fcntl.h>
   #include <stdio.h>
   #include <sys/mman.h>
   #include <sys/stat.h>
   #include <unistd.h>

   int main(int argc, char *argv[])
   {
       if (argc < 2) {
           fprintf(stderr, "Usage: %s <test file>\n", argv[0]);
           return 1;
       }

       int fd = open(argv[1], O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT |
                     O_APPEND, 0644);
       if (fd < 0) {
           perror("creating test file");
           return 1;
       }

       char *buf = mmap(NULL, 4096, PROT_READ,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       if (buf == MAP_FAILED) {
           perror("mmap");
           return 1;
       }

       ssize_t ret = write(fd, buf, 4096);
       if (ret < 0) {
           perror("write");
           return 1;
       }

       struct stat stbuf;
       ret = fstat(fd, &stbuf);
       if (ret < 0) {
           perror("stat");
           return 1;
       }

       printf("size: %llu\n", (unsigned long long)stbuf.st_size);
       return stbuf.st_size == 4096 ? 0 : 1;
   }
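
The reproducer can be built and run against a file on a btrfs mount, for
example like this (the mount point below is just an example; on an affected
kernel it should report a size of 8192 instead of the expected 4096):

   $ gcc -o test test.c
   $ ./test /mnt/btrfs/file
   size: 8192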

A test case for fstests will be sent soon.

Reported-by: Hanna Czenczek <hreitz@redhat.com>
Link: https://lore.kernel.org/linux-btrfs/0b841d46-12fe-4e64-9abb-871d8d0de271@redhat.com/
Fixes: 8184620ae2 ("btrfs: fix lost file sync on direct IO write with nowait and dsync iocb")
CC: stable@vger.kernel.org # 6.1+
Tested-by: Hanna Czenczek <hreitz@redhat.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 939b656bc8 (parent 8cd44dd1d1)
Author:    Filipe Manana <fdmanana@suse.com>  2024-07-26 11:12:52 +01:00
Committer: David Sterba <dsterba@suse.com>

 3 files changed, 43 insertions(+), 13 deletions(-)

@@ -459,6 +459,7 @@ struct btrfs_file_private {
 	void *filldir_buf;
 	u64 last_index;
 	struct extent_state *llseek_cached_state;
+	bool fsync_skip_inode_lock;
 };
 
 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)

@@ -856,21 +856,37 @@ relock:
 	 * So here we disable page faults in the iov_iter and then retry if we
 	 * got -EFAULT, faulting in the pages before the retry.
 	 */
+again:
 	from->nofault = true;
 	dio = btrfs_dio_write(iocb, from, written);
 	from->nofault = false;
 
-	/*
-	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
-	 * iocb, and that needs to lock the inode. So unlock it before calling
-	 * iomap_dio_complete() to avoid a deadlock.
-	 */
-	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
-
-	if (IS_ERR_OR_NULL(dio))
+	if (IS_ERR_OR_NULL(dio)) {
 		ret = PTR_ERR_OR_ZERO(dio);
-	else
+	} else {
+		struct btrfs_file_private stack_private = { 0 };
+		struct btrfs_file_private *private;
+		const bool have_private = (file->private_data != NULL);
+
+		if (!have_private)
+			file->private_data = &stack_private;
+
+		/*
+		 * If we have a synchronous write, we must make sure the fsync
+		 * triggered by the iomap_dio_complete() call below doesn't
+		 * deadlock on the inode lock - we are already holding it and we
+		 * can't call it after unlocking because we may need to complete
+		 * partial writes due to the input buffer (or parts of it) not
+		 * being already faulted in.
+		 */
+		private = file->private_data;
+		private->fsync_skip_inode_lock = true;
 		ret = iomap_dio_complete(dio);
+		private->fsync_skip_inode_lock = false;
+
+		if (!have_private)
+			file->private_data = NULL;
+	}
 
 	/* No increment (+=) because iomap returns a cumulative value. */
 	if (ret > 0)
@@ -897,10 +913,12 @@ relock:
 		} else {
 			fault_in_iov_iter_readable(from, left);
 			prev_left = left;
-			goto relock;
+			goto again;
 		}
 	}
 
+	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+
 	/*
 	 * If 'ret' is -ENOTBLK or we have not written all data, then it means
 	 * we must fallback to buffered IO.

@@ -1603,6 +1603,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
  */
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
+	struct btrfs_file_private *private = file->private_data;
 	struct dentry *dentry = file_dentry(file);
 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
 	struct btrfs_root *root = inode->root;
@@ -1612,6 +1613,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	int ret = 0, err;
 	u64 len;
 	bool full_sync;
+	const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
 
 	trace_btrfs_sync_file(file, datasync);
 
@@ -1639,7 +1641,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	if (ret)
 		goto out;
 
-	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+	if (skip_ilock)
+		down_write(&inode->i_mmap_lock);
+	else
+		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
 
 	atomic_inc(&root->log_batch);
@@ -1663,7 +1668,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 */
 	ret = start_ordered_ops(inode, start, end);
 	if (ret) {
-		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+		if (skip_ilock)
+			up_write(&inode->i_mmap_lock);
+		else
+			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
 		goto out;
 	}
@@ -1788,7 +1796,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * file again, but that will end up using the synchronization
 	 * inside btrfs_sync_log to keep things safe.
 	 */
-	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+	if (skip_ilock)
+		up_write(&inode->i_mmap_lock);
+	else
+		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
 
 	if (ret == BTRFS_NO_LOG_SYNC) {
 		ret = btrfs_end_transaction(trans);