
Merge patch series "fsdax/xfs: unshare range fixes for 6.12"

Darrick J. Wong <djwong@kernel.org> says:

This patchset fixes multiple data corruption bugs in the fallocate
unshare range implementation for fsdax.

* patches from https://lore.kernel.org/r/172796813251.1131942.12184885574609980777.stgit@frogsfrogsfrogs:
  fsdax: dax_unshare_iter needs to copy entire blocks
  fsdax: remove zeroing code from dax_unshare_iter
  iomap: share iomap_unshare_iter predicate code with fsdax
  xfs: don't allocate COW extents when unsharing a hole

Link: https://lore.kernel.org/r/172796813251.1131942.12184885574609980777.stgit@frogsfrogsfrogs
Signed-off-by: Christian Brauner <brauner@kernel.org>
Committed by Christian Brauner on 2024-10-07 13:51:55 +02:00
commit dad1b6c805
4 changed files with 48 additions and 34 deletions
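
For context, the code path being fixed is reached from userspace through fallocate(2) with FALLOC_FL_UNSHARE_RANGE on a DAX-mounted, reflink-capable filesystem. Below is a minimal sketch of such a caller; the file path and byte range are made up purely for illustration.

/*
 * Minimal sketch of the operation this series fixes: unsharing a byte
 * range of a reflinked file via fallocate(2).  Path and range are
 * illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef FALLOC_FL_UNSHARE_RANGE
#define FALLOC_FL_UNSHARE_RANGE	0x40	/* from linux/falloc.h */
#endif

int main(void)
{
	int fd = open("/mnt/pmem/file", O_RDWR);	/* hypothetical fsdax file */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the filesystem to break sharing for bytes [4096, 4096 + 512). */
	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 4096, 512) < 0)
		perror("fallocate(FALLOC_FL_UNSHARE_RANGE)");

	close(fd);
	return 0;
}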

diff --git a/fs/dax.c b/fs/dax.c

@@ -1262,35 +1262,46 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 {
 	struct iomap *iomap = &iter->iomap;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
+	loff_t copy_pos = iter->pos;
+	u64 copy_len = iomap_length(iter);
+	u32 mod;
 	int id = 0;
 	s64 ret = 0;
 	void *daddr = NULL, *saddr = NULL;
 
-	/* don't bother with blocks that are not shared to start with */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
+	if (!iomap_want_unshare_iter(iter))
+		return iomap_length(iter);
 
-	id = dax_read_lock();
-	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
-	if (ret < 0)
-		goto out_unlock;
-
-	/* zero the distance if srcmap is HOLE or UNWRITTEN */
-	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
-		memset(daddr, 0, length);
-		dax_flush(iomap->dax_dev, daddr, length);
-		ret = length;
-		goto out_unlock;
-	}
-
-	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
+	/*
+	 * Extend the file range to be aligned to fsblock/pagesize, because
+	 * we need to copy entire blocks, not just the byte range specified.
+	 * Invalidate the mapping because we're about to CoW.
+	 */
+	mod = offset_in_page(copy_pos);
+	if (mod) {
+		copy_len += mod;
+		copy_pos -= mod;
+	}
+
+	mod = offset_in_page(copy_pos + copy_len);
+	if (mod)
+		copy_len += PAGE_SIZE - mod;
+
+	invalidate_inode_pages2_range(iter->inode->i_mapping,
+			copy_pos >> PAGE_SHIFT,
+			(copy_pos + copy_len - 1) >> PAGE_SHIFT);
+
+	id = dax_read_lock();
+	ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
 	if (ret < 0)
 		goto out_unlock;
 
-	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
-		ret = length;
+	ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
+	if (ret < 0)
+		goto out_unlock;
+
+	if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
+		ret = iomap_length(iter);
 	else
 		ret = -EIO;
 
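
The heart of the fix above is rounding the requested byte range out to page boundaries before copying, so that entire blocks are unshared. Here is a userspace re-statement of that arithmetic, purely as an illustration: PAGE_SIZE is hard-coded at 4096 and round_out_to_pages() is a made-up helper, whereas the kernel uses offset_in_page() and PAGE_SHIFT.

/*
 * Userspace re-statement of the range extension done in dax_unshare_iter():
 * round (pos, len) outward so whole pages (and therefore whole fs blocks)
 * are copied.  PAGE_SIZE is hard-coded for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static void round_out_to_pages(uint64_t *pos, uint64_t *len)
{
	uint64_t mod = *pos % PAGE_SIZE;	/* offset_in_page(copy_pos) */

	if (mod) {
		*len += mod;
		*pos -= mod;
	}

	mod = (*pos + *len) % PAGE_SIZE;	/* offset_in_page(copy_pos + copy_len) */
	if (mod)
		*len += PAGE_SIZE - mod;
}

int main(void)
{
	uint64_t pos = 5000, len = 100;		/* arbitrary sub-page range */

	round_out_to_pages(&pos, &len);
	printf("copy_pos=%llu copy_len=%llu\n",
	       (unsigned long long)pos, (unsigned long long)len);
	/* prints copy_pos=4096 copy_len=4096 */
	return 0;
}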

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c

@@ -1309,19 +1309,12 @@ void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
 
-static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+bool iomap_want_unshare_iter(const struct iomap_iter *iter)
 {
-	struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
-	loff_t written = 0;
-
-	/* Don't bother with blocks that are not shared to start with. */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
-
 	/*
-	 * Don't bother with delalloc reservations, holes or unwritten extents.
+	 * Don't bother with blocks that are not shared to start with; or
+	 * mappings that cannot be shared, such as inline data, delalloc
+	 * reservations, holes or unwritten extents.
 	 *
 	 * Note that we use srcmap directly instead of iomap_iter_srcmap as
 	 * unsharing requires providing a separate source map, and the presence
@@ -1329,9 +1322,18 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	 * IOMAP_F_SHARED which can be set for any data that goes into the COW
 	 * fork for XFS.
 	 */
-	if (iter->srcmap.type == IOMAP_HOLE ||
-	    iter->srcmap.type == IOMAP_DELALLOC ||
-	    iter->srcmap.type == IOMAP_UNWRITTEN)
+	return (iter->iomap.flags & IOMAP_F_SHARED) &&
+		iter->srcmap.type == IOMAP_MAPPED;
+}
+
+static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+{
+	struct iomap *iomap = &iter->iomap;
+	loff_t pos = iter->pos;
+	loff_t length = iomap_length(iter);
+	loff_t written = 0;
+
+	if (!iomap_want_unshare_iter(iter))
 		return length;
 
 	do {
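
This hunk reduces the unshare decision to a single predicate that the buffered and fsdax iterators now share. Below is a standalone sketch of that decision table; the MOCK_* names and the want_unshare() helper are illustrative stand-ins, not kernel API, and the real function takes a struct iomap_iter.

/*
 * Standalone sketch of the shared unshare predicate: only ranges whose
 * destination mapping is marked shared AND whose source mapping is real,
 * written data need a copy.
 */
#include <stdbool.h>
#include <stdio.h>

enum mock_iomap_type { MOCK_HOLE, MOCK_DELALLOC, MOCK_MAPPED, MOCK_UNWRITTEN, MOCK_INLINE };
#define MOCK_F_SHARED 0x1

static bool want_unshare(unsigned int iomap_flags, enum mock_iomap_type srcmap_type)
{
	return (iomap_flags & MOCK_F_SHARED) && srcmap_type == MOCK_MAPPED;
}

int main(void)
{
	/* shared and the source is mapped data -> copy needed */
	printf("%d\n", want_unshare(MOCK_F_SHARED, MOCK_MAPPED));	/* 1 */
	/* shared but the source is a hole or unwritten -> nothing to copy */
	printf("%d\n", want_unshare(MOCK_F_SHARED, MOCK_HOLE));		/* 0 */
	printf("%d\n", want_unshare(MOCK_F_SHARED, MOCK_UNWRITTEN));	/* 0 */
	/* not shared at all -> leave it alone */
	printf("%d\n", want_unshare(0, MOCK_MAPPED));			/* 0 */
	return 0;
}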

diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c

@@ -707,7 +707,7 @@ imap_needs_cow(
 		return false;
 
 	/* when zeroing we don't have to COW holes or unwritten extents */
-	if (flags & IOMAP_ZERO) {
+	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
 		if (!nimaps ||
 		    imap->br_startblock == HOLESTARTBLOCK ||
 		    imap->br_state == XFS_EXT_UNWRITTEN)
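
The XFS hunk widens the existing "no COW needed" shortcut from zeroing to unsharing: a hole or unwritten extent has no data to copy, so no COW fork extent should be allocated for it. A rough standalone sketch of just that part of the decision; the MOCK_* flags and the needs_cow() helper are hypothetical stand-ins for the fuller logic in imap_needs_cow().

/*
 * Sketch of the decision this hunk changes: whether unsharing or zeroing
 * a range that is a hole or unwritten extent must allocate a COW extent.
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_IOMAP_ZERO		0x1
#define MOCK_IOMAP_UNSHARE	0x2

static bool needs_cow(bool extent_is_hole_or_unwritten, unsigned int flags)
{
	/* After the fix: both zeroing and unsharing skip COW allocation
	 * when there is no written data underneath.
	 */
	if ((flags & (MOCK_IOMAP_UNSHARE | MOCK_IOMAP_ZERO)) &&
	    extent_is_hole_or_unwritten)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", needs_cow(true, MOCK_IOMAP_UNSHARE));	/* 0: no COW for unsharing a hole */
	printf("%d\n", needs_cow(false, MOCK_IOMAP_UNSHARE));	/* 1: shared written data still needs COW */
	return 0;
}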

diff --git a/include/linux/iomap.h b/include/linux/iomap.h

@@ -267,6 +267,7 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
+bool iomap_want_unshare_iter(const struct iomap_iter *iter);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
 		bool *did_zero, const struct iomap_ops *ops);
 int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,