Merge patch series "fsdax/xfs: unshare range fixes for 6.12"
Darrick J. Wong <djwong@kernel.org> says:

This patchset fixes multiple data corruption bugs in the fallocate
unshare range implementation for fsdax.

* patches from https://lore.kernel.org/r/172796813251.1131942.12184885574609980777.stgit@frogsfrogsfrogs:
  fsdax: dax_unshare_iter needs to copy entire blocks
  fsdax: remove zeroing code from dax_unshare_iter
  iomap: share iomap_unshare_iter predicate code with fsdax
  xfs: don't allocate COW extents when unsharing a hole

Link: https://lore.kernel.org/r/172796813251.1131942.12184885574609980777.stgit@frogsfrogsfrogs
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit dad1b6c805
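For context, the path this series fixes is the one reached by fallocate(FALLOC_FL_UNSHARE_RANGE) on a reflinked file that lives on an fsdax mount. The following is a minimal userspace sketch of that sequence, not part of the series; the mount point, file names and sizes are illustrative assumptions, while FICLONE and FALLOC_FL_UNSHARE_RANGE are the real interfaces.

/*
 * Minimal sketch (not from the series): share blocks between two files
 * with FICLONE, then force the kernel to unshare a sub-block byte range
 * with FALLOC_FL_UNSHARE_RANGE. On an fsdax-mounted XFS filesystem this
 * exercises dax_unshare_iter(). Paths and lengths are arbitrary.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        char buf[65536] = { 'X' };
        int src = open("/mnt/dax/src", O_RDWR | O_CREAT, 0644);
        int dst = open("/mnt/dax/dst", O_RDWR | O_CREAT, 0644);

        if (src < 0 || dst < 0)
                return 1;

        /* Write one block's worth of data, then reflink it into dst. */
        if (pwrite(src, buf, sizeof(buf), 0) != sizeof(buf) ||
            ioctl(dst, FICLONE, src) < 0) {
                perror("setup");
                return 1;
        }

        /*
         * Unshare a 200-byte range; the bug fixed by the series was that
         * only the requested bytes, not whole blocks, were copied out of
         * the shared extent.
         */
        if (fallocate(dst, FALLOC_FL_UNSHARE_RANGE, 100, 200) < 0) {
                perror("fallocate(FALLOC_FL_UNSHARE_RANGE)");
                return 1;
        }

        close(src);
        close(dst);
        return 0;
}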
fs/dax.c
@@ -1262,35 +1262,46 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 {
 	struct iomap *iomap = &iter->iomap;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
+	loff_t copy_pos = iter->pos;
+	u64 copy_len = iomap_length(iter);
+	u32 mod;
 	int id = 0;
 	s64 ret = 0;
 	void *daddr = NULL, *saddr = NULL;
 
-	/* don't bother with blocks that are not shared to start with */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
+	if (!iomap_want_unshare_iter(iter))
+		return iomap_length(iter);
 
-	id = dax_read_lock();
-	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
-	if (ret < 0)
-		goto out_unlock;
-
-	/* zero the distance if srcmap is HOLE or UNWRITTEN */
-	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
-		memset(daddr, 0, length);
-		dax_flush(iomap->dax_dev, daddr, length);
-		ret = length;
-		goto out_unlock;
+	/*
+	 * Extend the file range to be aligned to fsblock/pagesize, because
+	 * we need to copy entire blocks, not just the byte range specified.
+	 * Invalidate the mapping because we're about to CoW.
+	 */
+	mod = offset_in_page(copy_pos);
+	if (mod) {
+		copy_len += mod;
+		copy_pos -= mod;
 	}
 
-	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
+	mod = offset_in_page(copy_pos + copy_len);
+	if (mod)
+		copy_len += PAGE_SIZE - mod;
+
+	invalidate_inode_pages2_range(iter->inode->i_mapping,
+			copy_pos >> PAGE_SHIFT,
+			(copy_pos + copy_len - 1) >> PAGE_SHIFT);
+
+	id = dax_read_lock();
+	ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
 	if (ret < 0)
 		goto out_unlock;
 
-	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
-		ret = length;
+	ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
+	if (ret < 0)
+		goto out_unlock;
+
+	if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
+		ret = iomap_length(iter);
 	else
 		ret = -EIO;
 
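The heart of the fs/dax.c change is expanding the requested byte range to whole pages before copying out of the shared extent. The following standalone sketch models that rounding with a hard-coded PAGE_SIZE and plain integers instead of the kernel helpers; the helper name and the sample values are assumptions for illustration only.

/*
 * Standalone model of the range expansion done in dax_unshare_iter():
 * round copy_pos down and copy_pos + copy_len up to page boundaries so
 * that whole blocks are copied out of the shared extent. PAGE_SIZE and
 * the values in main() are assumptions for the sake of the sketch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(p) ((uint32_t)((p) & (PAGE_SIZE - 1)))

static void expand_to_page_boundaries(uint64_t *copy_pos, uint64_t *copy_len)
{
        uint32_t mod = offset_in_page(*copy_pos);

        /* Round the start of the range down to a page boundary. */
        if (mod) {
                *copy_len += mod;
                *copy_pos -= mod;
        }

        /* Round the end of the range up to a page boundary. */
        mod = offset_in_page(*copy_pos + *copy_len);
        if (mod)
                *copy_len += PAGE_SIZE - mod;
}

int main(void)
{
        /* Unsharing bytes [100, 300) must copy the whole first page. */
        uint64_t pos = 100, len = 200;

        expand_to_page_boundaries(&pos, &len);
        assert(pos == 0 && len == PAGE_SIZE);
        printf("copy_pos=%llu copy_len=%llu\n",
               (unsigned long long)pos, (unsigned long long)len);
        return 0;
}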
fs/iomap/buffered-io.c
@@ -1309,19 +1309,12 @@ void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
 
-static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+bool iomap_want_unshare_iter(const struct iomap_iter *iter)
 {
-	struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
-	loff_t written = 0;
-
-	/* Don't bother with blocks that are not shared to start with. */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
-
 	/*
-	 * Don't bother with delalloc reservations, holes or unwritten extents.
+	 * Don't bother with blocks that are not shared to start with; or
+	 * mappings that cannot be shared, such as inline data, delalloc
+	 * reservations, holes or unwritten extents.
 	 *
 	 * Note that we use srcmap directly instead of iomap_iter_srcmap as
 	 * unsharing requires providing a separate source map, and the presence
@@ -1329,9 +1322,18 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	 * IOMAP_F_SHARED which can be set for any data that goes into the COW
 	 * fork for XFS.
 	 */
-	if (iter->srcmap.type == IOMAP_HOLE ||
-	    iter->srcmap.type == IOMAP_DELALLOC ||
-	    iter->srcmap.type == IOMAP_UNWRITTEN)
-		return length;
+	return (iter->iomap.flags & IOMAP_F_SHARED) &&
+	       iter->srcmap.type == IOMAP_MAPPED;
+}
+
+static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+{
+	struct iomap *iomap = &iter->iomap;
+	loff_t pos = iter->pos;
+	loff_t length = iomap_length(iter);
+	loff_t written = 0;
+
+	if (!iomap_want_unshare_iter(iter))
+		return length;
 
 	do {
fs/xfs/xfs_iomap.c
@@ -707,7 +707,7 @@ imap_needs_cow(
 		return false;
 
 	/* when zeroing we don't have to COW holes or unwritten extents */
-	if (flags & IOMAP_ZERO) {
+	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
 		if (!nimaps ||
 		    imap->br_startblock == HOLESTARTBLOCK ||
 		    imap->br_state == XFS_EXT_UNWRITTEN)
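The XFS hunk extends the existing IOMAP_ZERO shortcut in imap_needs_cow() to IOMAP_UNSHARE, so unsharing a hole or an unwritten extent no longer allocates a COW fork extent. A rough model of that short-circuit, with invented flag and state names, is sketched below.

/*
 * Rough model of the imap_needs_cow() short-circuit extended by the
 * series: zeroing *and* unsharing over a hole or an unwritten extent
 * need no COW allocation. Flag and state names are local stand-ins.
 */
#include <assert.h>
#include <stdbool.h>

#define OP_ZERO    (1u << 0)
#define OP_UNSHARE (1u << 1)

enum extent_state { EXT_HOLE, EXT_UNWRITTEN, EXT_WRITTEN };

static bool needs_cow_alloc(unsigned int op_flags, bool found, enum extent_state state)
{
        /* Zeroing or unsharing a hole/unwritten extent needs no COW extent. */
        if (op_flags & (OP_UNSHARE | OP_ZERO)) {
                if (!found || state == EXT_HOLE || state == EXT_UNWRITTEN)
                        return false;
        }
        return true;
}

int main(void)
{
        /* Unsharing a hole no longer allocates a COW extent. */
        assert(!needs_cow_alloc(OP_UNSHARE, false, EXT_HOLE));
        assert(!needs_cow_alloc(OP_UNSHARE, true, EXT_UNWRITTEN));

        /* Written, shared data still goes through the COW path. */
        assert(needs_cow_alloc(OP_UNSHARE, true, EXT_WRITTEN));
        return 0;
}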
include/linux/iomap.h
@@ -267,6 +267,7 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
+bool iomap_want_unshare_iter(const struct iomap_iter *iter);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
 		bool *did_zero, const struct iomap_ops *ops);
 int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,