vfs-6.12-rc6.iomap
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZyTGVAAKCRCRxhvAZXjc
oltEAP9r8cWa3Tdv8DzMNWu/jezTUXoW/mX5Qe+c1L6faqj0WQD/dIVtBtG37Tfq
3Ci9F/GEWjKijtCQ5lwMGUq27jQJ1gk=
=/0iA
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.12-rc6.iomap' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull iomap fixes from Christian Brauner:

 "Fixes for iomap to prevent data corruption bugs in the fallocate
  unshare range implementation of fsdax and a small cleanup to turn
  iomap_want_unshare_iter() into an inline function"

* tag 'vfs-6.12-rc6.iomap' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs:
  iomap: turn iomap_want_unshare_iter into an inline function
  fsdax: dax_unshare_iter needs to copy entire blocks
  fsdax: remove zeroing code from dax_unshare_iter
  iomap: share iomap_unshare_iter predicate code with fsdax
  xfs: don't allocate COW extents when unsharing a hole
commit 17fa6a5f93
 fs/dax.c | 49
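The fixes in this tag harden the FALLOC_FL_UNSHARE_RANGE path described in the pull message above. For context, a minimal userspace sketch of the operation that exercises it (not part of this series; the file name and range are made up):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Sketch only: unshare the first 64KiB of a reflinked file so later
 * writes to that range no longer have to COW. "reflinked.img" is a
 * hypothetical file sharing extents with another file (e.g. created
 * with cp --reflink=always on XFS).
 */
int main(void)
{
	int fd = open("reflinked.img", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, 64 * 1024) < 0)
		perror("fallocate(FALLOC_FL_UNSHARE_RANGE)");
	close(fd);
	return 0;
}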
@@ -1262,35 +1262,46 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 {
 	struct iomap *iomap = &iter->iomap;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
+	loff_t copy_pos = iter->pos;
+	u64 copy_len = iomap_length(iter);
+	u32 mod;
 	int id = 0;
 	s64 ret = 0;
 	void *daddr = NULL, *saddr = NULL;
 
-	/* don't bother with blocks that are not shared to start with */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
-
-	id = dax_read_lock();
-	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
-	if (ret < 0)
-		goto out_unlock;
-
-	/* zero the distance if srcmap is HOLE or UNWRITTEN */
-	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
-		memset(daddr, 0, length);
-		dax_flush(iomap->dax_dev, daddr, length);
-		ret = length;
-		goto out_unlock;
-	}
-
-	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
+	if (!iomap_want_unshare_iter(iter))
+		return iomap_length(iter);
+
+	/*
+	 * Extend the file range to be aligned to fsblock/pagesize, because
+	 * we need to copy entire blocks, not just the byte range specified.
+	 * Invalidate the mapping because we're about to CoW.
+	 */
+	mod = offset_in_page(copy_pos);
+	if (mod) {
+		copy_len += mod;
+		copy_pos -= mod;
+	}
+
+	mod = offset_in_page(copy_pos + copy_len);
+	if (mod)
+		copy_len += PAGE_SIZE - mod;
+
+	invalidate_inode_pages2_range(iter->inode->i_mapping,
+			copy_pos >> PAGE_SHIFT,
+			(copy_pos + copy_len - 1) >> PAGE_SHIFT);
+
+	id = dax_read_lock();
+	ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
 	if (ret < 0)
 		goto out_unlock;
 
-	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
-		ret = length;
+	ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
+	if (ret < 0)
+		goto out_unlock;
+
+	if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
+		ret = iomap_length(iter);
 	else
 		ret = -EIO;
 
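The hunk above is dax_unshare_iter() in fs/dax.c. The heart of the "copy entire blocks" fix is rounding copy_pos and copy_len out to page boundaries before mapping and copying. A standalone sketch of that arithmetic (userspace stand-ins for PAGE_SIZE and offset_in_page(); the request values are made up):

#include <stdint.h>
#include <stdio.h>

/* Mock stand-ins; the kernel uses PAGE_SIZE and offset_in_page(). */
#define MOCK_PAGE_SIZE	4096u
#define MOCK_PAGE_MASK	(MOCK_PAGE_SIZE - 1)

int main(void)
{
	uint64_t copy_pos = 5000;	/* made-up unaligned unshare request */
	uint64_t copy_len = 100;
	uint32_t mod;

	mod = copy_pos & MOCK_PAGE_MASK;	/* offset_in_page(copy_pos) */
	if (mod) {
		copy_len += mod;
		copy_pos -= mod;
	}
	mod = (copy_pos + copy_len) & MOCK_PAGE_MASK;
	if (mod)
		copy_len += MOCK_PAGE_SIZE - mod;

	/* Prints copy_pos=4096 copy_len=4096: the copy now covers whole pages. */
	printf("copy_pos=%llu copy_len=%llu\n",
	       (unsigned long long)copy_pos, (unsigned long long)copy_len);
	return 0;
}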
@@ -1277,22 +1277,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	loff_t length = iomap_length(iter);
 	loff_t written = 0;
 
-	/* Don't bother with blocks that are not shared to start with. */
-	if (!(iomap->flags & IOMAP_F_SHARED))
-		return length;
-
-	/*
-	 * Don't bother with delalloc reservations, holes or unwritten extents.
-	 *
-	 * Note that we use srcmap directly instead of iomap_iter_srcmap as
-	 * unsharing requires providing a separate source map, and the presence
-	 * of one is a good indicator that unsharing is needed, unlike
-	 * IOMAP_F_SHARED which can be set for any data that goes into the COW
-	 * fork for XFS.
-	 */
-	if (iter->srcmap.type == IOMAP_HOLE ||
-	    iter->srcmap.type == IOMAP_DELALLOC ||
-	    iter->srcmap.type == IOMAP_UNWRITTEN)
+	if (!iomap_want_unshare_iter(iter))
 		return length;
 
 	do {
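The hunk above is iomap_unshare_iter() in fs/iomap/buffered-io.c; its open-coded skip checks are replaced by the iomap_want_unshare_iter() helper added to include/linux/iomap.h further down. A reduced model (mock names, not kernel code) of how the old and new conditions compare:

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins; not the kernel's enum iomap_type or IOMAP_F_SHARED. */
enum mock_type { MOCK_HOLE, MOCK_DELALLOC, MOCK_MAPPED, MOCK_UNWRITTEN, MOCK_INLINE };

/* The removed open-coded check: shared, and not hole/delalloc/unwritten. */
static bool old_check(bool shared, enum mock_type src)
{
	return shared && src != MOCK_HOLE && src != MOCK_DELALLOC &&
	       src != MOCK_UNWRITTEN;
}

/* The new helper's logic: shared, and the source extent is mapped. */
static bool new_check(bool shared, enum mock_type src)
{
	return shared && src == MOCK_MAPPED;
}

int main(void)
{
	/* Agrees for hole/delalloc/mapped/unwritten; inline data is now also skipped. */
	for (int t = MOCK_HOLE; t <= MOCK_INLINE; t++)
		printf("srcmap type %d: old=%d new=%d\n", t,
		       old_check(true, (enum mock_type)t),
		       new_check(true, (enum mock_type)t));
	return 0;
}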
@@ -707,7 +707,7 @@ imap_needs_cow(
 		return false;
 
 	/* when zeroing we don't have to COW holes or unwritten extents */
-	if (flags & IOMAP_ZERO) {
+	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
 		if (!nimaps ||
 		    imap->br_startblock == HOLESTARTBLOCK ||
 		    imap->br_state == XFS_EXT_UNWRITTEN)
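The one-line hunk above is from imap_needs_cow() in fs/xfs/xfs_iomap.c: with IOMAP_UNSHARE added to the mask, unsharing a range backed by a hole or unwritten extent no longer allocates a COW fork extent, just like zeroing. A reduced model of that branch (mock flag names, not the kernel's values):

#include <stdbool.h>
#include <stdio.h>

/* Mock flag bits; not the kernel's IOMAP_ZERO/IOMAP_UNSHARE definitions. */
#define MOCK_ZERO	(1u << 0)
#define MOCK_UNSHARE	(1u << 1)

/*
 * Reduced model of the changed branch: when zeroing or (now) unsharing,
 * a hole or unwritten extent does not need a COW allocation.
 */
static bool needs_cow(unsigned int flags, bool hole_or_unwritten)
{
	if ((flags & (MOCK_UNSHARE | MOCK_ZERO)) && hole_or_unwritten)
		return false;
	return true;
}

int main(void)
{
	printf("unshare over a hole needs COW? %d\n",
	       needs_cow(MOCK_UNSHARE, true));	/* prints 0 after this fix */
	return 0;
}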
@@ -270,6 +270,25 @@ static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
 	return round_up(pos + written, i_blocksize(inode));
 }
 
+/*
+ * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
+ * operation.
+ *
+ * Don't bother with blocks that are not shared to start with; or mappings that
+ * cannot be shared, such as inline data, delalloc reservations, holes or
+ * unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
+ * requires providing a separate source map, and the presence of one is a good
+ * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
+ * for any data that goes into the COW fork for XFS.
+ */
+static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
+{
+	return (iter->iomap.flags & IOMAP_F_SHARED) &&
+	       iter->srcmap.type == IOMAP_MAPPED;
+}
+
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops, void *private);
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
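As a companion to the fallocate sketch near the top, one way to observe from userspace whether an unshare took effect is FIEMAP, which reports per-extent sharing. A sketch (FIEMAP is a long-standing ioctl unrelated to this series; the file name and range are made up):

#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fiemap *fm;
	int fd = open("reflinked.img", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Room for the header plus one extent record. */
	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
	if (!fm) {
		close(fd);
		return 1;
	}
	fm->fm_length = 64 * 1024;	/* same range as the unshare sketch */
	fm->fm_extent_count = 1;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents)
		printf("first extent still shared: %s\n",
		       (fm->fm_extents[0].fe_flags & FIEMAP_EXTENT_SHARED) ?
		       "yes" : "no");
	free(fm);
	close(fd);
	return 0;
}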