
fscrypt: support decrypting data from large folios

Try to make the filesystem-level decryption functions in fs/crypto/
aware of large folios.  This includes making fscrypt_decrypt_bio()
support the case where the bio contains large folios, and making
fscrypt_decrypt_pagecache_blocks() take a folio instead of a page.
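
For callers that still track a single pagecache page, the conversion is
mechanical: wrap the page with page_folio() and pass the length and offset
as size_t.  A minimal sketch, mirroring the fs/buffer.c hunk below
(example_decrypt_bh_block() is an illustrative helper, not part of this
patch):

#include <linux/buffer_head.h>
#include <linux/fscrypt.h>

static int example_decrypt_bh_block(struct buffer_head *bh)
{
	/* An order-0 page is simply treated as its own single-page folio. */
	return fscrypt_decrypt_pagecache_blocks(page_folio(bh->b_page),
						bh->b_size, bh_offset(bh));
}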

There's no way to actually test this with large folios yet, but I've
tested that this doesn't cause any regressions.

Note that this patch just handles *decryption*, not encryption, which
will be a little more difficult.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20230127224202.355629-1-ebiggers@kernel.org
Author: Eric Biggers
Date:   2023-01-27 14:25:14 -08:00
Parent: 5d0f0e57ed
Commit: 51e4e3153e
6 changed files with 31 additions and 30 deletions

Documentation/filesystems/fscrypt.rst

@@ -1277,8 +1277,8 @@ the file contents themselves, as described below:
 
 For the read path (->read_folio()) of regular files, filesystems can
 read the ciphertext into the page cache and decrypt it in-place. The
-page lock must be held until decryption has finished, to prevent the
-page from becoming visible to userspace prematurely.
+folio lock must be held until decryption has finished, to prevent the
+folio from becoming visible to userspace prematurely.
 
 For the write path (->writepage()) of regular files, filesystems
 cannot encrypt data in-place in the page cache, since the cached
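
As a concrete sketch of that rule, the snippet below shows a post-read
completion that decrypts in place and only then marks folios uptodate and
unlocks them.  It assumes a hypothetical filesystem that defers bio
completion to a workqueue (the myfs_* names are illustrative, not existing
kernel code), since fscrypt_decrypt_bio() may sleep and so cannot run in
interrupt context:

#include <linux/bio.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct myfs_post_read_ctx {
	struct work_struct work;
	struct bio *bio;
};

/* Runs in process context; the folios attached to the bio are still locked. */
static void myfs_post_read_work(struct work_struct *work)
{
	struct myfs_post_read_ctx *ctx =
		container_of(work, struct myfs_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	struct folio_iter fi;

	/* Decrypt in place; on failure this sets bio->bi_status itself. */
	fscrypt_decrypt_bio(bio);

	bio_for_each_folio_all(fi, bio) {
		if (bio->bi_status == BLK_STS_OK)
			folio_mark_uptodate(fi.folio);
		/* Only now may the folio become visible to userspace. */
		folio_unlock(fi.folio);
	}
	kfree(ctx);
	bio_put(bio);
}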

fs/buffer.c

@@ -331,8 +331,8 @@ static void decrypt_bh(struct work_struct *work)
 	struct buffer_head *bh = ctx->bh;
 	int err;
 
-	err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
-					       bh_offset(bh));
+	err = fscrypt_decrypt_pagecache_blocks(page_folio(bh->b_page),
+					       bh->b_size, bh_offset(bh));
 	if (err == 0 && need_fsverity(bh)) {
 		/*
 		 * We use different work queues for decryption and for verity

fs/crypto/bio.c

@@ -30,13 +30,11 @@
  */
 bool fscrypt_decrypt_bio(struct bio *bio)
 {
-	struct bio_vec *bv;
-	struct bvec_iter_all iter_all;
+	struct folio_iter fi;
 
-	bio_for_each_segment_all(bv, bio, iter_all) {
-		struct page *page = bv->bv_page;
-		int err = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
-							   bv->bv_offset);
+	bio_for_each_folio_all(fi, bio) {
+		int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
+							   fi.offset);
 
 		if (err) {
 			bio->bi_status = errno_to_blk_status(err);

fs/crypto/crypto.c

@@ -237,41 +237,43 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
 
 /**
  * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
- *				pagecache page
- * @page: The locked pagecache page containing the block(s) to decrypt
+ *				pagecache folio
+ * @folio: The locked pagecache folio containing the block(s) to decrypt
  * @len: Total size of the block(s) to decrypt. Must be a nonzero
  *	multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to decrypt. Must be
+ * @offs: Byte offset within @folio of the first block to decrypt. Must be
  *	a multiple of the filesystem's block size.
  *
- * The specified block(s) are decrypted in-place within the pagecache page,
- * which must still be locked and not uptodate. Normally, blocksize ==
- * PAGE_SIZE and the whole page is decrypted at once.
+ * The specified block(s) are decrypted in-place within the pagecache folio,
+ * which must still be locked and not uptodate.
  *
  * This is for use by the filesystem's ->readahead() method.
  *
 * Return: 0 on success; -errno on failure
  */
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
-				     unsigned int offs)
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+				     size_t offs)
 {
-	const struct inode *inode = page->mapping->host;
+	const struct inode *inode = folio->mapping->host;
 	const unsigned int blockbits = inode->i_blkbits;
 	const unsigned int blocksize = 1 << blockbits;
-	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
+	u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
 		       (offs >> blockbits);
-	unsigned int i;
+	size_t i;
 	int err;
 
-	if (WARN_ON_ONCE(!PageLocked(page)))
+	if (WARN_ON_ONCE(!folio_test_locked(folio)))
 		return -EINVAL;
 
 	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
 		return -EINVAL;
 
 	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+		struct page *page = folio_page(folio, i >> PAGE_SHIFT);
+
 		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
-					  page, blocksize, i, GFP_NOFS);
+					  page, blocksize, i & ~PAGE_MASK,
+					  GFP_NOFS);
 		if (err)
 			return err;
 	}

fs/ext4/inode.c

@@ -1136,7 +1136,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	for (i = 0; i < nr_wait; i++) {
 		int err2;
 
-		err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+		err2 = fscrypt_decrypt_pagecache_blocks(page_folio(page),
+							blocksize,
 							bh_offset(wait[i]));
 		if (err2) {
 			clear_buffer_uptodate(wait[i]);
@@ -3858,7 +3859,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
 		/* We expect the key to be set. */
 		BUG_ON(!fscrypt_has_encryption_key(inode));
-		err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+		err = fscrypt_decrypt_pagecache_blocks(page_folio(page),
+						       blocksize,
 						       bh_offset(bh));
 		if (err) {
 			clear_buffer_uptodate(bh);

include/linux/fscrypt.h

@@ -257,8 +257,8 @@ int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
 				  unsigned int len, unsigned int offs,
 				  u64 lblk_num, gfp_t gfp_flags);
 
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
-				     unsigned int offs);
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+				     size_t offs);
 int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
 				  unsigned int len, unsigned int offs,
 				  u64 lblk_num);
@@ -422,9 +422,8 @@ static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
 	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
-						   unsigned int len,
-						   unsigned int offs)
+static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio,
+						   size_t len, size_t offs)
 {
 	return -EOPNOTSUPP;
 }