Merge tag 'for-6.12/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mikulas Patocka:

 - fix memory safety bugs in dm-cache

 - fix restart/panic logic in dm-verity

 - fix 32-bit unsigned integer overflow in dm-unstriped

 - fix a device mapper crash if blk_alloc_disk fails

* tag 'for-6.12/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix potential out-of-bounds access on the first resume
  dm cache: optimize dirty bit checking with find_next_bit when resizing
  dm cache: fix out-of-bounds access to the dirty bitset when resizing
  dm cache: fix flushing uninitialized delayed_work on cache_ctr error
  dm cache: correct the number of origin blocks to match the target length
  dm-verity: don't crash if panic_on_corruption is not selected
  dm-unstriped: cast an operand to sector_t to prevent potential uint32_t overflow
  dm: fix a crash if blk_alloc_disk fails

Author: Linus Torvalds
Date: 2024-11-06 07:56:47 -10:00
Commit: 9e23acf024

5 changed files with 42 additions and 35 deletions

--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c

@@ -1905,16 +1905,13 @@ static void check_migrations(struct work_struct *ws)
  * This function gets called on the error paths of the constructor, so we
  * have to cope with a partially initialised struct.
  */
-static void destroy(struct cache *cache)
+static void __destroy(struct cache *cache)
 {
-        unsigned int i;
-
         mempool_exit(&cache->migration_pool);
 
         if (cache->prison)
                 dm_bio_prison_destroy_v2(cache->prison);
 
-        cancel_delayed_work_sync(&cache->waker);
         if (cache->wq)
                 destroy_workqueue(cache->wq);
 
@@ -1942,13 +1939,22 @@ static void destroy(struct cache *cache)
         if (cache->policy)
                 dm_cache_policy_destroy(cache->policy);
 
-        for (i = 0; i < cache->nr_ctr_args ; i++)
-                kfree(cache->ctr_args[i]);
-        kfree(cache->ctr_args);
-
         bioset_exit(&cache->bs);
 
         kfree(cache);
 }
 
+static void destroy(struct cache *cache)
+{
+        unsigned int i;
+
+        cancel_delayed_work_sync(&cache->waker);
+
+        for (i = 0; i < cache->nr_ctr_args ; i++)
+                kfree(cache->ctr_args[i]);
+        kfree(cache->ctr_args);
+
+        __destroy(cache);
+}
+
 static void cache_dtr(struct dm_target *ti)
@@ -2003,7 +2009,6 @@ struct cache_args {
         sector_t cache_sectors;
 
         struct dm_dev *origin_dev;
-        sector_t origin_sectors;
 
         uint32_t block_size;
 
@@ -2084,6 +2089,7 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
                             char **error)
 {
+        sector_t origin_sectors;
         int r;
 
         if (!at_least_one_arg(as, error))
@@ -2096,8 +2102,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
                 return r;
         }
 
-        ca->origin_sectors = get_dev_size(ca->origin_dev);
-        if (ca->ti->len > ca->origin_sectors) {
+        origin_sectors = get_dev_size(ca->origin_dev);
+        if (ca->ti->len > origin_sectors) {
                 *error = "Device size larger than cached device";
                 return -EINVAL;
         }
@@ -2407,7 +2413,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
         ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
 
-        origin_blocks = cache->origin_sectors = ca->origin_sectors;
+        origin_blocks = cache->origin_sectors = ti->len;
         origin_blocks = block_div(origin_blocks, ca->block_size);
         cache->origin_blocks = to_oblock(origin_blocks);
 
@@ -2561,7 +2567,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
         *result = cache;
         return 0;
 
 bad:
-        destroy(cache);
+        __destroy(cache);
         return r;
 }
@@ -2612,7 +2618,7 @@ static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
         if (r) {
-                destroy(cache);
+                __destroy(cache);
                 goto out;
         }
 
@@ -2895,19 +2901,19 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
         if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
-                if (cache->sized) {
-                        DMERR("%s: unable to extend cache due to missing cache table reload",
-                              cache_device_name(cache));
-                        return false;
-                }
+                DMERR("%s: unable to extend cache due to missing cache table reload",
+                      cache_device_name(cache));
+                return false;
         }
 
         /*
          * We can't drop a dirty block when shrinking the cache.
          */
-        while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
-                new_size = to_cblock(from_cblock(new_size) + 1);
-                if (is_dirty(cache, new_size)) {
+        if (cache->loaded_mappings) {
+                new_size = to_cblock(find_next_bit(cache->dirty_bitset,
+                                                   from_cblock(cache->cache_size),
+                                                   from_cblock(new_size)));
+                if (new_size != cache->cache_size) {
                         DMERR("%s: unable to shrink cache; cache block %llu is dirty",
                               cache_device_name(cache),
                               (unsigned long long) from_cblock(new_size));
@@ -2943,20 +2949,15 @@ static int cache_preresume(struct dm_target *ti)
         /*
          * Check to see if the cache has resized.
          */
-        if (!cache->sized) {
-                r = resize_cache_dev(cache, csize);
-                if (r)
-                        return r;
-
-                cache->sized = true;
-
-        } else if (csize != cache->cache_size) {
+        if (!cache->sized || csize != cache->cache_size) {
                 if (!can_resize(cache, csize))
                         return -EINVAL;
 
                 r = resize_cache_dev(cache, csize);
                 if (r)
                         return r;
+
+                cache->sized = true;
         }
 
         if (!cache->loaded_mappings) {
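
Illustration, not part of the commit: the rewritten shrink check relies on the
kernel contract that find_next_bit() returns its "size" argument when no set
bit exists at or after "start", so a single call answers "is any block in
[new_size, cache_size) still dirty?". A naive bit-at-a-time userspace model of
that contract, with made-up names:

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Naive model of the kernel's find_next_bit(): index of the first set
 * bit in [start, size), or size if none is set. */
static unsigned long find_next_bit_model(const unsigned long *bitmap,
                                         unsigned long size, unsigned long start)
{
        unsigned long i;

        for (i = start; i < size; i++)
                if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return i;
        return size;
}

/* The can_resize() idiom above: shrinking to new_size is safe iff no
 * dirty bit survives in [new_size, cache_size). */
static bool shrink_is_safe(const unsigned long *dirty_bitset,
                           unsigned long cache_size, unsigned long new_size)
{
        return find_next_bit_model(dirty_bitset, cache_size, new_size) == cache_size;
}

int main(void)
{
        unsigned long dirty[1] = { 1UL << 5 }; /* cache block 5 is dirty */

        /* Shrinking to 8 blocks keeps block 5; shrinking to 4 would drop a
         * dirty block and is rejected, as in the DMERR path above. */
        return !(shrink_is_safe(dirty, 16, 8) && !shrink_is_safe(dirty, 16, 4));
}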

--- a/drivers/md/dm-unstriped.c
+++ b/drivers/md/dm-unstriped.c

@@ -85,8 +85,8 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         }
         uc->physical_start = start;
 
-        uc->unstripe_offset = uc->unstripe * uc->chunk_size;
-        uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+        uc->unstripe_offset = (sector_t)uc->unstripe * uc->chunk_size;
+        uc->unstripe_width = (sector_t)(uc->stripes - 1) * uc->chunk_size;
         uc->chunk_shift = is_power_of_2(uc->chunk_size) ? fls(uc->chunk_size) - 1 : 0;
 
         tmp_len = ti->len;
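
Illustration, not part of the commit: the bug fixed here is the classic C
promotion pitfall. unstripe, stripes and chunk_size are all 32-bit, so their
product is computed modulo 2^32 and only then widened for the sector_t
assignment; casting one operand first forces a 64-bit multiply. A standalone
demo with made-up values (sector_t typedef'd locally):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t; /* what the target actually stores into */

int main(void)
{
        uint32_t stripes = 8, chunk_size = 1U << 30; /* made-up sizes */

        /* 32-bit multiply wraps: 7 * 2^30 mod 2^32 == 3 * 2^30. */
        sector_t wrong = (stripes - 1) * chunk_size;

        /* The fix: widen one operand before multiplying. */
        sector_t right = (sector_t)(stripes - 1) * chunk_size;

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}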

--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c

@@ -356,9 +356,9 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
                 else if (verity_handle_err(v,
                                            DM_VERITY_BLOCK_TYPE_METADATA,
                                            hash_block)) {
-                        struct bio *bio =
-                                dm_bio_from_per_bio_data(io,
-                                                         v->ti->per_io_data_size);
+                        struct bio *bio;
+                        io->had_mismatch = true;
+                        bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
                         dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
                                          block, 0);
                         r = -EIO;
@@ -482,6 +482,7 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v,
                 return -EIO; /* Error correction failed; Just return error */
 
         if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
+                io->had_mismatch = true;
                 dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
                 return -EIO;
         }
@@ -606,6 +607,7 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
 
         if (unlikely(status != BLK_STS_OK) &&
             unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
+            !io->had_mismatch &&
             !verity_is_system_shutting_down()) {
                 if (v->error_mode == DM_VERITY_MODE_PANIC) {
                         panic("dm-verity device has I/O error");
@@ -779,6 +781,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
         io->orig_bi_end_io = bio->bi_end_io;
         io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
         io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
+        io->had_mismatch = false;
 
         bio->bi_end_io = verity_end_io;
         bio->bi_private = io;
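
Illustration, not part of the commit: a condensed userspace model of the
decision verity_finish_io() now makes -- the new had_mismatch flag keeps a
corruption-induced -EIO from tripping the restart/panic-on-error policy, which
is meant for genuine I/O errors only. Names here are illustrative, not the
kernel's:

#include <assert.h>
#include <stdbool.h>

enum error_mode { MODE_EIO, MODE_RESTART, MODE_PANIC };

/* Act on an I/O error only if it is not a hash mismatch (those are governed
 * by the separate corruption options), not readahead, and not a shutdown. */
static bool act_on_io_error(bool io_failed, bool readahead, bool had_mismatch,
                            bool shutting_down, enum error_mode mode)
{
        return io_failed && !readahead && !had_mismatch &&
               !shutting_down && mode != MODE_EIO;
}

int main(void)
{
        /* Before the fix, a mismatch on a panic-on-error device could panic
         * the machine even though panic_on_corruption was never selected. */
        assert(!act_on_io_error(true, false, true, false, MODE_PANIC));
        assert(act_on_io_error(true, false, false, false, MODE_PANIC));
        return 0;
}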

--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h

@@ -92,6 +92,7 @@ struct dm_verity_io {
         sector_t block;
         unsigned int n_blocks;
         bool in_bh;
+        bool had_mismatch;
 
         struct work_struct work;
         struct work_struct bh_work;

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c

@@ -2290,8 +2290,10 @@ static struct mapped_device *alloc_dev(int minor)
          * override accordingly.
          */
         md->disk = blk_alloc_disk(NULL, md->numa_node_id);
-        if (IS_ERR(md->disk))
+        if (IS_ERR(md->disk)) {
+                md->disk = NULL;
                 goto bad;
+        }
         md->queue = md->disk->queue;
 
         init_waitqueue_head(&md->wait);
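
Illustration, not part of the commit: blk_alloc_disk() reports failure as an
ERR_PTR, not NULL, so a cleanup path that only checks for NULL would
dereference the encoded errno -- hence md->disk is cleared before the goto. A
userspace model with made-up names (IS_ERR mirrors the kernel's definition):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct fake_disk { int dummy; }; /* stand-in for struct gendisk */

/* Cleanup tests "if (disk)" before tearing things down; an ERR_PTR is
 * non-NULL, so it would pass that test and be dereferenced. */
static void cleanup_model(struct fake_disk *disk)
{
        if (disk)
                printf("would call put_disk(%p) here\n", (void *)disk);
}

int main(void)
{
        /* Failure comes back as ERR_PTR(-ENOMEM), i.e. (void *)-12. */
        struct fake_disk *disk = (struct fake_disk *)(unsigned long)-12;

        if (IS_ERR(disk))
                disk = NULL; /* the fix: never let cleanup see an ERR_PTR */

        cleanup_model(disk); /* now a harmless no-op */
        return 0;
}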