
dm: Call dm_revalidate_zones() after setting the queue limits

dm_revalidate_zones() is called from dm_set_zones_restrictions() when the
mapped device queue limits are not yet set. However,
dm_revalidate_zones() calls blk_revalidate_disk_zones() and this
function consults and modifies the mapped device queue limits. Thus,
currently, blk_revalidate_disk_zones() operates on limits that are not
yet initialized.

Fix this by moving the call to dm_revalidate_zones() out of
dm_set_zones_restrictions() and into dm_table_set_restrictions(), after
executing queue_limits_set().

To further clean up dm_set_zones_restrictions(), the message about the
type of zone append used (native or emulated) is also moved inside
dm_revalidate_zones().
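
For reference, the resulting ordering in dm_table_set_restrictions() is,
in condensed form (a sketch that omits the unrelated queue attribute and
crypto profile setup):

        /* For a zoned table, setup the zone related queue attributes. */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
                r = dm_set_zones_restrictions(t, q, limits);
                if (r)
                        return r;
        }

        /* The mapped device queue limits only become effective here. */
        r = queue_limits_set(q, limits);
        if (r)
                return r;

        /*
         * Now that the limits are set, check the zones mapped by the table
         * and setup the resources for zone append emulation if necessary.
         * blk_revalidate_disk_zones(), called from dm_revalidate_zones(),
         * can now safely consult and modify the queue limits.
         */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
                r = dm_revalidate_zones(t, q);
                if (r)
                        return r;
        }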

Fixes: 1c0e720228 ("dm: use queue_limits_set")
Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Benjamin Marzinski <bmarzins@redhat.com>
Reviewed-by: Niklas Cassel <cassel@kernel.org>
Link: https://lore.kernel.org/r/20240611023639.89277-3-dlemoal@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

drivers/md/dm-table.c

@@ -1921,10 +1921,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
             dm_table_any_dev_attr(t, device_is_not_random, NULL))
                 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
-        /*
-         * For a zoned target, setup the zones related queue attributes
-         * and resources necessary for zone append emulation if necessary.
-         */
+        /* For a zoned table, setup the zone related queue attributes. */
         if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
                 r = dm_set_zones_restrictions(t, q, limits);
                 if (r)
@@ -1935,6 +1932,16 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         if (r)
                 return r;
 
+        /*
+         * Now that the limits are set, check the zones mapped by the table
+         * and setup the resources for zone append emulation if necessary.
+         */
+        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
+                r = dm_revalidate_zones(t, q);
+                if (r)
+                        return r;
+        }
+
         dm_update_crypto_profile(q, t);
 
         /*

drivers/md/dm-zone.c

@@ -166,14 +166,22 @@ static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
  * blk_revalidate_disk_zones() function here as the mapped device is suspended
  * (this is called from __bind() context).
  */
-static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
+int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
 {
+        struct mapped_device *md = t->md;
         struct gendisk *disk = md->disk;
         int ret;
 
+        if (!get_capacity(disk))
+                return 0;
+
         /* Revalidate only if something changed. */
-        if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
+        if (!disk->nr_zones || disk->nr_zones != md->nr_zones) {
+                DMINFO("%s using %s zone append",
+                       disk->disk_name,
+                       queue_emulates_zone_append(q) ? "emulated" : "native");
                 md->nr_zones = 0;
+        }
 
         if (md->nr_zones)
                 return 0;
@@ -240,9 +248,6 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
                 lim->max_zone_append_sectors = 0;
         }
 
-        if (!get_capacity(md->disk))
-                return 0;
-
         /*
          * Count conventional zones to check that the mapped device will indeed
          * have sequential write required zones.
@@ -269,16 +274,6 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
                 return 0;
         }
 
-        if (!md->disk->nr_zones) {
-                DMINFO("%s using %s zone append",
-                       md->disk->disk_name,
-                       queue_emulates_zone_append(q) ? "emulated" : "native");
-        }
-
-        ret = dm_revalidate_zones(md, t);
-        if (ret < 0)
-                return ret;
-
         if (!static_key_enabled(&zoned_enabled.key))
                 static_branch_enable(&zoned_enabled);
         return 0;

drivers/md/dm.h

@@ -103,6 +103,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
  */
 int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
                 struct queue_limits *lim);
+int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
 void dm_zone_endio(struct dm_io *io, struct bio *clone);
 #ifdef CONFIG_BLK_DEV_ZONED
 int dm_blk_report_zones(struct gendisk *disk, sector_t sector,