
blk-throttle: delay initialization until configuration

Other cgroup policies, like bfq and iocost, are lazily initialized when
they are first configured for the device, but blk-throttle is
initialized unconditionally from blkcg_init_disk().

Delay initialization of blk-throttle as well, to save some CPU and
memory overhead if it's not configured.

Note that once it's initialized, it can't be destroyed until disk
removal, even if it's disabled.
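
For illustration, the first throttle limit written to a cgroup's io.max
file is now what triggers initialization. A minimal userspace sketch of
that trigger (the cgroup path and the 8:0 major:minor numbers are
placeholders; the io controller must be enabled for the cgroup):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* 1 MiB/s read limit for device 8:0; this first write is what
         * now initializes blk-throttle for the disk. */
        const char *limit = "8:0 rbps=1048576";
        int fd = open("/sys/fs/cgroup/test/io.max", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, limit, strlen(limit)) < 0)
            perror("write");
        close(fd);
        return 0;
    }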

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20240509121107.3195568-3-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Yu Kuai, 2024-05-09 20:11:07 +08:00; committed by Jens Axboe
parent bf20ab538c
commit a3166c5170
4 changed files with 88 additions and 53 deletions

block/blk-cgroup.c

@@ -1440,14 +1440,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	if (ret)
 		goto err_destroy_all;
 
-	ret = blk_throtl_init(disk);
-	if (ret)
-		goto err_ioprio_exit;
-
 	return 0;
 
-err_ioprio_exit:
-	blk_ioprio_exit(disk);
 err_destroy_all:
 	blkg_destroy_all(disk);
 	return ret;

block/blk-sysfs.c

@@ -807,7 +807,6 @@ int blk_register_queue(struct gendisk *disk)
 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 	wbt_enable_default(disk);
-	blk_throtl_register(disk);
 
 	/* Now everything is ready and send out KOBJ_ADD uevent */
 	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);

block/blk-throttle.c

@@ -1211,6 +1211,53 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 	}
 }
 
+static int blk_throtl_init(struct gendisk *disk)
+{
+	struct request_queue *q = disk->queue;
+	struct throtl_data *td;
+	int ret;
+
+	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+	if (!td)
+		return -ENOMEM;
+
+	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+	throtl_service_queue_init(&td->service_queue);
+
+	/*
+	 * Freeze queue before activating policy, to synchronize with IO path,
+	 * which is protected by 'q_usage_counter'.
+	 */
+	blk_mq_freeze_queue(disk->queue);
+	blk_mq_quiesce_queue(disk->queue);
+
+	q->td = td;
+	td->queue = q;
+
+	/* activate policy */
+	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
+	if (ret) {
+		q->td = NULL;
+		kfree(td);
+		goto out;
+	}
+
+	if (blk_queue_nonrot(q))
+		td->throtl_slice = DFL_THROTL_SLICE_SSD;
+	else
+		td->throtl_slice = DFL_THROTL_SLICE_HD;
+	td->track_bio_latency = !queue_is_mq(q);
+	if (!td->track_bio_latency)
+		blk_stat_enable_accounting(q);
+
+out:
+	blk_mq_unquiesce_queue(disk->queue);
+	blk_mq_unfreeze_queue(disk->queue);
+	return ret;
+}
+
 static ssize_t tg_set_conf(struct kernfs_open_file *of,
 			  char *buf, size_t nbytes, loff_t off, bool is_u64)
 {
@@ -1222,6 +1269,16 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
 
 	blkg_conf_init(&ctx, buf);
 
+	ret = blkg_conf_open_bdev(&ctx);
+	if (ret)
+		goto out_finish;
+
+	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+		ret = blk_throtl_init(ctx.bdev->bd_disk);
+		if (ret)
+			goto out_finish;
+	}
+
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
 	if (ret)
 		goto out_finish;
@@ -1396,6 +1453,16 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
 
 	blkg_conf_init(&ctx, buf);
 
+	ret = blkg_conf_open_bdev(&ctx);
+	if (ret)
+		goto out_finish;
+
+	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
+		ret = blk_throtl_init(ctx.bdev->bd_disk);
+		if (ret)
+			goto out_finish;
+	}
+
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
 	if (ret)
 		goto out_finish;
@@ -1488,6 +1555,9 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
 	struct cgroup_subsys_state *pos_css;
 	struct blkcg_gq *blkg;
 
+	if (!blk_throtl_activated(q))
+		return;
+
 	spin_lock_irq(&q->queue_lock);
 	/*
 	 * queue_lock is held, rcu lock is not needed here technically.
@@ -1617,57 +1687,19 @@ out_unlock:
 	return throttled;
 }
 
-int blk_throtl_init(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-	struct throtl_data *td;
-	int ret;
-
-	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
-	if (!td)
-		return -ENOMEM;
-
-	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
-	throtl_service_queue_init(&td->service_queue);
-
-	q->td = td;
-	td->queue = q;
-
-	/* activate policy */
-	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
-	if (ret)
-		kfree(td);
-	return ret;
-}
-
 void blk_throtl_exit(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 
-	BUG_ON(!q->td);
+	if (!blk_throtl_activated(q))
+		return;
+
 	del_timer_sync(&q->td->service_queue.pending_timer);
 	throtl_shutdown_wq(q);
 	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
 	kfree(q->td);
 }
 
-void blk_throtl_register(struct gendisk *disk)
-{
-	struct request_queue *q = disk->queue;
-	struct throtl_data *td;
-
-	td = q->td;
-	BUG_ON(!td);
-
-	if (blk_queue_nonrot(q))
-		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-	else
-		td->throtl_slice = DFL_THROTL_SLICE_HD;
-	td->track_bio_latency = !queue_is_mq(q);
-	if (!td->track_bio_latency)
-		blk_stat_enable_accounting(q);
-}
-
 static int __init throtl_init(void)
 {
 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
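
The freeze/quiesce step in the new blk_throtl_init() is what makes the
lockless q->td check in blk_should_throtl() (next file) safe. A toy
pthread model of that pattern, with illustrative names only:
queue_enter()/queue_exit() stand in for the q_usage_counter reference
taken by bio_queue_enter(), and activate() stands in for
blk_mq_freeze_queue() draining readers before q->td is published.

    /* Build with: cc -pthread model.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int usage_count;   /* models q_usage_counter */
    static int frozen;        /* models the frozen-queue state */
    static int *td;           /* models q->td: NULL until activated */

    static void queue_enter(void)   /* models bio_queue_enter() */
    {
        pthread_mutex_lock(&lock);
        while (frozen)
            pthread_cond_wait(&cond, &lock);
        usage_count++;
        pthread_mutex_unlock(&lock);
    }

    static void queue_exit(void)
    {
        pthread_mutex_lock(&lock);
        if (--usage_count == 0)
            pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    static void activate(void)      /* models blk_throtl_init() */
    {
        pthread_mutex_lock(&lock);
        frozen = 1;                 /* block new queue_enter() callers */
        while (usage_count > 0)     /* drain in-flight users */
            pthread_cond_wait(&cond, &lock);
        td = malloc(sizeof(*td));   /* publish: no reader sees this half-done */
        frozen = 0;                 /* models blk_mq_unfreeze_queue() */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        queue_enter();
        printf("activated before config: %d\n", td != NULL); /* 0: lazy */
        queue_exit();

        activate();                 /* the "first configuration" */

        queue_enter();
        printf("activated after config: %d\n", td != NULL);  /* 1 */
        queue_exit();
        free(td);
        return 0;
    }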

block/blk-throttle.h

@@ -150,23 +150,33 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
  * Internal throttling interface
  */
 #ifndef CONFIG_BLK_DEV_THROTTLING
-static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
 static inline void blk_throtl_exit(struct gendisk *disk) { }
-static inline void blk_throtl_register(struct gendisk *disk) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
 #else /* CONFIG_BLK_DEV_THROTTLING */
-int blk_throtl_init(struct gendisk *disk);
 void blk_throtl_exit(struct gendisk *disk);
-void blk_throtl_register(struct gendisk *disk);
 bool __blk_throtl_bio(struct bio *bio);
 void blk_throtl_cancel_bios(struct gendisk *disk);
 
+static inline bool blk_throtl_activated(struct request_queue *q)
+{
+	return q->td != NULL;
+}
+
 static inline bool blk_should_throtl(struct bio *bio)
 {
-	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+	struct throtl_grp *tg;
 	int rw = bio_data_dir(bio);
 
+	/*
+	 * This is called under bio_queue_enter(), and it's synchronized with
+	 * the activation of blk-throtl, which is protected by
+	 * blk_mq_freeze_queue().
+	 */
+	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
+		return false;
+
+	tg = blkg_to_tg(bio->bi_blkg);
 	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
 		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
 			bio_set_flag(bio, BIO_CGROUP_ACCT);