block: serialize all debugfs operations using q->debugfs_mutex
Various places like I/O schedulers or the QoS infrastructure try to
register debugfs files on demand, which can race with creating and
removing the main queue debugfs directory.  Use the existing
debugfs_mutex to serialize all debugfs operations that rely on
q->debugfs_dir or the directories hanging off it.

To make the teardown code a little simpler, declare all debugfs dentry
pointers, not just the main one, unconditionally in blkdev.h.  Move
debugfs_mutex next to the dentries that it protects and document what
it is used for.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220614074827.458955-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 50e34d7881
commit 5cf9c91ba9
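The rule the patch establishes is simple: any code that creates or removes entries under q->debugfs_dir, or under the sched/rqos directories hanging off it, does so with q->debugfs_mutex held, and the debugfs helpers assert that with lockdep. A minimal caller-side sketch of that shape, mirroring what blk_mq_init_sched() does in the diff below (the wrapper name example_register_sched_debugfs is made up for illustration):

static void example_register_sched_debugfs(struct request_queue *q)
{
        /*
         * Hold debugfs_mutex so the main queue debugfs directory cannot
         * be created or torn down while files are added beneath it.
         */
        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_register_sched(q);       /* lockdep_assert_held() inside */
        mutex_unlock(&q->debugfs_mutex);
}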
@@ -711,11 +711,6 @@ void blk_mq_debugfs_register(struct request_queue *q)
        }
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *ctx)
{
@@ -746,6 +741,8 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q,

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (!hctx->queue->debugfs_dir)
                return;
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
@@ -773,6 +770,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        lockdep_assert_held(&q->debugfs_mutex);

        /*
         * If the parent directory has not been created yet, return, we will be
         * called again later on and the directory/files will be created then.
@@ -790,6 +789,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        lockdep_assert_held(&q->debugfs_mutex);

        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}
@@ -811,6 +812,10 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
        lockdep_assert_held(&rqos->q->debugfs_mutex);

        if (!rqos->q->debugfs_dir)
                return;
        debugfs_remove_recursive(rqos->debugfs_dir);
        rqos->debugfs_dir = NULL;
}
@@ -820,6 +825,8 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);

        lockdep_assert_held(&q->debugfs_mutex);

        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
                return;

@@ -835,6 +842,8 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
        lockdep_assert_held(&q->debugfs_mutex);

        debugfs_remove_recursive(q->rqos_debugfs_dir);
        q->rqos_debugfs_dir = NULL;
}
@@ -844,6 +853,8 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
{
        struct elevator_type *e = q->elevator->type;

        lockdep_assert_held(&q->debugfs_mutex);

        /*
         * If the parent debugfs directory has not been created yet, return;
         * We will be called again later on with appropriate parent debugfs
@@ -863,6 +874,10 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        lockdep_assert_held(&hctx->queue->debugfs_mutex);

        if (!hctx->queue->debugfs_dir)
                return;
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}
@@ -21,7 +21,6 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);

void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
@@ -42,10 +41,6 @@ static inline void blk_mq_debugfs_register(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                                struct blk_mq_hw_ctx *hctx)
{
@@ -594,7 +594,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        if (ret)
                goto err_free_map_and_rqs;

        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_register_sched(q);
        mutex_unlock(&q->debugfs_mutex);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
@@ -607,7 +609,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
                                return ret;
                        }
                }
                mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_sched_hctx(q, hctx);
                mutex_unlock(&q->debugfs_mutex);
        }

        return 0;
@@ -648,14 +652,21 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
        unsigned int flags = 0;

        queue_for_each_hw_ctx(q, hctx, i) {
                mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                mutex_unlock(&q->debugfs_mutex);

                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                flags = hctx->flags;
        }

        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_sched(q);
        mutex_unlock(&q->debugfs_mutex);

        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
@@ -294,7 +294,9 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,

void rq_qos_exit(struct request_queue *q)
{
        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_queue_rqos(q);
        mutex_unlock(&q->debugfs_mutex);

        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;
@@ -104,8 +104,11 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)

        blk_mq_unfreeze_queue(q);

        if (rqos->ops->debugfs_attrs)
        if (rqos->ops->debugfs_attrs) {
                mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_rqos(rqos);
                mutex_unlock(&q->debugfs_mutex);
        }
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
@@ -129,7 +132,9 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)

        blk_mq_unfreeze_queue(q);

        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_rqos(rqos);
        mutex_unlock(&q->debugfs_mutex);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
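The two consecutive if (rqos->ops->debugfs_attrs) lines in the first hunk above are the old single-statement form and its new braced replacement; with the locking added, the tail of rq_qos_add() reads roughly as follows (reconstructed from the hunk, indentation approximate):

        blk_mq_unfreeze_queue(q);

        if (rqos->ops->debugfs_attrs) {
                mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_rqos(rqos);
                mutex_unlock(&q->debugfs_mutex);
        }
}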
@@ -779,14 +779,13 @@ static void blk_release_queue(struct kobject *kobj)
        if (queue_is_mq(q))
                blk_mq_release(q);

        blk_trace_shutdown(q);
        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q))
                blk_mq_debugfs_unregister(q);

        bioset_exit(&q->bio_split);

        if (blk_queue_has_srcu(q))
@@ -836,17 +835,16 @@ int blk_register_queue(struct gendisk *disk)
                goto unlock;
        }

        if (queue_is_mq(q))
                __blk_mq_register_dev(dev, q);
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q)) {
                __blk_mq_register_dev(dev, q);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        }

        mutex_lock(&q->sysfs_lock);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk, NULL);
        if (ret)
@@ -482,7 +482,6 @@ struct request_queue {
#endif /* CONFIG_BLK_DEV_ZONED */

        int                     node;
        struct mutex            debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace __rcu  *blk_trace;
#endif
@@ -526,11 +525,12 @@ struct request_queue {
        struct bio_set          bio_split;

        struct dentry           *debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
        struct dentry           *sched_debugfs_dir;
        struct dentry           *rqos_debugfs_dir;
#endif
        /*
         * Serializes all debugfs metadata operations using the above dentries.
         */
        struct mutex            debugfs_mutex;

        bool                    mq_sysfs_init_done;
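Taken together, the second hunk above moves debugfs_mutex down next to the dentries it protects and drops the CONFIG_BLK_DEBUG_FS guard around the scheduler and rq-qos dentries, so the relevant struct request_queue members end up grouped roughly like this (a sketch reconstructed from the hunk; exact spacing in the tree may differ):

        struct dentry           *debugfs_dir;
        struct dentry           *sched_debugfs_dir;
        struct dentry           *rqos_debugfs_dir;
        /*
         * Serializes all debugfs metadata operations using the above dentries.
         */
        struct mutex            debugfs_mutex;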
@@ -770,14 +770,11 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        mutex_lock(&q->debugfs_mutex);
        if (rcu_dereference_protected(q->blk_trace,
                                      lockdep_is_held(&q->debugfs_mutex))) {
                __blk_trace_startstop(q, 0);
                __blk_trace_remove(q);
        }

        mutex_unlock(&q->debugfs_mutex);
}

#ifdef CONFIG_BLK_CGROUP