md: change the return value type of md_write_start to void
Commit cc27b0c78c ("md: fix deadlock between mddev_suspend() and
md_write_start()") made md_write_start() abort with false when the
mddev is suspended, which fixed a deadlock that occurred when
mddev_suspend() was called while holding the reconfig_mutex. Since
mddev_suspend() now includes lockdep_assert_not_held(), it can no
longer be called with the reconfig_mutex held, which makes the abort
unnecessary. Remove the abort and change the function's return type
to void.
Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240525185257.3896201-2-linan666@huaweicloud.com
commit 03e792eaf1
parent a8768a1345
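
To make the interface change concrete, here is a minimal before/after
sketch of a personality's make_request path, distilled from the hunks
below (the function names are illustrative, not from the tree; error
handling and the actual I/O submission are elided):

	/* Before: md_write_start() could return false when the array was
	 * suspended, and every caller had to unwind. */
	static bool make_request_before(struct mddev *mddev, struct bio *bio)
	{
		if (!md_write_start(mddev, bio))
			return false;	/* write not recorded, bail out */
		/* ... queue the write ... */
		return true;
	}

	/* After: md_write_start() always succeeds, blocking in wait_event()
	 * until any pending superblock update has completed. */
	static bool make_request_after(struct mddev *mddev, struct bio *bio)
	{
		md_write_start(mddev, bio);
		/* ... queue the write ... */
		return true;
	}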
diff --git a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8640,12 +8640,12 @@ EXPORT_SYMBOL(md_done_sync);
  * A return value of 'false' means that the write wasn't recorded
  * and cannot proceed as the array is being suspend.
  */
-bool md_write_start(struct mddev *mddev, struct bio *bi)
+void md_write_start(struct mddev *mddev, struct bio *bi)
 {
 	int did_change = 0;
 
 	if (bio_data_dir(bi) != WRITE)
-		return true;
+		return;
 
 	BUG_ON(mddev->ro == MD_RDONLY);
 	if (mddev->ro == MD_AUTO_READ) {
@@ -8678,15 +8678,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	if (!mddev->has_superblocks)
-		return true;
+		return;
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
-		   is_md_suspended(mddev));
-	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
-		percpu_ref_put(&mddev->writes_pending);
-		return false;
-	}
-	return true;
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
 
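Read together, the two md.c hunks leave md_write_start() with an
unconditional wait; a sketch of the resulting tail of the function,
reconstructed from the diff above (the did_change bookkeeping in the
middle of the function is elided):

	void md_write_start(struct mddev *mddev, struct bio *bi)
	{
		/* ... mark the array active, bump writes_pending ... */
		if (!mddev->has_superblocks)
			return;
		/* Wait until md_check_recovery() has written the superblock;
		 * with suspension no longer able to hold the reconfig_mutex,
		 * this wait is guaranteed to make progress. */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	}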
diff --git a/drivers/md/md.h b/drivers/md/md.h
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -785,7 +785,7 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t
 extern void md_wakeup_thread(struct md_thread __rcu *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
-extern bool md_write_start(struct mddev *mddev, struct bio *bi);
+extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1687,8 +1687,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
 	if (bio_data_dir(bio) == READ)
 		raid1_read_request(mddev, bio, sectors, NULL);
 	else {
-		if (!md_write_start(mddev,bio))
-			return false;
+		md_write_start(mddev,bio);
 		raid1_write_request(mddev, bio, sectors);
 	}
 	return true;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
 	    && md_flush_request(mddev, bio))
 		return true;
 
-	if (!md_write_start(mddev, bio))
-		return false;
+	md_write_start(mddev, bio);
 
 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 		if (!raid10_handle_discard(mddev, bio))
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6078,8 +6078,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 		ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
 	}
 
-	if (!md_write_start(mddev, bi))
-		return false;
+	md_write_start(mddev, bi);
 	/*
 	 * If array is degraded, better not do chunk aligned read because
 	 * later we might have to read it again in order to reconstruct
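The safety of dropping the is_md_suspended() escape from the
wait_event() rests on the lockdep rule in mddev_suspend() that the
commit message refers to; a hedged sketch of that invariant (the
assertion exists per the commit message, but the signature and body
here are abridged assumptions, not quoted from the tree):

	int mddev_suspend(struct mddev *mddev, bool interruptible)
	{
		/* A suspender holding the reconfig_mutex could starve writers
		 * blocked in md_write_start(); lockdep now forbids that
		 * combination outright, so the wait always makes progress. */
		lockdep_assert_not_held(&mddev->reconfig_mutex);
		/* ... quiesce the array, wait for active I/O to drain ... */
		return 0;
	}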