md: factor out helpers for different sync_action in md_do_sync()
Make code cleaner by replacing if else if with switch, and it's more obvious now what is doing for each sync_action. There are no functional changes. Signed-off-by: Yu Kuai <yukuai3@huawei.com> Signed-off-by: Song Liu <song@kernel.org> Link: https://lore.kernel.org/r/20240611132251.1967786-11-yukuai1@huaweicloud.com
This commit is contained in:
parent
d249e54188
commit
bbf2076277
123
drivers/md/md.c
123
drivers/md/md.c
@ -8914,6 +8914,77 @@ void md_allow_write(struct mddev *mddev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(md_allow_write);
|
||||
|
||||
static sector_t md_sync_max_sectors(struct mddev *mddev,
|
||||
enum sync_action action)
|
||||
{
|
||||
switch (action) {
|
||||
case ACTION_RESYNC:
|
||||
case ACTION_CHECK:
|
||||
case ACTION_REPAIR:
|
||||
atomic64_set(&mddev->resync_mismatches, 0);
|
||||
fallthrough;
|
||||
case ACTION_RESHAPE:
|
||||
return mddev->resync_max_sectors;
|
||||
case ACTION_RECOVER:
|
||||
return mddev->dev_sectors;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
|
||||
{
|
||||
sector_t start = 0;
|
||||
struct md_rdev *rdev;
|
||||
|
||||
switch (action) {
|
||||
case ACTION_CHECK:
|
||||
case ACTION_REPAIR:
|
||||
return mddev->resync_min;
|
||||
case ACTION_RESYNC:
|
||||
if (!mddev->bitmap)
|
||||
return mddev->recovery_cp;
|
||||
return 0;
|
||||
case ACTION_RESHAPE:
|
||||
/*
|
||||
* If the original node aborts reshaping then we continue the
|
||||
* reshaping, so set again to avoid restart reshape from the
|
||||
* first beginning
|
||||
*/
|
||||
if (mddev_is_clustered(mddev) &&
|
||||
mddev->reshape_position != MaxSector)
|
||||
return mddev->reshape_position;
|
||||
return 0;
|
||||
case ACTION_RECOVER:
|
||||
start = MaxSector;
|
||||
rcu_read_lock();
|
||||
rdev_for_each_rcu(rdev, mddev)
|
||||
if (rdev->raid_disk >= 0 &&
|
||||
!test_bit(Journal, &rdev->flags) &&
|
||||
!test_bit(Faulty, &rdev->flags) &&
|
||||
!test_bit(In_sync, &rdev->flags) &&
|
||||
rdev->recovery_offset < start)
|
||||
start = rdev->recovery_offset;
|
||||
rcu_read_unlock();
|
||||
|
||||
/* If there is a bitmap, we need to make sure all
|
||||
* writes that started before we added a spare
|
||||
* complete before we start doing a recovery.
|
||||
* Otherwise the write might complete and (via
|
||||
* bitmap_endwrite) set a bit in the bitmap after the
|
||||
* recovery has checked that bit and skipped that
|
||||
* region.
|
||||
*/
|
||||
if (mddev->bitmap) {
|
||||
mddev->pers->quiesce(mddev, 1);
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
}
|
||||
return start;
|
||||
default:
|
||||
return MaxSector;
|
||||
}
|
||||
}
|
||||
|
||||
#define SYNC_MARKS 10
|
||||
#define SYNC_MARK_STEP (3*HZ)
|
||||
#define UPDATE_FREQUENCY (5*60*HZ)
|
||||
@ -9032,56 +9103,8 @@ void md_do_sync(struct md_thread *thread)
|
||||
spin_unlock(&all_mddevs_lock);
|
||||
} while (mddev->curr_resync < MD_RESYNC_DELAYED);
|
||||
|
||||
j = 0;
|
||||
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
|
||||
/* resync follows the size requested by the personality,
|
||||
* which defaults to physical size, but can be virtual size
|
||||
*/
|
||||
max_sectors = mddev->resync_max_sectors;
|
||||
atomic64_set(&mddev->resync_mismatches, 0);
|
||||
/* we don't use the checkpoint if there's a bitmap */
|
||||
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
|
||||
j = mddev->resync_min;
|
||||
else if (!mddev->bitmap)
|
||||
j = mddev->recovery_cp;
|
||||
|
||||
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
|
||||
max_sectors = mddev->resync_max_sectors;
|
||||
/*
|
||||
* If the original node aborts reshaping then we continue the
|
||||
* reshaping, so set j again to avoid restart reshape from the
|
||||
* first beginning
|
||||
*/
|
||||
if (mddev_is_clustered(mddev) &&
|
||||
mddev->reshape_position != MaxSector)
|
||||
j = mddev->reshape_position;
|
||||
} else {
|
||||
/* recovery follows the physical size of devices */
|
||||
max_sectors = mddev->dev_sectors;
|
||||
j = MaxSector;
|
||||
rcu_read_lock();
|
||||
rdev_for_each_rcu(rdev, mddev)
|
||||
if (rdev->raid_disk >= 0 &&
|
||||
!test_bit(Journal, &rdev->flags) &&
|
||||
!test_bit(Faulty, &rdev->flags) &&
|
||||
!test_bit(In_sync, &rdev->flags) &&
|
||||
rdev->recovery_offset < j)
|
||||
j = rdev->recovery_offset;
|
||||
rcu_read_unlock();
|
||||
|
||||
/* If there is a bitmap, we need to make sure all
|
||||
* writes that started before we added a spare
|
||||
* complete before we start doing a recovery.
|
||||
* Otherwise the write might complete and (via
|
||||
* bitmap_endwrite) set a bit in the bitmap after the
|
||||
* recovery has checked that bit and skipped that
|
||||
* region.
|
||||
*/
|
||||
if (mddev->bitmap) {
|
||||
mddev->pers->quiesce(mddev, 1);
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
}
|
||||
}
|
||||
max_sectors = md_sync_max_sectors(mddev, action);
|
||||
j = md_sync_position(mddev, action);
|
||||
|
||||
pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
|
||||
pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
|
||||
|
Loading…
Reference in New Issue
Block a user