
md: pass in max_sectors for pers->sync_request()

For different sync_action values the sync_thread will use different
max_sectors, see details in md_sync_max_sectors(). Currently both
md_do_sync() and pers->sync_request() have to compute the same
max_sectors in each iteration. Hence pass max_sectors in to
pers->sync_request() to avoid the redundant code.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240611132251.1967786-12-yukuai1@huaweicloud.com
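
To illustrate the flow this patch establishes, here is a condensed sketch only: the loop structure and the md_sync_max_sectors() signature are simplified assumptions, and only the sync_request() calls mirror the actual diff below.

/*
 * Condensed sketch, not the real md_do_sync(): the point is that
 * max_sectors is resolved once per sync pass, based on the sync action,
 * and every pers->sync_request() call reuses that value instead of
 * re-deriving it from mddev on each iteration.
 */
static void md_do_sync_sketch(struct mddev *mddev, enum sync_action action)
{
	sector_t max_sectors = md_sync_max_sectors(mddev, action);
	sector_t j = 0, sectors;
	int skipped = 0;

	while (j < max_sectors) {
		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
						    &skipped);
		if (sectors == 0)	/* error; the real code sets MD_RECOVERY_INTR */
			break;
		j += sectors;
	}

	/* Final call with sector_nr == max_sectors lets the personality
	 * finish the pass (bitmap close-out etc.), as in the second
	 * md.c hunk below. */
	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
}
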
Yu Kuai 2024-06-11 21:22:50 +08:00 committed by Song Liu
parent bbf2076277
commit bc49694a9e
5 changed files with 10 additions and 14 deletions

drivers/md/md.c

@@ -9186,7 +9186,8 @@ void md_do_sync(struct md_thread *thread)
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;
 
-		sectors = mddev->pers->sync_request(mddev, j, &skipped);
+		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+						    &skipped);
 		if (sectors == 0) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			break;
@@ -9276,7 +9277,7 @@ void md_do_sync(struct md_thread *thread)
 			mddev->curr_resync_completed = mddev->curr_resync;
 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
 		}
-	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > MD_RESYNC_ACTIVE) {

drivers/md/md.h

@@ -729,7 +729,8 @@ struct md_personality
 	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*spare_active) (struct mddev *mddev);
-	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
+	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
+				 sector_t max_sector, int *skipped);
 	int (*resize) (struct mddev *mddev, sector_t sectors);
 	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
 	int (*check_reshape) (struct mddev *mddev);
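
Under the new prototype, a personality hook consumes the caller-supplied bound directly instead of reading mddev->dev_sectors or mddev->resync_max_sectors itself. A hypothetical stub (example_sync_request and EXAMPLE_CHUNK_SECTORS are made up for illustration; the real implementations are the raid1/raid10/raid5 changes below):

#define EXAMPLE_CHUNK_SECTORS 128	/* hypothetical per-call resync window */

static sector_t example_sync_request(struct mddev *mddev, sector_t sector_nr,
				     sector_t max_sector, int *skipped)
{
	/* the caller already picked the limit for this sync_action */
	if (sector_nr >= max_sector)
		return 0;	/* pass finished: clean up bitmap state here */

	/* ... issue resync/recovery I/O for the next window ... */
	return min_t(sector_t, EXAMPLE_CHUNK_SECTORS, max_sector - sector_nr);
}
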

drivers/md/raid1.c

@@ -2756,12 +2756,12 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
  */
 
 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
-				   int *skipped)
+				   sector_t max_sector, int *skipped)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 	struct bio *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int disk = -1;
 	int i;
 	int wonly = -1;
@@ -2777,7 +2777,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	if (init_resync(conf))
 		return 0;
 
-	max_sector = mddev->dev_sectors;
 	if (sector_nr >= max_sector) {
 		/* If we aborted, we need to abort the
 		 * sync on the 'current' bitmap chunk (there will

drivers/md/raid10.c

@@ -3139,12 +3139,12 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
  */
 
 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
-				    int *skipped)
+				    sector_t max_sector, int *skipped)
 {
 	struct r10conf *conf = mddev->private;
 	struct r10bio *r10_bio;
 	struct bio *biolist = NULL, *bio;
-	sector_t max_sector, nr_sectors;
+	sector_t nr_sectors;
 	int i;
 	int max_sync;
 	sector_t sync_blocks;
@@ -3174,10 +3174,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		return 0;
 
  skipped:
-	max_sector = mddev->dev_sectors;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
-	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-		max_sector = mddev->resync_max_sectors;
 	if (sector_nr >= max_sector) {
 		conf->cluster_sync_low = 0;
 		conf->cluster_sync_high = 0;

drivers/md/raid5.c

@@ -6457,11 +6457,10 @@ ret:
 }
 
 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
-					  int *skipped)
+					  sector_t max_sector, int *skipped)
 {
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh;
-	sector_t max_sector = mddev->dev_sectors;
 	sector_t sync_blocks;
 	int still_degraded = 0;
 	int i;