
block: move read-only and supported checks into (__)blkdev_issue_zeroout

Move these checks out of the lower-level helpers and into the higher-level
ones to prepare for refactoring.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20240701165219.1571322-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2024-07-01 18:51:17 +02:00 committed by Jens Axboe
parent ff760a8f0d
commit f6eacb2654

View File

@ -111,17 +111,12 @@ static sector_t bio_write_zeroes_limit(struct block_device *bdev)
(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask); (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
} }
static int __blkdev_issue_write_zeroes(struct block_device *bdev, static void __blkdev_issue_write_zeroes(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask, sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop, unsigned flags) struct bio **biop, unsigned flags)
{ {
struct bio *bio = *biop; struct bio *bio = *biop;
if (bdev_read_only(bdev))
return -EPERM;
if (!bdev_write_zeroes_sectors(bdev))
return -EOPNOTSUPP;
while (nr_sects) { while (nr_sects) {
unsigned int len = min_t(sector_t, nr_sects, unsigned int len = min_t(sector_t, nr_sects,
bio_write_zeroes_limit(bdev)); bio_write_zeroes_limit(bdev));
@ -138,7 +133,6 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
} }
*biop = bio; *biop = bio;
return 0;
} }
/* /*
@ -154,7 +148,7 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
return min(pages, (sector_t)BIO_MAX_VECS); return min(pages, (sector_t)BIO_MAX_VECS);
} }
static int __blkdev_issue_zero_pages(struct block_device *bdev, static void __blkdev_issue_zero_pages(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask, sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop) struct bio **biop)
{ {
@ -162,9 +156,6 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
int bi_size = 0; int bi_size = 0;
unsigned int sz; unsigned int sz;
if (bdev_read_only(bdev))
return -EPERM;
while (nr_sects != 0) { while (nr_sects != 0) {
bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects), bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
REQ_OP_WRITE, gfp_mask); REQ_OP_WRITE, gfp_mask);
@ -182,7 +173,6 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
} }
*biop = bio; *biop = bio;
return 0;
} }
/** /**
@ -208,15 +198,19 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
unsigned flags) unsigned flags)
{ {
int ret; if (bdev_read_only(bdev))
return -EPERM;
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, if (bdev_write_zeroes_sectors(bdev)) {
biop, flags); __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK)) gfp_mask, biop, flags);
return ret; } else {
if (flags & BLKDEV_ZERO_NOFALLBACK)
return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, return -EOPNOTSUPP;
biop); __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
biop);
}
return 0;
} }
EXPORT_SYMBOL(__blkdev_issue_zeroout); EXPORT_SYMBOL(__blkdev_issue_zeroout);
@ -245,21 +239,22 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
if ((sector | nr_sects) & bs_mask) if ((sector | nr_sects) & bs_mask)
return -EINVAL; return -EINVAL;
if (bdev_read_only(bdev))
return -EPERM;
if ((flags & BLKDEV_ZERO_NOFALLBACK) && !try_write_zeroes)
return -EOPNOTSUPP;
retry: retry:
bio = NULL; bio = NULL;
blk_start_plug(&plug); blk_start_plug(&plug);
if (try_write_zeroes) { if (try_write_zeroes) {
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
gfp_mask, &bio, flags); &bio, flags);
} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
gfp_mask, &bio);
} else { } else {
/* No zeroing offload support */ __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
ret = -EOPNOTSUPP; &bio);
} }
if (ret == 0 && bio) { if (bio) {
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);
bio_put(bio); bio_put(bio);
} }