null_blk: cleanup discard handling

null_handle_discard() is called from both null_handle_rq() and
null_handle_bio(). As these functions are only passed a nullb_cmd
structure, this forces pointer dereferences to identify the discard
operation code and to access the sector range to be discarded.
Simplify all this by changing the interface of the functions
null_handle_discard() and null_handle_memory_backed() to pass along
the operation code, operation start sector and number of sectors. With
this change null_handle_discard() can be called directly from
null_handle_memory_backed().

Also add a message warning that the discard configuration attribute has
no effect when memory backing is disabled.

No functional change is introduced by this patch.
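
For reference, the interface change amounts to the following signature
change (a standalone sketch with stand-in typedefs, not the kernel
declarations; the "_before"/"_after" suffixes exist only so both
prototypes can coexist in one file):

	#include <stddef.h>

	/* Stand-in typedefs for illustration; the kernel's definitions differ. */
	typedef unsigned long long sector_t;
	typedef unsigned int blk_status_t;
	struct nullb;
	struct nullb_device;

	/* Before: length given as a byte count, device reached via struct nullb. */
	void null_handle_discard_before(struct nullb *nullb, sector_t sector,
					size_t n);

	/* After: range given in sectors, device passed directly, status returned. */
	blk_status_t null_handle_discard_after(struct nullb_device *dev,
					       sector_t sector,
					       sector_t nr_sectors);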

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

@@ -1076,13 +1076,16 @@ static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
 	kunmap_atomic(dst);
 }
 
-static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
+static blk_status_t null_handle_discard(struct nullb_device *dev,
+					sector_t sector, sector_t nr_sectors)
 {
+	struct nullb *nullb = dev->nullb;
+	size_t n = nr_sectors << SECTOR_SHIFT;
 	size_t temp;
 
 	spin_lock_irq(&nullb->lock);
 	while (n > 0) {
-		temp = min_t(size_t, n, nullb->dev->blocksize);
+		temp = min_t(size_t, n, dev->blocksize);
 		null_free_sector(nullb, sector, false);
 		if (null_cache_active(nullb))
 			null_free_sector(nullb, sector, true);
@@ -1090,6 +1093,8 @@ static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
 		n -= temp;
 	}
 	spin_unlock_irq(&nullb->lock);
+
+	return BLK_STS_OK;
 }
 
 static int null_handle_flush(struct nullb *nullb)
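
Note how the new interface converts the sector count back to a byte count
with nr_sectors << SECTOR_SHIFT: SECTOR_SHIFT is 9 because the block layer
addresses 512-byte sectors. A minimal standalone illustration of that
conversion:

	#include <stdio.h>

	#define SECTOR_SHIFT 9	/* one block layer sector = 512 bytes */

	int main(void)
	{
		unsigned long long nr_sectors = 8;	/* e.g. a 4KB discard */
		unsigned long long bytes = nr_sectors << SECTOR_SHIFT;

		printf("%llu sectors = %llu bytes\n", nr_sectors, bytes);	/* 8 = 4096 */
		return 0;
	}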
@@ -1149,17 +1154,10 @@ static int null_handle_rq(struct nullb_cmd *cmd)
 	struct nullb *nullb = cmd->nq->dev->nullb;
 	int err;
 	unsigned int len;
-	sector_t sector;
+	sector_t sector = blk_rq_pos(rq);
 	struct req_iterator iter;
 	struct bio_vec bvec;
 
-	sector = blk_rq_pos(rq);
-
-	if (req_op(rq) == REQ_OP_DISCARD) {
-		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
-		return 0;
-	}
-
 	spin_lock_irq(&nullb->lock);
 	rq_for_each_segment(bvec, rq, iter) {
 		len = bvec.bv_len;
@@ -1183,18 +1181,10 @@ static int null_handle_bio(struct nullb_cmd *cmd)
 	struct nullb *nullb = cmd->nq->dev->nullb;
 	int err;
 	unsigned int len;
-	sector_t sector;
+	sector_t sector = bio->bi_iter.bi_sector;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 
-	sector = bio->bi_iter.bi_sector;
-
-	if (bio_op(bio) == REQ_OP_DISCARD) {
-		null_handle_discard(nullb, sector,
-			bio_sectors(bio) << SECTOR_SHIFT);
-		return 0;
-	}
-
 	spin_lock_irq(&nullb->lock);
 	bio_for_each_segment(bvec, bio, iter) {
 		len = bvec.bv_len;
@@ -1263,11 +1253,16 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
 }
 
 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
-						     enum req_opf op)
+						     enum req_opf op,
+						     sector_t sector,
+						     sector_t nr_sectors)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	int err;
 
+	if (op == REQ_OP_DISCARD)
+		return null_handle_discard(dev, sector, nr_sectors);
+
 	if (dev->queue_mode == NULL_Q_BIO)
 		err = null_handle_bio(cmd);
 	else
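
With the operation code and sector range now passed in, discard is
short-circuited here before the bio/request dispatch, which is what made
the REQ_OP_DISCARD branches in null_handle_rq() and null_handle_bio()
above removable. A minimal userspace sketch of that control flow
(simplified stand-ins, not the driver code):

	#include <stdio.h>

	enum req_opf { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

	/* Stand-in for null_handle_discard(); 0 plays the role of BLK_STS_OK. */
	static int handle_discard(unsigned long long sector,
				  unsigned long long nr_sectors)
	{
		printf("discard: sector %llu, %llu sectors\n", sector, nr_sectors);
		return 0;
	}

	/* Mirrors the new flow: discard is handled first, reads and writes
	 * fall through to the queue-mode specific path. */
	static int handle_memory_backed(enum req_opf op, unsigned long long sector,
					unsigned long long nr_sectors)
	{
		if (op == REQ_OP_DISCARD)
			return handle_discard(sector, nr_sectors);
		/* ... read/write via null_handle_bio()/null_handle_rq() ... */
		return 0;
	}

	int main(void)
	{
		return handle_memory_backed(REQ_OP_DISCARD, 0, 8);
	}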
@@ -1343,7 +1338,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd,
 	}
 
 	if (dev->memory_backed)
-		return null_handle_memory_backed(cmd, op);
+		return null_handle_memory_backed(cmd, op, sector, nr_sectors);
 
 	return BLK_STS_OK;
 }
@@ -1589,6 +1584,12 @@ static void null_config_discard(struct nullb *nullb)
 	if (nullb->dev->discard == false)
 		return;
 
+	if (!nullb->dev->memory_backed) {
+		nullb->dev->discard = false;
+		pr_info("discard option is ignored without memory backing\n");
+		return;
+	}
+
 	if (nullb->dev->zoned) {
 		nullb->dev->discard = false;
 		pr_info("discard option is ignored in zoned mode\n");