block: change rq_integrity_vec to respect the iterator

If we allocate a bio that is larger than the NVMe maximum request size,
attach integrity metadata to it, and send it to the NVMe subsystem, the
integrity metadata will be corrupted.

Splitting the bio works correctly. The function bio_split will clone the
bio, trim the iterator of the first bio and advance the iterator of the
second bio.
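
As a rough, self-contained illustration of that iterator bookkeeping, here is a
user-space sketch with made-up names (toy_iter, done and size are simplified
stand-ins, not the kernel's bvec_iter fields; bio_split itself does more, such
as cloning the bio):

  #include <stdio.h>

  /* toy iterator: how many bytes were consumed, how many remain */
  struct toy_iter {
          unsigned int done;
          unsigned int size;
  };

  /* split "whole" at "bytes": the first half is trimmed to "bytes",
   * the second half starts where the first half ends */
  static void toy_split(struct toy_iter whole, unsigned int bytes,
                        struct toy_iter *first, struct toy_iter *second)
  {
          *first = whole;
          first->size = bytes;            /* trim the first bio's iterator */

          *second = whole;
          second->done += bytes;          /* advance the second bio's iterator */
          second->size -= bytes;
  }

  int main(void)
  {
          struct toy_iter whole = { .done = 0, .size = 8192 }, a, b;

          toy_split(whole, 4096, &a, &b);
          printf("first : done=%u size=%u\n", a.done, a.size);
          printf("second: done=%u size=%u\n", b.done, b.size);
          return 0;
  }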

However, the function rq_integrity_vec has a bug: it returns the first
vector of the bio's metadata and completely disregards the metadata
iterator that was advanced when the bio was split. Thus, the second bio
uses the same metadata as the first bio, which leads to metadata
corruption.
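
A minimal user-space sketch of the failure mode, with toy structs and made-up
sizes standing in for the bio's integrity payload and its iterator: once the
second bio's iterator has been advanced, returning the whole first vector
points at the first half's metadata again, while an iterator-aware lookup
points at the second half's.

  #include <stdio.h>

  struct toy_vec  { unsigned int offset, len; };   /* one metadata vector */
  struct toy_iter { unsigned int done, size; };    /* consumed / remaining bytes */

  /* old behaviour: ignore the iterator, hand back the first vector as-is */
  static struct toy_vec first_vec(struct toy_vec v, struct toy_iter it)
  {
          (void)it;
          return v;
  }

  /* fixed behaviour: describe the iterator's current position */
  static struct toy_vec current_vec(struct toy_vec v, struct toy_iter it)
  {
          unsigned int left = v.len - it.done;

          return (struct toy_vec){
                  .offset = v.offset + it.done,
                  .len    = left < it.size ? left : it.size,
          };
  }

  int main(void)
  {
          struct toy_vec  meta = { .offset = 0, .len = 128 };  /* metadata of the whole bio */
          struct toy_iter it   = { .done = 64, .size = 64 };   /* second bio after the split */

          struct toy_vec o = first_vec(meta, it);
          struct toy_vec n = current_vec(meta, it);

          printf("old: offset=%u len=%u (first half's metadata again)\n", o.offset, o.len);
          printf("new: offset=%u len=%u (second half's metadata)\n", n.offset, n.len);
          return 0;
  }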

This commit changes rq_integrity_vec so that it calls mp_bvec_iter_bvec
instead of returning the first vector. mp_bvec_iter_bvec reads the
iterator and uses it to build a bvec for the current position in the
iterator.
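
Roughly what such an iterator-based lookup computes, sketched in user space
under assumed, simplified names (idx, bvec_done and size mirror bvec_iter's
bi_idx, bi_bvec_done and bi_size; this is an illustration, not the kernel
macro): take the vector the iterator currently points at, shift its offset by
the bytes already consumed inside that vector, and clamp the length to what
the iterator still has left.

  #include <stdio.h>

  struct toy_bvec { unsigned int page_id, offset, len; };   /* stand-in for struct bio_vec */

  struct toy_bvec_iter {                                    /* stand-in for struct bvec_iter */
          unsigned int idx;        /* current vector */
          unsigned int bvec_done;  /* bytes consumed within that vector */
          unsigned int size;       /* bytes remaining in total */
  };

  static unsigned int min_u(unsigned int a, unsigned int b)
  {
          return a < b ? a : b;
  }

  /* build a bvec describing the iterator's current position */
  static struct toy_bvec iter_bvec(const struct toy_bvec *vec, struct toy_bvec_iter it)
  {
          return (struct toy_bvec){
                  .page_id = vec[it.idx].page_id,
                  .offset  = vec[it.idx].offset + it.bvec_done,
                  .len     = min_u(it.size, vec[it.idx].len - it.bvec_done),
          };
  }

  int main(void)
  {
          struct toy_bvec meta[2] = { { 1, 0, 4096 }, { 2, 0, 4096 } };
          /* e.g. the second part of a split bio: 2 KiB into the first vector,
           * 6 KiB of metadata still to process */
          struct toy_bvec_iter it = { .idx = 0, .bvec_done = 2048, .size = 6144 };
          struct toy_bvec cur = iter_bvec(meta, it);

          printf("page=%u offset=%u len=%u\n", cur.page_id, cur.offset, cur.len);
          return 0;
  }

Because the length is clamped against both the current vector and the bytes
remaining in the iterator, the same lookup also addresses metadata that spans
more than one segment.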

The "queue_max_integrity_segments(rq->q) > 1" check was removed, because
the updated rq_integrity_vec function works correctly with multiple
segments.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/49d1afaa-f934-6ed2-a678-e0d428c63a65@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Mikulas Patocka, 2024-05-27 17:40:10 +02:00; committed by Jens Axboe
parent 44348870de
commit cf546dd289
2 changed files with 10 additions and 10 deletions

--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
                 struct nvme_command *cmnd)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+        struct bio_vec bv = rq_integrity_vec(req);
 
-        iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
-                        rq_dma_dir(req), 0);
+        iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
         if (dma_mapping_error(dev->dev, iod->meta_dma))
                 return BLK_STS_IOERR;
         cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
@@ -967,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
                 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
                 dma_unmap_page(dev->dev, iod->meta_dma,
-                               rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+                               rq_integrity_vec(req).bv_len, rq_dma_dir(req));
         }
 
         if (blk_rq_nr_phys_segments(req))

--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -90,14 +90,13 @@ static inline bool blk_integrity_rq(struct request *rq)
 }
 
 /*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
+ * Return the current bvec that contains the integrity data. bip_iter may be
+ * advanced to iterate over the integrity data.
  */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
 {
-        if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
-                return NULL;
-        return rq->bio->bi_integrity->bip_vec;
+        return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
+                                 rq->bio->bi_integrity->bip_iter);
 }
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
@@ -148,7 +147,8 @@ static inline int blk_integrity_rq(struct request *rq)
 
 static inline struct bio_vec *rq_integrity_vec(struct request *rq)
 {
-        return NULL;
+        /* the optimizer will remove all calls to this function */
+        return (struct bio_vec){ };
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */