
dma-mapping: call ->unmap_page and ->unmap_sg unconditionally

Almost all instances of the dma_map_ops ->map_page()/map_sg() methods
implement ->unmap_page()/unmap_sg() too.  The one instance which doesn't is
dma_dummy_ops, which is used to fail the DMA mapping, and thus there won't
be any calls to ->unmap_page()/unmap_sg().

Remove the checks for ->unmap_page()/unmap_sg() and call them directly to
create an interface that is symmetrical to ->map_page()/map_sg().
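
For illustration only (not part of this patch), a minimal sketch of what the
unconditional calls imply for a dma_map_ops provider: once ->map_page()/map_sg()
is implemented, ->unmap_page()/unmap_sg() must be wired up as well, even as a
stub, because the core no longer checks these pointers for NULL.  The "my_*"
names below are hypothetical:

	#include <linux/dma-map-ops.h>
	#include <linux/io.h>

	static dma_addr_t my_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		/* Trivial 1:1 "mapping", just for the sketch. */
		return page_to_phys(page) + offset;
	}

	static void my_unmap_page(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
	{
		/* Nothing to tear down here, but the method must now exist. */
	}

	static const struct dma_map_ops my_dma_ops = {
		.map_page	= my_map_page,
		.unmap_page	= my_unmap_page,
		/* .map_sg would likewise need a matching .unmap_sg */
	};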

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Leon Romanovsky, 2024-07-24 21:04:48 +03:00, committed by Christoph Hellwig
parent 3be9b84689
commit f69e342eec
2 changed files with 23 additions and 2 deletions

kernel/dma/dummy.c

@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
 {
 	return DMA_MAPPING_ERROR;
 }
+static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	/*
+	 * Dummy ops doesn't support map_page, so unmap_page should never be
+	 * called.
+	 */
+	WARN_ON_ONCE(true);
+}
 
 static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir,
@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 	return -EINVAL;
 }
+static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	/*
+	 * Dummy ops doesn't support map_sg, so unmap_sg should never be called.
+	 */
+	WARN_ON_ONCE(true);
+}
 
 static int dma_dummy_supported(struct device *hwdev, u64 mask)
 {
 	return 0;
@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
 const struct dma_map_ops dma_dummy_ops = {
 	.mmap = dma_dummy_mmap,
 	.map_page = dma_dummy_map_page,
+	.unmap_page = dma_dummy_unmap_page,
 	.map_sg = dma_dummy_map_sg,
+	.unmap_sg = dma_dummy_unmap_sg,
 	.dma_supported = dma_dummy_supported,
 };

kernel/dma/mapping.c

@@ -177,7 +177,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_unmap_page_direct(dev, addr + size))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
-	else if (ops->unmap_page)
+	else
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir);
 }
@@ -291,7 +291,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_unmap_sg_direct(dev, sg, nents))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
-	else if (ops->unmap_sg)
+	else
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 EXPORT_SYMBOL(dma_unmap_sg_attrs);