dma-mapping: Simplify arch_setup_dma_ops()
The dma_base, size and iommu arguments are only used by ARM, and can now
easily be deduced from the device itself, so there's no need to pass them
through the callchain as well.

Acked-by: Rob Herring <robh@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Michael Kelley <mhklinux@outlook.com> # For Hyper-V
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/5291c2326eab405b1aa7693aa964e8d3cb7193de.1713523152.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit f091e93306 (parent b67483b3c4)
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 /*
  * Plug in direct dma map ops.
  */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	if (IS_ENABLED(CONFIG_CPU_V7M)) {
 		/*
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
+	u64 dma_base = 0, size = 1ULL << 32;
 
+	if (dev->dma_range_map) {
+		dma_base = dma_range_map_min(dev->dma_range_map);
+		size = dma_range_map_max(dev->dma_range_map) - dma_base;
+	}
 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 
 #else
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 }
 
@@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 
 	if (device_iommu_mapped(dev))
-		arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
+		arm_setup_iommu_dma_ops(dev);
 
 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
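The interesting part of the ARM diff is in arm_setup_iommu_dma_ops() above: the bounds that used to be threaded through the callchain are now recovered from dev->dma_range_map, with a default 4GB aperture at address 0 when no range map exists. The following standalone C sketch models that deduction; struct bus_dma_region here is a simplified stand-in for the kernel's type, and range_map_min()/range_map_max() are hypothetical reimplementations of dma_range_map_min()/dma_range_map_max() that treat the upper bound as exclusive, which is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the kernel's struct bus_dma_region: each entry
 * describes one DMA-addressable window; a zero-size entry terminates
 * the array. */
struct bus_dma_region {
	uint64_t dma_start;
	uint64_t size;
};

/* Lowest DMA address covered by any window (cf. dma_range_map_min()). */
static uint64_t range_map_min(const struct bus_dma_region *map)
{
	uint64_t ret = UINT64_MAX;

	for (; map->size; map++)
		if (map->dma_start < ret)
			ret = map->dma_start;
	return ret;
}

/* Exclusive upper bound over all windows (cf. dma_range_map_max()). */
static uint64_t range_map_max(const struct bus_dma_region *map)
{
	uint64_t ret = 0;

	for (; map->size; map++)
		if (map->dma_start + map->size > ret)
			ret = map->dma_start + map->size;
	return ret;
}

int main(void)
{
	/* Two windows, as a firmware dma-ranges property might describe. */
	const struct bus_dma_region map[] = {
		{ .dma_start = 0x00000000, .size = 0x40000000 },
		{ .dma_start = 0x80000000, .size = 0x40000000 },
		{ 0 },
	};
	const struct bus_dma_region *dma_range_map = map; /* or NULL */

	/* Same defaults and deduction as the new arm_setup_iommu_dma_ops(). */
	uint64_t dma_base = 0, size = 1ULL << 32;

	if (dma_range_map) {
		dma_base = range_map_min(dma_range_map);
		size = range_map_max(dma_range_map) - dma_base;
	}
	printf("IOMMU mapping: base=%#llx size=%#llx\n",
	       (unsigned long long)dma_base, (unsigned long long)size);
	return 0;
}

With the two example windows this prints base=0 size=0xc0000000: the aperture spans the extremes of all windows, which is exactly the information the old dma_base/size arguments carried.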
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -46,8 +46,7 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 #endif
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	int cls = cache_line_size_of_cpu();
 
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	dev->dma_coherent = coherent;
 }
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1675,12 +1675,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
 	if (ret == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
-	/*
-	 * Historically this routine doesn't fail driver probing due to errors
-	 * in acpi_iommu_configure_id()
-	 */
-
-	arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
+	arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
 
 	return 0;
 }
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
 void hv_setup_dma_ops(struct device *dev, bool coherent)
 {
-	/*
-	 * Hyper-V does not offer a vIOMMU in the guest
-	 * VM, so pass 0/NULL for the IOMMU settings
-	 */
-	arch_setup_dma_ops(dev, 0, 0, coherent);
+	arch_setup_dma_ops(dev, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -95,7 +95,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 {
 	const struct bus_dma_region *map = NULL;
 	struct device_node *bus_np;
-	u64 dma_start = 0;
 	u64 mask, end = 0;
 	bool coherent;
 	int iommu_ret;
@@ -118,7 +117,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		return ret == -ENODEV ? 0 : ret;
 	} else {
 		/* Determine the overall bounds of all DMA regions */
-		dma_start = dma_range_map_min(map);
 		end = dma_range_map_max(map);
 	}
 
@@ -175,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	} else
 		dev_dbg(dev, "device is behind an iommu\n");
 
-	arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent);
+	arch_setup_dma_ops(dev, coherent);
 
 	if (iommu_ret)
 		of_dma_set_restricted_buffer(dev, np);
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
 #else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-				      u64 size, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
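Finally, the header hunk above adjusts both the real prototype and the !CONFIG_ARCH_HAS_SETUP_DMA_OPS stub to the two-argument form. As a rough standalone illustration of that pattern (not kernel code: struct device is reduced to the one flag used here, and the "architecture implementation" mirrors the trivial MIPS one from this diff), the empty stub is what lets bus code call the hook unconditionally:

#include <stdbool.h>
#include <stdio.h>

/* Reduced stand-in for struct device: only the coherency flag matters here. */
struct device {
	bool dma_coherent;
};

#define CONFIG_ARCH_HAS_SETUP_DMA_OPS 1	/* comment out to take the stub path */

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
/* Modelled on the trivial MIPS implementation in this diff: the arch
 * hook just records the firmware-described coherency on the device. */
static void arch_setup_dma_ops(struct device *dev, bool coherent)
{
	dev->dma_coherent = coherent;
}
#else
/* Without the config option the hook compiles to nothing, so callers
 * need no #ifdefs of their own. */
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif

int main(void)
{
	struct device dev = { .dma_coherent = false };

	/* Bus code (OF, ACPI, Hyper-V) now passes only the device and its
	 * coherency attribute; no more placeholder base/size arguments. */
	arch_setup_dma_ops(&dev, true);
	printf("dma_coherent=%d\n", (int)dev.dma_coherent);
	return 0;
}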