dmaengine: ti: k3-udma-glue: Add function to get device pointer for DMA API

Glue layer users should use the device of the DMA for DMA mapping and
allocations, as it is the DMA which accesses the descriptors and
buffers, not the clients.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Link: https://lore.kernel.org/r/20201208090440.31792-5-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit 426506a7e0
parent 1609c15a20
Peter Ujfalusi, 2020-12-08 11:04:24 +02:00, committed by Vinod Koul
4 changed files with 25 additions and 0 deletions
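For context, a minimal sketch of how a glue layer user (for example a
networking driver) might use the new TX helper. This is hypothetical
client code, not part of this patch: the helper names from the patch are
real, while client_alloc_tx_descs() is illustrative only. Descriptor
memory is allocated against the device returned by
k3_udma_glue_tx_get_dma_device(), not against the client's own device,
because it is the UDMA that reads and writes the descriptors.

#include <linux/dma-mapping.h>
#include <linux/dma/k3-udma-glue.h>

/* Hypothetical client helper: allocate TX descriptor memory that the
 * UDMA (not the client device) will access.
 */
static void *client_alloc_tx_descs(struct k3_udma_glue_tx_channel *tx_chn,
				   size_t size, dma_addr_t *dma)
{
	struct device *dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);

	/* Allocate and map for the DMA's device, not the client's device */
	return dma_alloc_coherent(dma_dev, size, dma, GFP_KERNEL);
}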

drivers/dma/ti/k3-udma-glue.c

@@ -493,6 +493,13 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
 
+struct device *
+k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
+{
+	return xudma_get_device(tx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
+
 static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 {
 	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;

@@ -1201,3 +1208,10 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
 	return flow->virq;
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
+
+struct device *
+k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
+{
+	return xudma_get_device(rx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

drivers/dma/ti/k3-udma-private.c

@@ -50,6 +50,12 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
 }
 EXPORT_SYMBOL(of_xudma_dev_get);
 
+struct device *xudma_get_device(struct udma_dev *ud)
+{
+	return ud->dev;
+}
+EXPORT_SYMBOL(xudma_get_device);
+
 u32 xudma_dev_get_psil_base(struct udma_dev *ud)
 {
 	return ud->psil_base;

drivers/dma/ti/k3-udma.h

@@ -112,6 +112,7 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
 			    u32 dst_thread);
 
 struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+struct device *xudma_get_device(struct udma_dev *ud);
 void xudma_dev_put(struct udma_dev *ud);
 u32 xudma_dev_get_psil_base(struct udma_dev *ud);
 struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);

include/linux/dma/k3-udma-glue.h

@@ -41,6 +41,8 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
 u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
 u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
 int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
 
 enum {
 	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,

@@ -130,5 +132,7 @@ int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
 			       u32 flow_idx);
 int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
 				u32 flow_idx);
+struct device *
+k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
 
 #endif /* K3_UDMA_GLUE_H_ */
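A corresponding sketch for the RX side, again hypothetical client code
rather than part of this patch (client_map_rx_buf() is illustrative
only): streaming mappings of receive buffers are done with the device
returned by k3_udma_glue_rx_get_dma_device().

#include <linux/dma-mapping.h>
#include <linux/dma/k3-udma-glue.h>

/* Hypothetical client helper: map a receive buffer for the UDMA.
 * Returns 0 on success, the negative errno from dma_mapping_error()
 * otherwise.
 */
static int client_map_rx_buf(struct k3_udma_glue_rx_channel *rx_chn,
			     void *buf, size_t len, dma_addr_t *addr)
{
	struct device *dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);

	*addr = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
	return dma_mapping_error(dma_dev, *addr);
}

On teardown, the same device must be used for dma_unmap_single() and
dma_free_coherent() as was used for the original mapping or allocation.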