dmaengine fixes for v6.12
Merge tag 'dmaengine-fix-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - TI driver fix to set EOP for cyclic BCDMA transfers

 - sh rz-dmac driver fix for handling config with zero address

* tag 'dmaengine-fix-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: ti: k3-udma: Set EOP for all TRs in cyclic BCDMA transfer
  dmaengine: sh: rz-dmac: handle configs where one address is zero
commit e8529dcb12
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -601,22 +601,25 @@ static int rz_dmac_config(struct dma_chan *chan,
 	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
 	u32 val;
 
-	channel->src_per_address = config->src_addr;
 	channel->dst_per_address = config->dst_addr;
-
-	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
-	if (val == CHCFG_DS_INVALID)
-		return -EINVAL;
-
 	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
-	channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+	if (channel->dst_per_address) {
+		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+		if (val == CHCFG_DS_INVALID)
+			return -EINVAL;
+
+		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+	}
 
-	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
-	if (val == CHCFG_DS_INVALID)
-		return -EINVAL;
-
+	channel->src_per_address = config->src_addr;
 	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
-	channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+	if (channel->src_per_address) {
+		val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+		if (val == CHCFG_DS_INVALID)
+			return -EINVAL;
+
+		channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+	}
 
 	return 0;
 }
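The rz-dmac change above covers the common case where a DMA client only fills in the slave address for the direction it actually uses. A minimal, hypothetical client sketch of such a config (the function name and FIFO address are placeholders, not taken from any real driver):

#include <linux/dmaengine.h>

/*
 * Hypothetical DEV_TO_MEM configuration: only the device-side (source)
 * address and width are set; dst_addr and dst_addr_width stay zero because
 * the memory side is described by the buffer passed at prep time.  With the
 * fix above, rz_dmac_config() skips the destination width check instead of
 * failing with -EINVAL on the undefined width.
 */
static int example_rx_config(struct dma_chan *chan, dma_addr_t rx_fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= rx_fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		/* .dst_addr / .dst_addr_width intentionally left zero */
	};

	return dmaengine_slave_config(chan, &cfg);
}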
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
 
 	d->static_tr.elcnt = elcnt;
 
-	/*
-	 * PDMA must to close the packet when the channel is in packet mode.
-	 * For TR mode when the channel is not cyclic we also need PDMA to close
-	 * the packet otherwise the transfer will stall because PDMA holds on
-	 * the data it has received from the peripheral.
-	 */
 	if (uc->config.pkt_mode || !uc->cyclic) {
+		/*
+		 * PDMA must close the packet when the channel is in packet mode.
+		 * For TR mode when the channel is not cyclic we also need PDMA
+		 * to close the packet otherwise the transfer will stall because
+		 * PDMA holds on the data it has received from the peripheral.
+		 */
 		unsigned int div = dev_width * elcnt;
 
 		if (uc->cyclic)
 			d->static_tr.bstcnt = d->residue / d->sglen / div;
 		else
 			d->static_tr.bstcnt = d->residue / div;
-
-		if (uc->config.dir == DMA_DEV_TO_MEM &&
-		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
-			return -EINVAL;
+	} else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+		   uc->config.dir == DMA_DEV_TO_MEM &&
+		   uc->cyclic) {
+		/*
+		 * For cyclic mode with BCDMA we have to set EOP in each TR to
+		 * prevent short packet errors seen on channel teardown. So the
+		 * PDMA must close the packet after every TR transfer by setting
+		 * burst count equal to the number of bytes transferred.
+		 */
+		struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
+
+		d->static_tr.bstcnt =
+			(tr_req->icnt0 * tr_req->icnt1) / dev_width;
 	} else {
 		d->static_tr.bstcnt = 0;
 	}
 
+	if (uc->config.dir == DMA_DEV_TO_MEM &&
+	    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+		return -EINVAL;
+
 	return 0;
 }
 
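In the new DMA_TYPE_BCDMA cyclic branch above, the static TR burst count is derived from the geometry of a single TR rather than from the descriptor residue. A rough arithmetic sketch with made-up numbers (not taken from any real board or driver):

/*
 * Hypothetical values mirroring the computation in the BCDMA cyclic branch:
 * one type-1 TR per period carrying icnt0 * icnt1 bytes.
 */
static unsigned int example_bcdma_bstcnt(void)
{
	unsigned int dev_width = 2;	/* DMA_SLAVE_BUSWIDTH_2_BYTES */
	unsigned int icnt0 = 512;	/* hypothetical TR inner count, bytes */
	unsigned int icnt1 = 1;		/* hypothetical TR outer count */

	/* (512 * 1) / 2 = 256, i.e. one full TR's worth of data per packet */
	return (icnt0 * icnt1) / dev_width;
}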
@@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
@@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
 	unsigned int i;
 	int num_tr;
+	u32 period_csf = 0;
 
 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
 				      &tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 		period_addr = buf_addr |
 			((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
 
+	/*
+	 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+	 * last TR of a descriptor, to mark the packet as complete.
+	 * This is required for getting the teardown completion message in case
+	 * of TX, and to avoid short-packet error in case of RX.
+	 *
+	 * As we are in cyclic mode, we do not know which period might be the
+	 * last one, so set the flag for each period.
+	 */
+	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+	    uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+		period_csf = CPPI5_TR_CSF_EOP;
+	}
+
 	for (i = 0; i < periods; i++) {
 		int tr_idx = i * num_tr;
 
@@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 		}
 
 		if (!(flags & DMA_PREP_INTERRUPT))
-			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
-					 CPPI5_TR_CSF_SUPR_EVT);
+			period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+		if (period_csf)
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
 
 		period_addr += period_len;
 	}
@@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
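The cyclic changes above are reached through the ordinary dmaengine cyclic API; a hedged client-side sketch (the period count and size are illustrative, not taken from a specific driver):

#include <linux/dmaengine.h>

/*
 * Hypothetical cyclic RX setup: eight periods of 4 KiB each.  On BCDMA <->
 * PDMA channels, the k3-udma changes above make every period's TR carry
 * CPPI5_TR_CSF_EOP, which avoids short-packet errors at channel teardown.
 */
static struct dma_async_tx_descriptor *
example_prep_cyclic_rx(struct dma_chan *chan, dma_addr_t buf)
{
	size_t period_len = 4096;
	size_t buf_len = 8 * period_len;

	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
}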