wifi: mt76: fix tx packet loss when scanning on DBDC
When queueing packets, only the MT76_RESET flag of the primary PHY is
checked. If the primary PHY is scanning or changing channels, this can
lead to packet loss for tx on the second PHY.

Fix this by passing the phy to the .tx_queue_skb op and using it to
check the correct flag.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
commit 5d581c3323
parent 7f819a2f4f
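To make the failure mode concrete before the diff: the snippet below is a minimal, standalone model of the race, not mt76 code. The types and helpers (fake_phy, fake_dev, drop_old, drop_new, RESET_BIT) are invented for illustration; only the MT76_RESET semantics and the primary-PHY (dev->phy) versus second-PHY distinction come from the patch.

/*
 * Standalone model of the bug, not mt76 code: the struct and helper
 * names here are invented; only MT76_RESET semantics and the
 * dev->phy (primary) vs. second-PHY distinction come from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define RESET_BIT (1UL << 0)		/* stands in for MT76_RESET */

struct fake_phy {
	unsigned long state;
	const char *name;
};

struct fake_dev {
	struct fake_phy phy;		/* primary PHY, like dev->phy */
};

/* Old check: always looks at the primary PHY, whatever queued the skb. */
static bool drop_old(const struct fake_dev *dev)
{
	return dev->phy.state & RESET_BIT;
}

/* Fixed check: looks at the PHY the packet is actually queued on. */
static bool drop_new(const struct fake_phy *phy)
{
	return phy->state & RESET_BIT;
}

int main(void)
{
	struct fake_dev dev = { .phy = { RESET_BIT, "phy0" } };	/* scanning */
	struct fake_phy phy1 = { 0, "phy1" };			/* idle */

	/* A frame for phy1 arrives while phy0 scans. */
	printf("old check drops %s tx: %d\n", phy1.name, drop_old(&dev));
	printf("new check drops %s tx: %d\n", phy1.name, drop_new(&phy1));
	return 0;
}

On a DBDC device both bands share one mt76_dev, so the old dev->phy.state check gates the second band on the primary band's scan state; threading the struct mt76_phy pointer through .tx_queue_skb lets each band consult its own flag, as the diff below does.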
drivers/net/wireless/mediatek/mt76/dma.c

@@ -532,7 +532,7 @@ error:
 }
 
 static int
-mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 		      enum mt76_txq_id qid, struct sk_buff *skb,
 		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
 {
@@ -542,6 +542,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	struct mt76_tx_info tx_info = {
 		.skb = skb,
 	};
+	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_hw *hw;
 	int len, n = 0, ret = -ENOMEM;
 	struct mt76_txwi_cache *t;
@@ -549,7 +550,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	dma_addr_t addr;
 	u8 *txwi;
 
-	if (test_bit(MT76_RESET, &dev->phy.state))
+	if (test_bit(MT76_RESET, &phy->state))
 		goto free_skb;
 
 	t = mt76_get_txwi(dev);
drivers/net/wireless/mediatek/mt76/mt76.h

@@ -256,7 +256,7 @@ struct mt76_queue_ops {
 		     int idx, int n_desc, int bufsize,
 		     u32 ring_base);
 
-	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
 			    enum mt76_txq_id qid, struct sk_buff *skb,
 			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);
 
@@ -1127,7 +1127,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
 #define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
 #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
-#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
 #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
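A side note on the macro change above: handing &((dev)->mphy) to the op only works because mt76 driver dev structures conventionally begin with a union of the shared struct mt76_dev and the primary struct mt76_phy, so &((dev)->mphy) and the primary PHY embedded in (dev)->mt76 name the same storage. The sketch below reproduces that layout from memory with simplified, invented names; treat the exact declarations as an assumption, not a quote from mt76.h.

/*
 * Assumed layout sketch (simplified names, not the real mt76 structs):
 * the driver's private dev begins with a union so the same base address
 * can be viewed either as the shared dev or as its primary PHY.
 */
#include <stdio.h>

struct mt76_phy_s { unsigned long state; };

struct mt76_dev_s {
	struct mt76_phy_s phy;		/* primary PHY, must be first */
};

struct driver_dev {
	union {				/* must be first */
		struct mt76_dev_s mt76;
		struct mt76_phy_s mphy;
	};
	int private_driver_state;	/* placeholder for the rest */
};

int main(void)
{
	struct driver_dev dev = { 0 };

	/* Both names alias the primary PHY at the start of the struct. */
	printf("aliases: %d\n", (void *)&dev.mphy == (void *)&dev.mt76.phy);
	return 0;
}

That aliasing is why only the mt76_tx_queue_skb() macro changes here while the other queue macros keep passing &((dev)->mt76).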
drivers/net/wireless/mediatek/mt76/sdio.c

@@ -514,13 +514,14 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
 }
 
 static int
-mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 		   enum mt76_txq_id qid, struct sk_buff *skb,
 		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
 {
 	struct mt76_tx_info tx_info = {
 		.skb = skb,
 	};
+	struct mt76_dev *dev = phy->dev;
 	int err, len = skb->len;
 	u16 idx = q->head;
 
drivers/net/wireless/mediatek/mt76/testmode.c

@@ -53,7 +53,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
 	       q->queued < q->ndesc / 2) {
 		int ret;
 
-		ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb),
+		ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb),
 						   wcid, NULL);
 		if (ret < 0)
 			break;
drivers/net/wireless/mediatek/mt76/tx.c

@@ -308,7 +308,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 	int idx;
 
 	non_aql = !info->tx_time_est;
-	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
+	idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
 	if (idx < 0 || !sta)
 		return idx;
 
drivers/net/wireless/mediatek/mt76/usb.c

@@ -850,13 +850,14 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
 }
 
 static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 		   enum mt76_txq_id qid, struct sk_buff *skb,
 		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
 {
 	struct mt76_tx_info tx_info = {
 		.skb = skb,
 	};
+	struct mt76_dev *dev = phy->dev;
 	u16 idx = q->head;
 	int err;
 