
igc: Add lock to safeguard global Qbv variables

Access to shared variables from the hrtimer callback requires locking
to protect them, because writes to these variables (oper_gate_closed,
admin_gate_closed, and qbv_transition) can occur concurrently with the
Qbv schedule configuration path. This patch adds a spinlock,
qbv_tx_lock, to prevent such races.
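
The resulting pattern, reduced to a minimal sketch (qbv_state,
qbv_timer_cb and qbv_clear_schedule are illustrative names only; the
driver keeps these fields in struct igc_adapter, as the diff below
shows), looks like this:

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

struct qbv_state {
        struct hrtimer timer;
        spinlock_t lock;                /* protects the three flags below */
        bool qbv_transition;
        bool oper_gate_closed;
        bool admin_gate_closed;
};

/* hrtimer callback: runs asynchronously to the configuration path */
static enum hrtimer_restart qbv_timer_cb(struct hrtimer *timer)
{
        struct qbv_state *s = container_of(timer, struct qbv_state, timer);
        unsigned long flags;

        spin_lock_irqsave(&s->lock, flags);
        s->qbv_transition = true;
        s->oper_gate_closed = true;     /* update gate state under the lock */
        s->qbv_transition = false;
        spin_unlock_irqrestore(&s->lock, flags);

        return HRTIMER_NORESTART;
}

/* configuration path: may run concurrently with the timer above */
static void qbv_clear_schedule(struct qbv_state *s)
{
        unsigned long flags;

        spin_lock_irqsave(&s->lock, flags);
        s->qbv_transition = false;
        s->oper_gate_closed = false;
        s->admin_gate_closed = false;
        spin_unlock_irqrestore(&s->lock, flags);
}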

Fixes: 175c241288 ("igc: Fix TX Hang issue when QBV Gate is closed")
Suggested-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
Tested-by: Naama Meir <naamax.meir@linux.intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20230807205129.3129346-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

drivers/net/ethernet/intel/igc/igc.h

@@ -195,6 +195,10 @@ struct igc_adapter {
u32 qbv_config_change_errors;
bool qbv_transition;
unsigned int qbv_count;
/* Access to oper_gate_closed, admin_gate_closed and qbv_transition
* are protected by the qbv_tx_lock.
*/
spinlock_t qbv_tx_lock;
/* OS defined structs */
struct pci_dev *pdev;

drivers/net/ethernet/intel/igc/igc_main.c

@@ -4801,6 +4801,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
adapter->nfc_rule_count = 0;
spin_lock_init(&adapter->stats64_lock);
spin_lock_init(&adapter->qbv_tx_lock);
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGC_FLAG_HAS_MSIX;
@@ -6119,15 +6120,15 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
{
unsigned long flags;
int i;
adapter->base_time = 0;
adapter->cycle_time = NSEC_PER_SEC;
adapter->taprio_offload_enable = false;
adapter->qbv_config_change_errors = 0;
adapter->qbv_transition = false;
adapter->qbv_count = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6136,10 +6137,28 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
adapter->qbv_transition = false;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
ring->oper_gate_closed = false;
ring->admin_gate_closed = false;
}
spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
return 0;
}
static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
{
igc_qbv_clear_schedule(adapter);
return 0;
}
@@ -6150,6 +6169,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
struct timespec64 now;
unsigned long flags;
size_t n;
int i;
@@ -6217,6 +6237,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
start_time += e->interval;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
/* Check whether a queue gets configured.
* If not, set the start and end time to be end time.
*/
@@ -6241,6 +6263,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
}
}
spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
struct net_device *dev = adapter->netdev;
@@ -6619,8 +6643,11 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
{
struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
hrtimer);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
adapter->qbv_transition = true;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *tx_ring = adapter->tx_ring[i];
@@ -6633,6 +6660,9 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
}
}
adapter->qbv_transition = false;
spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
return HRTIMER_NORESTART;
}