Including fixes from can and netfilter.

Things are slowing down quite a bit, mostly driver fixes here. No known ongoing investigations.

Current release - new code bugs:

 - eth: ti: am65-cpsw:
     - fix multi queue Rx on J7
     - fix warning in am65_cpsw_nuss_remove_rx_chns()

Previous releases - regressions:

 - mptcp: do not require admin perm to list endpoints, got missed in a refactoring
 - mptcp: use sock_kfree_s instead of kfree

Previous releases - always broken:

 - sctp: properly validate chunk size in sctp_sf_ootb() fix OOB access
 - virtio_net: make RSS interact properly with queue number
 - can: mcp251xfd: mcp251xfd_get_tef_len(): fix length calculation
 - can: mcp251xfd: mcp251xfd_ring_alloc(): fix coalescing configuration when switching CAN modes

Misc:

 - revert earlier hns3 fixes, they were ignoring IOMMU abstractions and need to be reworked
 - can: {cc770,sja1000}_isa: allow building on x86_64

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmctJTUACgkQMUZtbf5S
Irt2YQ/+LTGPXcn+KMRgDnocqoNbu9qrpFhGVO9iWCzG9iOni9NKIBqpBnUqth5S
m7GlaR1sl/6f4Fdb1jEXdC3AmzrVK6EDV53k9ec73U0O5U/w+WMGILEfKMNbFWKh
PYujJqTJHbwyOtyFNQPC9BMJexvKgJl7CQqztABcJtL6mPZqTFwMoL24JT6vfGlb
vZYiyQmxezmnQq4Z/d4g3E/Qf6WO5MwGnKZMfbSJ5EnmFERE7IZwkZQcz4WEOwU8
DhlXEiO0A9u26J4DMUvNNWIzcQB9jE2J03S/kYcsBZZjYNYZYveIx6WuRmFJPYv8
GDeNXa/6m7rj5kbYQNm594X04pnkMsCQhuPPpLbvTK+EJjKmy1P2ulWa8REnjBdL
ZwMJ1KLXS6iZYKnZbPjTipPQEkn6D5Vrrso5QHPnPCoZuJsMvtfCVGOWQ/LST+7D
Fpfdo9XUZ49MKfGVTYLtOkhJoyLYbfT5lt4Y/2SFS2HyOQtNC8s8h1KZxxB+44uW
N+bTO0U8BLtsTC/c9Hjc4K4m594mi0NSMMZ610jf4J60FrtrznKr7PACJ4siNRLj
3Sf5Clb0MGltolzrGsStksEXYm8u7tWPvQUGGy/HNPTnT7qwkX78ywpB4P4VtCym
UDfmoWNMyWi/HJ6wlCzUIGXIolLN4cf3QpvqV2U5AHLDtynV8Ig=
=iWv1
-----END PGP SIGNATURE-----

Merge tag 'net-6.12-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from can and netfilter.

  Things are slowing down quite a bit, mostly driver fixes here. No known ongoing investigations.

  Current release - new code bugs:
   - eth: ti: am65-cpsw:
       - fix multi queue Rx on J7
       - fix warning in am65_cpsw_nuss_remove_rx_chns()

  Previous releases - regressions:
   - mptcp: do not require admin perm to list endpoints, got missed in a refactoring
   - mptcp: use sock_kfree_s instead of kfree

  Previous releases - always broken:
   - sctp: properly validate chunk size in sctp_sf_ootb() fix OOB access
   - virtio_net: make RSS interact properly with queue number
   - can: mcp251xfd: mcp251xfd_get_tef_len(): fix length calculation
   - can: mcp251xfd: mcp251xfd_ring_alloc(): fix coalescing configuration when switching CAN modes

  Misc:
   - revert earlier hns3 fixes, they were ignoring IOMMU abstractions and need to be reworked
   - can: {cc770,sja1000}_isa: allow building on x86_64"

* tag 'net-6.12-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (42 commits)
  drivers: net: ionic: add missed debugfs cleanup to ionic_probe() error path
  net/smc: do not leave a dangling sk pointer in __smc_create()
  rxrpc: Fix missing locking causing hanging calls
  net/smc: Fix lookup of netdev by using ib_device_get_netdev()
  net: arc: rockchip: fix emac mdio node support
  net: arc: fix the device for dma_map_single/dma_unmap_single
  virtio_net: Update rss when set queue
  virtio_net: Sync rss config to device when virtnet_probe
  virtio_net: Add hash_key_length check
  virtio_net: Support dynamic rss indirection table size
  netfilter: nf_tables: wait for rcu grace period on net_device removal
  net: stmmac: Fix unbalanced IRQ wake disable warning on single irq case
  net: vertexcom: mse102x: Fix possible double free of TX skb
  mptcp: use sock_kfree_s instead of kfree
  mptcp: no admin perm to list endpoints
  net: phy: ti: add PHY_RST_AFTER_CLK_EN flag
  net: ethernet: ti: am65-cpsw: fix warning in am65_cpsw_nuss_remove_rx_chns()
  net: ethernet: ti: am65-cpsw: Fix multi queue Rx on J7
  net: hns3: fix kernel crash when uninstalling driver
  Revert "Merge branch 'there-are-some-bugfix-for-the-hns3-ethernet-driver'"
  ...
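As background for the mcp251xfd entries above: the TEF-length fix deals with a classic ring ambiguity where head == tail can mean either "empty" or "completely full", so mcp251xfd_get_tef_len() now consults the chip's FIFO-empty status together with the number of free TX buffers to break the tie. The sketch below only illustrates that general disambiguation pattern with made-up names and a hypothetical ring size; it is not the driver code itself (the actual change appears in the diff further down).

/* Illustrative sketch only -- not mcp251xfd driver code. */
#include <stdbool.h>
#include <stdio.h>

#define RING_OBJ_NUM 8U	/* hypothetical ring size */

/* With head == tail the occupancy is either 0 or RING_OBJ_NUM; an
 * out-of-band "empty" flag (standing in for the hardware status bit)
 * resolves the ambiguity.
 */
static unsigned int ring_occupancy(unsigned int head, unsigned int tail,
				   bool fifo_empty)
{
	unsigned int len = (head - tail) % RING_OBJ_NUM;

	if (len == 0 && !fifo_empty)
		return RING_OBJ_NUM;	/* wrapped around: completely full */

	return len;
}

int main(void)
{
	printf("%u\n", ring_occupancy(4, 1, false));	/* 3 entries in flight */
	printf("%u\n", ring_occupancy(5, 5, true));	/* ambiguous case: empty -> 0 */
	printf("%u\n", ring_occupancy(5, 5, false));	/* ambiguous case: full  -> 8 */
	return 0;
}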
commit bfc64d9b7e

CREDITS
@@ -1204,6 +1204,10 @@ S: Dreisbachstrasse 24
 S: D-57250 Netphen
 S: Germany
 
+N: Florian Fainelli
+E: f.fainelli@gmail.com
+D: DSA
+
 N: Rik Faith
 E: faith@acm.org
 D: Future Domain TMC-16x0 SCSI driver (author)
@@ -61,7 +61,7 @@ properties:
         - gmii
         - rgmii
         - sgmii
-        - 1000BaseX
+        - 1000base-x
 
   xlnx,phy-type:
     description:
@@ -293,7 +293,6 @@ operations:
       doc: Get endpoint information
      attribute-set: attr
      dont-validate: [ strict ]
-     flags: [ uns-admin-perm ]
      do: &get-addr-attrs
        request:
          attributes:
@@ -121,7 +121,7 @@ format, the Group Extension is set in the PS-field.
 
 On the other hand, when using PDU1 format, the PS-field contains a so-called
 Destination Address, which is _not_ part of the PGN. When communicating a PGN
-from user space to kernel (or vice versa) and PDU2 format is used, the PS-field
+from user space to kernel (or vice versa) and PDU1 format is used, the PS-field
 of the PGN shall be set to zero. The Destination Address shall be set
 elsewhere.
 
@@ -16083,7 +16083,6 @@ F: drivers/net/wireless/
 
 NETWORKING [DSA]
 M: Andrew Lunn <andrew@lunn.ch>
-M: Florian Fainelli <f.fainelli@gmail.com>
 M: Vladimir Oltean <olteanv@gmail.com>
 S: Maintained
 F: Documentation/devicetree/bindings/net/dsa/
@@ -1011,7 +1011,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
 
 	/* common for all type of bus errors */
 	priv->can.can_stats.bus_error++;
-	stats->rx_errors++;
 
 	/* propagate the error condition to the CAN stack */
 	skb = alloc_can_err_skb(dev, &cf);
@@ -1027,26 +1026,32 @@ static int c_can_handle_bus_err(struct net_device *dev,
 	case LEC_STUFF_ERROR:
 		netdev_dbg(dev, "stuff error\n");
 		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		stats->rx_errors++;
 		break;
 	case LEC_FORM_ERROR:
 		netdev_dbg(dev, "form error\n");
 		cf->data[2] |= CAN_ERR_PROT_FORM;
+		stats->rx_errors++;
 		break;
 	case LEC_ACK_ERROR:
 		netdev_dbg(dev, "ack error\n");
 		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+		stats->tx_errors++;
 		break;
 	case LEC_BIT1_ERROR:
 		netdev_dbg(dev, "bit1 error\n");
 		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		stats->tx_errors++;
 		break;
 	case LEC_BIT0_ERROR:
 		netdev_dbg(dev, "bit0 error\n");
 		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		stats->tx_errors++;
 		break;
 	case LEC_CRC_ERROR:
 		netdev_dbg(dev, "CRC error\n");
 		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+		stats->rx_errors++;
 		break;
 	default:
 		break;
@@ -7,7 +7,7 @@ if CAN_CC770
 
 config CAN_CC770_ISA
 	tristate "ISA Bus based legacy CC770 driver"
-	depends on ISA
+	depends on HAS_IOPORT
 	help
 	  This driver adds legacy support for CC770 and AN82527 chips
 	  connected to the ISA bus using I/O port, memory mapped or
@@ -1765,7 +1765,8 @@ static int m_can_close(struct net_device *dev)
 	netif_stop_queue(dev);
 
 	m_can_stop(dev);
-	free_irq(dev->irq, dev);
+	if (dev->irq)
+		free_irq(dev->irq, dev);
 
 	m_can_clean(dev);
 
@@ -2,7 +2,8 @@
 
 config CAN_ROCKCHIP_CANFD
 	tristate "Rockchip CAN-FD controller"
-	depends on OF || COMPILE_TEST
+	depends on OF
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	select CAN_RX_OFFLOAD
 	help
 	  Say Y here if you want to use CAN-FD controller found on
@@ -87,7 +87,7 @@ config CAN_PLX_PCI
 
 config CAN_SJA1000_ISA
 	tristate "ISA Bus based legacy SJA1000 driver"
-	depends on ISA
+	depends on HAS_IOPORT
 	help
 	  This driver adds legacy support for SJA1000 chips connected to
 	  the ISA bus using I/O port, memory mapped or indirect access.
@@ -2,7 +2,7 @@
 //
 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
 //
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
 //	 Marc Kleine-Budde <kernel@pengutronix.de>
 //
 // Based on:
@@ -483,9 +483,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
 	};
 	const struct ethtool_coalesce ec = {
 		.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
-		.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
+		.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
+			1 : priv->rx_obj_num_coalesce_irq,
 		.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
-		.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
+		.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
+			1 : priv->tx_obj_num_coalesce_irq,
 	};
 	struct can_ram_layout layout;
 
@@ -16,9 +16,9 @@
 
 #include "mcp251xfd.h"
 
-static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
+static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
 {
-	return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
 }
 
 static inline int
@@ -122,7 +122,11 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
 	if (err)
 		return err;
 
-	if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
+	/* If the chip says the TX-FIFO is empty, but there are no TX
+	 * buffers free in the ring, we assume all have been sent.
+	 */
+	if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) &&
+	    mcp251xfd_get_tx_free(tx_ring) == 0) {
 		*len_p = tx_ring->obj_num;
 		return 0;
 	}
@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
|
||||
{
|
||||
struct arc_emac_priv *priv = netdev_priv(ndev);
|
||||
struct net_device_stats *stats = &ndev->stats;
|
||||
struct device *dev = ndev->dev.parent;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < TX_BD_NUM; i++) {
|
||||
@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
|
||||
stats->tx_bytes += skb->len;
|
||||
}
|
||||
|
||||
dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
|
||||
dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
|
||||
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
|
||||
|
||||
/* return the sk_buff to system */
|
||||
@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
|
||||
static int arc_emac_rx(struct net_device *ndev, int budget)
|
||||
{
|
||||
struct arc_emac_priv *priv = netdev_priv(ndev);
|
||||
struct device *dev = ndev->dev.parent;
|
||||
unsigned int work_done;
|
||||
|
||||
for (work_done = 0; work_done < budget; work_done++) {
|
||||
@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
|
||||
continue;
|
||||
}
|
||||
|
||||
addr = dma_map_single(&ndev->dev, (void *)skb->data,
|
||||
addr = dma_map_single(dev, (void *)skb->data,
|
||||
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&ndev->dev, addr)) {
|
||||
if (dma_mapping_error(dev, addr)) {
|
||||
if (net_ratelimit())
|
||||
netdev_err(ndev, "cannot map dma buffer\n");
|
||||
dev_kfree_skb(skb);
|
||||
@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
|
||||
}
|
||||
|
||||
/* unmap previosly mapped skb */
|
||||
dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
|
||||
dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
|
||||
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
|
||||
|
||||
pktlen = info & LEN_MASK;
|
||||
@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
|
||||
{
|
||||
struct arc_emac_priv *priv = netdev_priv(ndev);
|
||||
struct phy_device *phy_dev = ndev->phydev;
|
||||
struct device *dev = ndev->dev.parent;
|
||||
int i;
|
||||
|
||||
phy_dev->autoneg = AUTONEG_ENABLE;
|
||||
@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
|
||||
if (unlikely(!rx_buff->skb))
|
||||
return -ENOMEM;
|
||||
|
||||
addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
|
||||
addr = dma_map_single(dev, (void *)rx_buff->skb->data,
|
||||
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(&ndev->dev, addr)) {
|
||||
if (dma_mapping_error(dev, addr)) {
|
||||
netdev_err(ndev, "cannot dma map\n");
|
||||
dev_kfree_skb(rx_buff->skb);
|
||||
return -ENOMEM;
|
||||
@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
|
||||
static void arc_free_tx_queue(struct net_device *ndev)
|
||||
{
|
||||
struct arc_emac_priv *priv = netdev_priv(ndev);
|
||||
struct device *dev = ndev->dev.parent;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < TX_BD_NUM; i++) {
|
||||
@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
|
||||
struct buffer_state *tx_buff = &priv->tx_buff[i];
|
||||
|
||||
if (tx_buff->skb) {
|
||||
dma_unmap_single(&ndev->dev,
|
||||
dma_unmap_single(dev,
|
||||
dma_unmap_addr(tx_buff, addr),
|
||||
dma_unmap_len(tx_buff, len),
|
||||
DMA_TO_DEVICE);
|
||||
@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
|
||||
static void arc_free_rx_queue(struct net_device *ndev)
|
||||
{
|
||||
struct arc_emac_priv *priv = netdev_priv(ndev);
|
||||
struct device *dev = ndev->dev.parent;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < RX_BD_NUM; i++) {
|
||||
@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev)
|
||||
struct buffer_state *rx_buff = &priv->rx_buff[i];
|
||||
|
||||
if (rx_buff->skb) {
|
||||
dma_unmap_single(&ndev->dev,
|
||||
dma_unmap_single(dev,
|
||||
dma_unmap_addr(rx_buff, addr),
|
||||
dma_unmap_len(rx_buff, len),
|
||||
DMA_FROM_DEVICE);
|
||||
@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
|
||||
unsigned int len, *txbd_curr = &priv->txbd_curr;
|
||||
struct net_device_stats *stats = &ndev->stats;
|
||||
__le32 *info = &priv->txbd[*txbd_curr].info;
|
||||
struct device *dev = ndev->dev.parent;
|
||||
dma_addr_t addr;
|
||||
|
||||
if (skb_padto(skb, ETH_ZLEN))
|
||||
@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
|
||||
DMA_TO_DEVICE);
|
||||
addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
|
||||
|
||||
if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
|
||||
if (unlikely(dma_mapping_error(dev, addr))) {
|
||||
stats->tx_dropped++;
|
||||
stats->tx_errors++;
|
||||
dev_kfree_skb_any(skb);
|
||||
|
@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
|
||||
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
|
||||
struct device_node *np = priv->dev->of_node;
|
||||
const char *name = "Synopsys MII Bus";
|
||||
struct device_node *mdio_node;
|
||||
struct mii_bus *bus;
|
||||
int error;
|
||||
|
||||
@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
|
||||
|
||||
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
|
||||
|
||||
error = of_mdiobus_register(bus, priv->dev->of_node);
|
||||
/* Backwards compatibility for EMAC nodes without MDIO subnode. */
|
||||
mdio_node = of_get_child_by_name(np, "mdio");
|
||||
if (!mdio_node)
|
||||
mdio_node = of_node_get(np);
|
||||
|
||||
error = of_mdiobus_register(bus, mdio_node);
|
||||
of_node_put(mdio_node);
|
||||
if (error) {
|
||||
mdiobus_free(bus);
|
||||
return dev_err_probe(priv->dev, error,
|
||||
|
@@ -56,7 +56,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
 		__entry->fd_format = qm_fd_get_format(fd);
 		__entry->fd_offset = qm_fd_get_offset(fd);
 		__entry->fd_length = qm_fd_get_length(fd);
-		__entry->fd_status = fd->status;
+		__entry->fd_status = __be32_to_cpu(fd->status);
 		__assign_str(name);
 	),
 
|
@ -665,19 +665,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
||||
|
||||
if (!num_vfs) {
|
||||
enetc_msg_psi_free(pf);
|
||||
kfree(pf->vf_state);
|
||||
pf->num_vfs = 0;
|
||||
pci_disable_sriov(pdev);
|
||||
} else {
|
||||
pf->num_vfs = num_vfs;
|
||||
|
||||
pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
|
||||
GFP_KERNEL);
|
||||
if (!pf->vf_state) {
|
||||
pf->num_vfs = 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = enetc_msg_psi_init(pf);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
|
||||
@ -696,7 +688,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
||||
err_en_sriov:
|
||||
enetc_msg_psi_free(pf);
|
||||
err_msg_psi:
|
||||
kfree(pf->vf_state);
|
||||
pf->num_vfs = 0;
|
||||
|
||||
return err;
|
||||
@ -1286,6 +1277,12 @@ static int enetc_pf_probe(struct pci_dev *pdev,
|
||||
pf = enetc_si_priv(si);
|
||||
pf->si = si;
|
||||
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
|
||||
if (pf->total_vfs) {
|
||||
pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
|
||||
GFP_KERNEL);
|
||||
if (!pf->vf_state)
|
||||
goto err_alloc_vf_state;
|
||||
}
|
||||
|
||||
err = enetc_setup_mac_addresses(node, pf);
|
||||
if (err)
|
||||
@ -1363,6 +1360,8 @@ err_alloc_si_res:
|
||||
free_netdev(ndev);
|
||||
err_alloc_netdev:
|
||||
err_setup_mac_addresses:
|
||||
kfree(pf->vf_state);
|
||||
err_alloc_vf_state:
|
||||
enetc_psi_destroy(pdev);
|
||||
err_psi_create:
|
||||
return err;
|
||||
@ -1389,6 +1388,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
|
||||
enetc_free_si_resources(priv);
|
||||
|
||||
free_netdev(si->ndev);
|
||||
kfree(pf->vf_state);
|
||||
|
||||
enetc_psi_destroy(pdev);
|
||||
}
|
||||
|
@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
|
||||
{
|
||||
struct enetc_ndev_priv *priv = netdev_priv(ndev);
|
||||
struct sockaddr *saddr = addr;
|
||||
int err;
|
||||
|
||||
if (!is_valid_ether_addr(saddr->sa_data))
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
|
||||
err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
eth_hw_addr_set(ndev, saddr->sa_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int enetc_vf_set_features(struct net_device *ndev,
|
||||
|
@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
|
||||
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
|
||||
if (!pci_id)
|
||||
continue;
|
||||
if (IS_ENABLED(CONFIG_PCI_IOV))
|
||||
if (IS_ENABLED(CONFIG_PCI_IOV)) {
|
||||
device_lock(&ae_dev->pdev->dev);
|
||||
pci_disable_sriov(ae_dev->pdev);
|
||||
device_unlock(&ae_dev->pdev->dev);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
|
||||
|
@ -1293,10 +1293,8 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
|
||||
|
||||
/* save the buffer addr until the last read operation */
|
||||
*save_buf = read_buf;
|
||||
}
|
||||
|
||||
/* get data ready for the first time to read */
|
||||
if (!*ppos) {
|
||||
/* get data ready for the first time to read */
|
||||
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
|
||||
read_buf, hns3_dbg_cmd[index].buf_len);
|
||||
if (ret)
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/skbuff.h>
|
||||
@ -381,24 +380,6 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
|
||||
#define HNS3_INVALID_PTYPE \
|
||||
ARRAY_SIZE(hns3_rx_ptype_tbl)
|
||||
|
||||
static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct iommu_iotlb_gather iotlb_gather;
|
||||
size_t granule;
|
||||
|
||||
if (!domain || !iommu_is_dma_domain(domain))
|
||||
return;
|
||||
|
||||
granule = 1 << __ffs(domain->pgsize_bitmap);
|
||||
iova = ALIGN_DOWN(iova, granule);
|
||||
iotlb_gather.start = iova;
|
||||
iotlb_gather.end = iova + granule - 1;
|
||||
iotlb_gather.pgsize = granule;
|
||||
|
||||
iommu_iotlb_sync(domain, &iotlb_gather);
|
||||
}
|
||||
|
||||
static irqreturn_t hns3_irq_handle(int irq, void *vector)
|
||||
{
|
||||
struct hns3_enet_tqp_vector *tqp_vector = vector;
|
||||
@ -1051,8 +1032,6 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
|
||||
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
|
||||
{
|
||||
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
|
||||
struct net_device *netdev = ring_to_netdev(ring);
|
||||
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||
struct hns3_tx_spare *tx_spare;
|
||||
struct page *page;
|
||||
dma_addr_t dma;
|
||||
@ -1094,7 +1073,6 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
|
||||
tx_spare->buf = page_address(page);
|
||||
tx_spare->len = PAGE_SIZE << order;
|
||||
ring->tx_spare = tx_spare;
|
||||
ring->tx_copybreak = priv->tx_copybreak;
|
||||
return;
|
||||
|
||||
dma_mapping_error:
|
||||
@ -1746,9 +1724,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
||||
unsigned int type)
|
||||
{
|
||||
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
|
||||
struct hnae3_handle *handle = ring->tqp->handle;
|
||||
struct device *dev = ring_to_dev(ring);
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
unsigned int size;
|
||||
dma_addr_t dma;
|
||||
|
||||
@ -1780,13 +1756,6 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Add a SYNC command to sync io-pgtale to avoid errors in pgtable
|
||||
* prefetch
|
||||
*/
|
||||
ae_dev = hns3_get_ae_dev(handle);
|
||||
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
|
||||
hns3_dma_map_sync(dev, dma);
|
||||
|
||||
desc_cb->priv = priv;
|
||||
desc_cb->length = size;
|
||||
desc_cb->dma = dma;
|
||||
@ -2483,6 +2452,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
netdev->features = features;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -4898,30 +4868,6 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
|
||||
devm_kfree(&pdev->dev, priv->tqp_vector);
|
||||
}
|
||||
|
||||
static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
|
||||
{
|
||||
#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
|
||||
#define HNS3_MAX_PACKET_SIZE (64 * 1024)
|
||||
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
|
||||
struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
|
||||
struct hnae3_handle *handle = priv->ae_handle;
|
||||
|
||||
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
|
||||
return;
|
||||
|
||||
if (!(domain && iommu_is_dma_domain(domain)))
|
||||
return;
|
||||
|
||||
priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
|
||||
priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
|
||||
|
||||
if (priv->tx_copybreak < priv->min_tx_copybreak)
|
||||
priv->tx_copybreak = priv->min_tx_copybreak;
|
||||
if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
|
||||
handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
|
||||
}
|
||||
|
||||
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
|
||||
unsigned int ring_type)
|
||||
{
|
||||
@ -5155,7 +5101,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
|
||||
int i, j;
|
||||
int ret;
|
||||
|
||||
hns3_update_tx_spare_buf_config(priv);
|
||||
for (i = 0; i < ring_num; i++) {
|
||||
ret = hns3_alloc_ring_memory(&priv->ring[i]);
|
||||
if (ret) {
|
||||
@ -5360,8 +5305,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
|
||||
priv->ae_handle = handle;
|
||||
priv->tx_timeout_count = 0;
|
||||
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
|
||||
priv->min_tx_copybreak = 0;
|
||||
priv->min_tx_spare_buf_size = 0;
|
||||
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
|
||||
|
||||
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
|
||||
|
@ -596,8 +596,6 @@ struct hns3_nic_priv {
|
||||
struct hns3_enet_coalesce rx_coal;
|
||||
u32 tx_copybreak;
|
||||
u32 rx_copybreak;
|
||||
u32 min_tx_copybreak;
|
||||
u32 min_tx_spare_buf_size;
|
||||
};
|
||||
|
||||
union l3_hdr_info {
|
||||
|
@ -1933,31 +1933,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
|
||||
{
|
||||
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||
|
||||
if (copybreak < priv->min_tx_copybreak) {
|
||||
netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
|
||||
copybreak, priv->min_tx_copybreak);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
|
||||
{
|
||||
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||
|
||||
if (buf_size < priv->min_tx_spare_buf_size) {
|
||||
netdev_err(netdev,
|
||||
"tx spare buf size %u should be no less than %u!\n",
|
||||
buf_size, priv->min_tx_spare_buf_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hns3_set_tunable(struct net_device *netdev,
|
||||
const struct ethtool_tunable *tuna,
|
||||
const void *data)
|
||||
@ -1974,10 +1949,6 @@ static int hns3_set_tunable(struct net_device *netdev,
|
||||
|
||||
switch (tuna->id) {
|
||||
case ETHTOOL_TX_COPYBREAK:
|
||||
ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->tx_copybreak = *(u32 *)data;
|
||||
|
||||
for (i = 0; i < h->kinfo.num_tqps; i++)
|
||||
@ -1992,10 +1963,6 @@ static int hns3_set_tunable(struct net_device *netdev,
|
||||
|
||||
break;
|
||||
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
|
||||
ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
|
||||
new_tx_spare_buf_size = *(u32 *)data;
|
||||
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netdevice.h>
|
||||
@ -3585,17 +3584,6 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hclge_set_reset_pending(struct hclge_dev *hdev,
|
||||
enum hnae3_reset_type reset_type)
|
||||
{
|
||||
/* When an incorrect reset type is executed, the get_reset_level
|
||||
* function generates the HNAE3_NONE_RESET flag. As a result, this
|
||||
* type do not need to pending.
|
||||
*/
|
||||
if (reset_type != HNAE3_NONE_RESET)
|
||||
set_bit(reset_type, &hdev->reset_pending);
|
||||
}
|
||||
|
||||
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
|
||||
{
|
||||
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
|
||||
@ -3616,7 +3604,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
|
||||
*/
|
||||
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
|
||||
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
|
||||
hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
|
||||
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
|
||||
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
|
||||
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
|
||||
hdev->rst_stats.imp_rst_cnt++;
|
||||
@ -3626,7 +3614,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
|
||||
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
|
||||
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
|
||||
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
|
||||
hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
|
||||
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
|
||||
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
|
||||
hdev->rst_stats.global_rst_cnt++;
|
||||
return HCLGE_VECTOR0_EVENT_RST;
|
||||
@ -3781,7 +3769,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
|
||||
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
|
||||
HCLGE_NAME, pci_name(hdev->pdev));
|
||||
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
|
||||
IRQ_NOAUTOEN, hdev->misc_vector.name, hdev);
|
||||
0, hdev->misc_vector.name, hdev);
|
||||
if (ret) {
|
||||
hclge_free_vector(hdev, 0);
|
||||
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
|
||||
@ -4074,7 +4062,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
|
||||
case HNAE3_FUNC_RESET:
|
||||
dev_info(&pdev->dev, "PF reset requested\n");
|
||||
/* schedule again to check later */
|
||||
hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
|
||||
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
|
||||
hclge_reset_task_schedule(hdev);
|
||||
break;
|
||||
default:
|
||||
@ -4108,8 +4096,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
|
||||
clear_bit(HNAE3_FLR_RESET, addr);
|
||||
}
|
||||
|
||||
clear_bit(HNAE3_NONE_RESET, addr);
|
||||
|
||||
if (hdev->reset_type != HNAE3_NONE_RESET &&
|
||||
rst_level < hdev->reset_type)
|
||||
return HNAE3_NONE_RESET;
|
||||
@ -4251,7 +4237,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
|
||||
return false;
|
||||
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
|
||||
hdev->rst_stats.reset_fail_cnt++;
|
||||
hclge_set_reset_pending(hdev, hdev->reset_type);
|
||||
set_bit(hdev->reset_type, &hdev->reset_pending);
|
||||
dev_info(&hdev->pdev->dev,
|
||||
"re-schedule reset task(%u)\n",
|
||||
hdev->rst_stats.reset_fail_cnt);
|
||||
@ -4494,20 +4480,8 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
|
||||
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
|
||||
enum hnae3_reset_type rst_type)
|
||||
{
|
||||
#define HCLGE_SUPPORT_RESET_TYPE \
|
||||
(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
|
||||
BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
|
||||
|
||||
struct hclge_dev *hdev = ae_dev->priv;
|
||||
|
||||
if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
|
||||
/* To prevent reset triggered by hclge_reset_event */
|
||||
set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
|
||||
dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
|
||||
rst_type);
|
||||
return;
|
||||
}
|
||||
|
||||
set_bit(rst_type, &hdev->default_reset_request);
|
||||
}
|
||||
|
||||
@ -11917,6 +11891,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
|
||||
hclge_init_rxd_adv_layout(hdev);
|
||||
|
||||
/* Enable MISC vector(vector0) */
|
||||
hclge_enable_vector(&hdev->misc_vector, true);
|
||||
|
||||
ret = hclge_init_wol(hdev);
|
||||
if (ret)
|
||||
dev_warn(&pdev->dev,
|
||||
@ -11929,10 +11906,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
hclge_state_init(hdev);
|
||||
hdev->last_reset_time = jiffies;
|
||||
|
||||
/* Enable MISC vector(vector0) */
|
||||
enable_irq(hdev->misc_vector.vector_irq);
|
||||
hclge_enable_vector(&hdev->misc_vector, true);
|
||||
|
||||
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
|
||||
HCLGE_DRIVER_NAME);
|
||||
|
||||
@ -12338,7 +12311,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
|
||||
/* Disable MISC vector(vector0) */
|
||||
hclge_enable_vector(&hdev->misc_vector, false);
|
||||
disable_irq(hdev->misc_vector.vector_irq);
|
||||
synchronize_irq(hdev->misc_vector.vector_irq);
|
||||
|
||||
/* Disable all hw interrupts */
|
||||
hclge_config_mac_tnl_int(hdev, false);
|
||||
|
@ -58,9 +58,6 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
|
||||
struct hclge_dev *hdev = vport->back;
|
||||
struct hclge_ptp *ptp = hdev->ptp;
|
||||
|
||||
if (!ptp)
|
||||
return false;
|
||||
|
||||
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
|
||||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
|
||||
ptp->tx_skipped++;
|
||||
|
@ -510,9 +510,9 @@ out:
|
||||
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
|
||||
struct hnae3_knic_private_info *kinfo)
|
||||
{
|
||||
#define HCLGE_RING_REG_OFFSET 0x200
|
||||
#define HCLGE_RING_INT_REG_OFFSET 0x4
|
||||
|
||||
struct hnae3_queue *tqp;
|
||||
int i, j, reg_num;
|
||||
int data_num_sum;
|
||||
u32 *reg = data;
|
||||
@ -533,11 +533,10 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
|
||||
reg_num = ARRAY_SIZE(ring_reg_addr_list);
|
||||
for (j = 0; j < kinfo->num_tqps; j++) {
|
||||
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
|
||||
tqp = kinfo->tqp[j];
|
||||
for (i = 0; i < reg_num; i++)
|
||||
*reg++ = readl_relaxed(tqp->io_base -
|
||||
HCLGE_TQP_REG_OFFSET +
|
||||
ring_reg_addr_list[i]);
|
||||
*reg++ = hclge_read_dev(&hdev->hw,
|
||||
ring_reg_addr_list[i] +
|
||||
HCLGE_RING_REG_OFFSET * j);
|
||||
}
|
||||
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
|
||||
|
||||
|
@ -1395,17 +1395,6 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
|
||||
enum hnae3_reset_type reset_type)
|
||||
{
|
||||
/* When an incorrect reset type is executed, the get_reset_level
|
||||
* function generates the HNAE3_NONE_RESET flag. As a result, this
|
||||
* type do not need to pending.
|
||||
*/
|
||||
if (reset_type != HNAE3_NONE_RESET)
|
||||
set_bit(reset_type, &hdev->reset_pending);
|
||||
}
|
||||
|
||||
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
|
||||
{
|
||||
#define HCLGEVF_RESET_WAIT_US 20000
|
||||
@ -1555,7 +1544,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
|
||||
hdev->rst_stats.rst_fail_cnt);
|
||||
|
||||
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
|
||||
hclgevf_set_reset_pending(hdev, hdev->reset_type);
|
||||
set_bit(hdev->reset_type, &hdev->reset_pending);
|
||||
|
||||
if (hclgevf_is_reset_pending(hdev)) {
|
||||
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
|
||||
@ -1675,8 +1664,6 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
|
||||
clear_bit(HNAE3_FLR_RESET, addr);
|
||||
}
|
||||
|
||||
clear_bit(HNAE3_NONE_RESET, addr);
|
||||
|
||||
return rst_level;
|
||||
}
|
||||
|
||||
@ -1686,15 +1673,14 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
|
||||
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
|
||||
struct hclgevf_dev *hdev = ae_dev->priv;
|
||||
|
||||
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
|
||||
|
||||
if (hdev->default_reset_request)
|
||||
hdev->reset_level =
|
||||
hclgevf_get_reset_level(&hdev->default_reset_request);
|
||||
else
|
||||
hdev->reset_level = HNAE3_VF_FUNC_RESET;
|
||||
|
||||
dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
|
||||
hdev->reset_level);
|
||||
|
||||
/* reset of this VF requested */
|
||||
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
|
||||
hclgevf_reset_task_schedule(hdev);
|
||||
@ -1705,20 +1691,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
|
||||
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
|
||||
enum hnae3_reset_type rst_type)
|
||||
{
|
||||
#define HCLGEVF_SUPPORT_RESET_TYPE \
|
||||
(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
|
||||
BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
|
||||
BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
|
||||
|
||||
struct hclgevf_dev *hdev = ae_dev->priv;
|
||||
|
||||
if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
|
||||
/* To prevent reset triggered by hclge_reset_event */
|
||||
set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
|
||||
dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
|
||||
rst_type);
|
||||
return;
|
||||
}
|
||||
set_bit(rst_type, &hdev->default_reset_request);
|
||||
}
|
||||
|
||||
@ -1875,14 +1849,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
|
||||
*/
|
||||
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
|
||||
/* prepare for full reset of stack + pcie interface */
|
||||
hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
|
||||
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
|
||||
|
||||
/* "defer" schedule the reset task again */
|
||||
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
|
||||
} else {
|
||||
hdev->reset_attempts++;
|
||||
|
||||
hclgevf_set_reset_pending(hdev, hdev->reset_level);
|
||||
set_bit(hdev->reset_level, &hdev->reset_pending);
|
||||
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
|
||||
}
|
||||
hclgevf_reset_task_schedule(hdev);
|
||||
@ -2005,7 +1979,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
|
||||
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
|
||||
dev_info(&hdev->pdev->dev,
|
||||
"receive reset interrupt 0x%x!\n", rst_ing_reg);
|
||||
hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
|
||||
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
|
||||
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
|
||||
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
|
||||
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
|
||||
@ -2315,7 +2289,6 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
|
||||
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
|
||||
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
|
||||
|
||||
mutex_init(&hdev->mbx_resp.mbx_mutex);
|
||||
sema_init(&hdev->reset_sem, 1);
|
||||
@ -3015,6 +2988,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
|
||||
HCLGEVF_DRIVER_NAME);
|
||||
|
||||
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
|
||||
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
|
||||
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
|
||||
void *data)
|
||||
{
|
||||
#define HCLGEVF_RING_REG_OFFSET 0x200
|
||||
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
|
||||
|
||||
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
|
||||
struct hnae3_queue *tqp;
|
||||
int i, j, reg_um;
|
||||
u32 *reg = data;
|
||||
|
||||
@ -147,11 +147,10 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
|
||||
reg_um = ARRAY_SIZE(ring_reg_addr_list);
|
||||
for (j = 0; j < hdev->num_tqps; j++) {
|
||||
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
|
||||
tqp = &hdev->htqp[j].q;
|
||||
for (i = 0; i < reg_um; i++)
|
||||
*reg++ = readl_relaxed(tqp->io_base -
|
||||
HCLGEVF_TQP_REG_OFFSET +
|
||||
ring_reg_addr_list[i]);
|
||||
*reg++ = hclgevf_read_dev(&hdev->hw,
|
||||
ring_reg_addr_list[i] +
|
||||
HCLGEVF_RING_REG_OFFSET * j);
|
||||
}
|
||||
|
||||
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
|
||||
|
@ -1205,12 +1205,10 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if (hw->mac.type != e1000_pch_mtp) {
|
||||
ret_val = e1000e_force_smbus(hw);
|
||||
if (ret_val) {
|
||||
e_dbg("Failed to force SMBUS: %d\n", ret_val);
|
||||
goto release;
|
||||
}
|
||||
ret_val = e1000e_force_smbus(hw);
|
||||
if (ret_val) {
|
||||
e_dbg("Failed to force SMBUS: %d\n", ret_val);
|
||||
goto release;
|
||||
}
|
||||
|
||||
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
|
||||
@ -1273,13 +1271,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
|
||||
}
|
||||
|
||||
release:
|
||||
if (hw->mac.type == e1000_pch_mtp) {
|
||||
ret_val = e1000e_force_smbus(hw);
|
||||
if (ret_val)
|
||||
e_dbg("Failed to force SMBUS over MTL system: %d\n",
|
||||
ret_val);
|
||||
}
|
||||
|
||||
hw->phy.ops.release(hw);
|
||||
out:
|
||||
if (ret_val)
|
||||
|
@@ -755,6 +755,7 @@ enum i40e_filter_state {
 	I40E_FILTER_ACTIVE,	/* Added to switch by FW */
 	I40E_FILTER_FAILED,	/* Rejected by FW */
 	I40E_FILTER_REMOVE,	/* To be removed */
+	I40E_FILTER_NEW_SYNC,	/* New, not sent yet, is in i40e_sync_vsi_filters() */
 /* There is no 'removed' state; the filter struct is freed */
 };
 struct i40e_mac_filter {
|
@@ -89,6 +89,7 @@ static char *i40e_filter_state_string[] = {
 	"ACTIVE",
 	"FAILED",
 	"REMOVE",
+	"NEW_SYNC",
 };
 
 /**
|
@ -1255,6 +1255,7 @@ int i40e_count_filters(struct i40e_vsi *vsi)
|
||||
|
||||
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
|
||||
if (f->state == I40E_FILTER_NEW ||
|
||||
f->state == I40E_FILTER_NEW_SYNC ||
|
||||
f->state == I40E_FILTER_ACTIVE)
|
||||
++cnt;
|
||||
}
|
||||
@ -1441,6 +1442,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
|
||||
|
||||
new->f = add_head;
|
||||
new->state = add_head->state;
|
||||
if (add_head->state == I40E_FILTER_NEW)
|
||||
add_head->state = I40E_FILTER_NEW_SYNC;
|
||||
|
||||
/* Add the new filter to the tmp list */
|
||||
hlist_add_head(&new->hlist, tmp_add_list);
|
||||
@ -1550,6 +1553,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
|
||||
return -ENOMEM;
|
||||
new_mac->f = add_head;
|
||||
new_mac->state = add_head->state;
|
||||
if (add_head->state == I40E_FILTER_NEW)
|
||||
add_head->state = I40E_FILTER_NEW_SYNC;
|
||||
|
||||
/* Add the new filter to the tmp list */
|
||||
hlist_add_head(&new_mac->hlist, tmp_add_list);
|
||||
@ -2437,7 +2442,8 @@ static int
|
||||
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
|
||||
struct i40e_mac_filter *f)
|
||||
{
|
||||
bool enable = f->state == I40E_FILTER_NEW;
|
||||
bool enable = f->state == I40E_FILTER_NEW ||
|
||||
f->state == I40E_FILTER_NEW_SYNC;
|
||||
struct i40e_hw *hw = &vsi->back->hw;
|
||||
int aq_ret;
|
||||
|
||||
@ -2611,6 +2617,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
|
||||
|
||||
/* Add it to the hash list */
|
||||
hlist_add_head(&new->hlist, &tmp_add_list);
|
||||
f->state = I40E_FILTER_NEW_SYNC;
|
||||
}
|
||||
|
||||
/* Count the number of active (current and new) VLAN
|
||||
@ -2762,7 +2769,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
|
||||
spin_lock_bh(&vsi->mac_filter_hash_lock);
|
||||
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
|
||||
/* Only update the state if we're still NEW */
|
||||
if (new->f->state == I40E_FILTER_NEW)
|
||||
if (new->f->state == I40E_FILTER_NEW ||
|
||||
new->f->state == I40E_FILTER_NEW_SYNC)
|
||||
new->f->state = new->state;
|
||||
hlist_del(&new->hlist);
|
||||
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
|
||||
|
@ -552,13 +552,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
|
||||
static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
|
||||
{
|
||||
ice_eswitch_stop_reprs(pf);
|
||||
repr->ops.rem(repr);
|
||||
|
||||
xa_erase(&pf->eswitch.reprs, repr->id);
|
||||
|
||||
if (xa_empty(&pf->eswitch.reprs))
|
||||
ice_eswitch_disable_switchdev(pf);
|
||||
|
||||
ice_eswitch_release_repr(pf, repr);
|
||||
repr->ops.rem(repr);
|
||||
ice_repr_destroy(repr);
|
||||
|
||||
if (xa_empty(&pf->eswitch.reprs)) {
|
||||
|
@ -1830,11 +1830,12 @@ static int
|
||||
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
|
||||
struct ice_fdir_fltr *input)
|
||||
{
|
||||
u16 dest_vsi, q_index = 0;
|
||||
s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
|
||||
u16 orig_q_index = 0;
|
||||
struct ice_pf *pf;
|
||||
struct ice_hw *hw;
|
||||
int flow_type;
|
||||
u16 dest_vsi;
|
||||
u8 dest_ctl;
|
||||
|
||||
if (!vsi || !fsp || !input)
|
||||
|
@ -53,6 +53,8 @@
|
||||
*/
|
||||
#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
|
||||
|
||||
#define ICE_FDIR_NO_QUEUE_IDX -1
|
||||
|
||||
enum ice_fltr_prgm_desc_dest {
|
||||
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
|
||||
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
|
||||
@ -186,7 +188,7 @@ struct ice_fdir_fltr {
|
||||
u16 flex_fltr;
|
||||
|
||||
/* filter control */
|
||||
u16 q_index;
|
||||
s16 q_index;
|
||||
u16 orig_q_index;
|
||||
u16 dest_vsi;
|
||||
u8 dest_ctl;
|
||||
|
@ -141,6 +141,7 @@ enum idpf_vport_state {
|
||||
* @adapter: Adapter back pointer
|
||||
* @vport: Vport back pointer
|
||||
* @vport_id: Vport identifier
|
||||
* @link_speed_mbps: Link speed in mbps
|
||||
* @vport_idx: Relative vport index
|
||||
* @state: See enum idpf_vport_state
|
||||
* @netstats: Packet and byte stats
|
||||
@ -150,6 +151,7 @@ struct idpf_netdev_priv {
|
||||
struct idpf_adapter *adapter;
|
||||
struct idpf_vport *vport;
|
||||
u32 vport_id;
|
||||
u32 link_speed_mbps;
|
||||
u16 vport_idx;
|
||||
enum idpf_vport_state state;
|
||||
struct rtnl_link_stats64 netstats;
|
||||
@ -287,7 +289,6 @@ struct idpf_port_stats {
|
||||
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
|
||||
* @port_stats: per port csum, header split, and other offload stats
|
||||
* @link_up: True if link is up
|
||||
* @link_speed_mbps: Link speed in mbps
|
||||
* @sw_marker_wq: workqueue for marker packets
|
||||
*/
|
||||
struct idpf_vport {
|
||||
@ -331,7 +332,6 @@ struct idpf_vport {
|
||||
struct idpf_port_stats port_stats;
|
||||
|
||||
bool link_up;
|
||||
u32 link_speed_mbps;
|
||||
|
||||
wait_queue_head_t sw_marker_wq;
|
||||
};
|
||||
|
@ -1296,24 +1296,19 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
|
||||
static int idpf_get_link_ksettings(struct net_device *netdev,
|
||||
struct ethtool_link_ksettings *cmd)
|
||||
{
|
||||
struct idpf_vport *vport;
|
||||
|
||||
idpf_vport_ctrl_lock(netdev);
|
||||
vport = idpf_netdev_to_vport(netdev);
|
||||
struct idpf_netdev_priv *np = netdev_priv(netdev);
|
||||
|
||||
ethtool_link_ksettings_zero_link_mode(cmd, supported);
|
||||
cmd->base.autoneg = AUTONEG_DISABLE;
|
||||
cmd->base.port = PORT_NONE;
|
||||
if (vport->link_up) {
|
||||
if (netif_carrier_ok(netdev)) {
|
||||
cmd->base.duplex = DUPLEX_FULL;
|
||||
cmd->base.speed = vport->link_speed_mbps;
|
||||
cmd->base.speed = np->link_speed_mbps;
|
||||
} else {
|
||||
cmd->base.duplex = DUPLEX_UNKNOWN;
|
||||
cmd->base.speed = SPEED_UNKNOWN;
|
||||
}
|
||||
|
||||
idpf_vport_ctrl_unlock(netdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1786,6 +1786,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
|
||||
*/
|
||||
err = idpf_vc_core_init(adapter);
|
||||
if (err) {
|
||||
cancel_delayed_work_sync(&adapter->mbx_task);
|
||||
idpf_deinit_dflt_mbx(adapter);
|
||||
goto unlock_mutex;
|
||||
}
|
||||
@ -1860,7 +1861,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
|
||||
* mess with. Nothing below should use those variables from new_vport
|
||||
* and should instead always refer to them in vport if they need to.
|
||||
*/
|
||||
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
|
||||
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
|
||||
|
||||
/* Adjust resource parameters prior to reallocating resources */
|
||||
switch (reset_cause) {
|
||||
@ -1906,7 +1907,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
|
||||
/* Same comment as above regarding avoiding copying the wait_queues and
|
||||
* mutexes applies here. We do not want to mess with those if possible.
|
||||
*/
|
||||
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
|
||||
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
|
||||
|
||||
if (reset_cause == IDPF_SR_Q_CHANGE)
|
||||
idpf_vport_alloc_vec_indexes(vport);
|
||||
|
@ -141,7 +141,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
|
||||
}
|
||||
np = netdev_priv(vport->netdev);
|
||||
|
||||
vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
|
||||
np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
|
||||
|
||||
if (vport->link_up == v2e->link_status)
|
||||
return;
|
||||
@ -3063,7 +3063,6 @@ init_failed:
|
||||
adapter->state = __IDPF_VER_CHECK;
|
||||
if (adapter->vcxn_mngr)
|
||||
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
|
||||
idpf_deinit_dflt_mbx(adapter);
|
||||
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
|
||||
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
|
||||
msecs_to_jiffies(task_delay));
|
||||
|
@@ -394,6 +394,7 @@ err_out_free_irqs:
 err_out_pci:
 	ionic_dev_teardown(ionic);
 	ionic_clear_pci(ionic);
+	ionic_debugfs_del_dev(ionic);
 err_out:
 	mutex_destroy(&ionic->dev_cmd_lock);
 	ionic_devlink_free(ionic);
|
@@ -3780,6 +3780,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
 	/* Request the Wake IRQ in case of another line
 	 * is used for WoL
 	 */
+	priv->wol_irq_disabled = true;
 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
 				  IRQF_SHARED, dev->name, dev);
|
@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
|
||||
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
|
||||
struct cppi5_host_desc_t *desc_rx;
|
||||
struct device *dev = common->dev;
|
||||
struct am65_cpsw_swdata *swdata;
|
||||
dma_addr_t desc_dma;
|
||||
dma_addr_t buf_dma;
|
||||
void *swdata;
|
||||
|
||||
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
|
||||
if (!desc_rx) {
|
||||
@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
|
||||
cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
|
||||
buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
|
||||
swdata = cppi5_hdesc_get_swdata(desc_rx);
|
||||
*((void **)swdata) = page_address(page);
|
||||
swdata->page = page;
|
||||
swdata->flow_id = flow_idx;
|
||||
|
||||
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
|
||||
desc_rx, desc_dma);
|
||||
@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
|
||||
|
||||
static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
|
||||
struct page *page,
|
||||
bool allow_direct,
|
||||
int desc_idx)
|
||||
bool allow_direct)
|
||||
{
|
||||
page_pool_put_full_page(flow->page_pool, page, allow_direct);
|
||||
flow->pages[desc_idx] = NULL;
|
||||
}
|
||||
|
||||
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
|
||||
{
|
||||
struct am65_cpsw_rx_flow *flow = data;
|
||||
struct am65_cpsw_rx_chn *rx_chn = data;
|
||||
struct cppi5_host_desc_t *desc_rx;
|
||||
struct am65_cpsw_rx_chn *rx_chn;
|
||||
struct am65_cpsw_swdata *swdata;
|
||||
dma_addr_t buf_dma;
|
||||
struct page *page;
|
||||
u32 buf_dma_len;
|
||||
void *page_addr;
|
||||
void **swdata;
|
||||
int desc_idx;
|
||||
u32 flow_id;
|
||||
|
||||
rx_chn = &flow->common->rx_chns;
|
||||
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
|
||||
swdata = cppi5_hdesc_get_swdata(desc_rx);
|
||||
page_addr = *swdata;
|
||||
page = swdata->page;
|
||||
flow_id = swdata->flow_id;
|
||||
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
|
||||
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
|
||||
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
|
||||
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
|
||||
|
||||
desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
|
||||
rx_chn->dsize_log2);
|
||||
am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
|
||||
am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
|
||||
}
|
||||
|
||||
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
|
||||
@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
|
||||
ret = -ENOMEM;
|
||||
goto fail_rx;
|
||||
}
|
||||
flow->pages[i] = page;
|
||||
|
||||
ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
|
||||
if (ret < 0) {
|
||||
dev_err(common->dev,
|
||||
"cannot submit page to rx channel flow %d, error %d\n",
|
||||
flow_idx, ret);
|
||||
am65_cpsw_put_page(flow, page, false, i);
|
||||
am65_cpsw_put_page(flow, page, false);
|
||||
goto fail_rx;
|
||||
}
|
||||
}
|
||||
@ -764,8 +759,8 @@ fail_tx:
|
||||
|
||||
fail_rx:
|
||||
for (i = 0; i < common->rx_ch_num_flows; i++)
|
||||
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
|
||||
am65_cpsw_nuss_rx_cleanup, 0);
|
||||
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
|
||||
am65_cpsw_nuss_rx_cleanup, !!i);
|
||||
|
||||
am65_cpsw_destroy_xdp_rxqs(common);
|
||||
|
||||
@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
|
||||
dev_err(common->dev, "rx teardown timeout\n");
|
||||
}
|
||||
|
||||
for (i = 0; i < common->rx_ch_num_flows; i++) {
|
||||
for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
|
||||
 		napi_disable(&rx_chn->flows[i].napi_rx);
 		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 	}
 
 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -1028,7 +1023,7 @@ pool_free:
 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 			     struct am65_cpsw_port *port,
 			     struct xdp_buff *xdp,
-			     int desc_idx, int cpu, int *len)
+			     int cpu, int *len)
 {
 	struct am65_cpsw_common *common = flow->common;
 	struct am65_cpsw_ndev_priv *ndev_priv;
@@ -1101,7 +1096,7 @@ drop:
 	}
 
 	page = virt_to_head_page(xdp->data);
-	am65_cpsw_put_page(flow, page, true, desc_idx);
+	am65_cpsw_put_page(flow, page, true);
 
 out:
 	return ret;
@@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	struct am65_cpsw_ndev_stats *stats;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	struct page *page, *new_page;
 	dma_addr_t desc_dma, buf_dma;
 	struct am65_cpsw_port *port;
-	int headroom, desc_idx, ret;
 	struct net_device *ndev;
 	u32 flow_idx = flow->id;
 	struct sk_buff *skb;
 	struct xdp_buff	xdp;
+	int headroom, ret;
 	void *page_addr;
-	void **swdata;
 	u32 *psdata;
 
 	*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		__func__, flow_idx, &desc_dma);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
-	page = virt_to_page(page_addr);
+	page = swdata->page;
+	page_addr = page_address(page);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-
 	skb = am65_cpsw_build_skb(page_addr, ndev,
 				  AM65_CPSW_MAX_PACKET_SIZE);
 	if (unlikely(!skb)) {
@@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
 					       cpu, &pkt_len);
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
@@ -1247,10 +1239,8 @@ allocate:
 		return -ENOMEM;
 	}
 
-	flow->pages[desc_idx] = new_page;
-
 	if (netif_dormant(ndev)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_dropped++;
 		return 0;
 	}
@@ -1258,7 +1248,7 @@ allocate:
 requeue:
 	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
 	if (WARN_ON(ret < 0)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
@@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 	for (i = 0; i < common->rx_ch_num_flows; i++) {
 		flow = &rx_chn->flows[i];
 		flow->page_pool = NULL;
-		flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
-					   sizeof(*flow->pages), GFP_KERNEL);
-		if (!flow->pages)
-			return -ENOMEM;
 	}
 
 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		flow = &rx_chn->flows[i];
 		flow->id = i;
 		flow->common = common;
+		flow->irq = -EINVAL;
 
 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
 		rx_flow_cfg.rx_cfg.size = max_desc_num;
-		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+		/* share same FDQ for all flows */
+		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
 		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
@@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		if (ret) {
 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
 				i, flow->irq, ret);
+			flow->irq = -EINVAL;
 			goto err;
 		}
 	}
@@ -3349,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 
 	for (i = 0; i < common->rx_ch_num_flows; i++)
 		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
-					  &rx_chan->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+					  rx_chan,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -101,10 +101,14 @@ struct am65_cpsw_rx_flow {
 	struct hrtimer rx_hrtimer;
 	unsigned long rx_pace_timeout;
 	struct page_pool *page_pool;
-	struct page **pages;
 	char name[32];
 };
 
+struct am65_cpsw_swdata {
+	u32 flow_id;
+	struct page *page;
+};
+
 struct am65_cpsw_rx_chn {
 	struct device *dev;
 	struct device *dma_dev;
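
The header hunk above carries the core of the multi queue Rx fix: the refill page now travels in the descriptor's own software data instead of a per-flow pages[] array indexed by descriptor index. A minimal sketch of the lookup this enables, assuming the helper name rx_desc_to_page and the surrounding driver context (the helper itself is not part of the patch):

  /* Sketch only: recover the page straight from the CPPI descriptor's
   * software data, so no desc_idx -> page lookup table is needed.
   */
  static struct page *rx_desc_to_page(struct cppi5_host_desc_t *desc_rx)
  {
  	struct am65_cpsw_swdata *swdata = cppi5_hdesc_get_swdata(desc_rx);
  
  	return swdata->page;
  }
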
@@ -222,7 +222,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
 	struct mse102x_net_spi *mses = to_mse102x_spi(mse);
 	struct spi_transfer *xfer = &mses->spi_xfer;
 	struct spi_message *msg = &mses->spi_msg;
-	struct sk_buff *tskb;
+	struct sk_buff *tskb = NULL;
 	int ret;
 
 	netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
@@ -235,7 +235,6 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
 		if (!tskb)
 			return -ENOMEM;
 
-		dev_kfree_skb(txp);
 		txp = tskb;
 	}
 
@@ -257,6 +256,8 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
 		mse->stats.xfer_err++;
 	}
 
+	dev_kfree_skb(tskb);
+
 	return ret;
 }
@@ -924,13 +924,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
 	skbuf_dma->sg_len = sg_len;
 	dma_tx_desc->callback_param = lp;
 	dma_tx_desc->callback_result = axienet_dma_tx_cb;
-	dmaengine_submit(dma_tx_desc);
-	dma_async_issue_pending(lp->tx_chan);
 	txq = skb_get_tx_queue(lp->ndev, skb);
 	netdev_tx_sent_queue(txq, skb->len);
 	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
 			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
 
+	dmaengine_submit(dma_tx_desc);
+	dma_async_issue_pending(lp->tx_chan);
 	return NETDEV_TX_OK;
 
 xmit_error_unmap_sg:
@@ -147,6 +147,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 		/* IRQ related */					\
 		.config_intr = dp83848_config_intr,			\
 		.handle_interrupt = dp83848_handle_interrupt,		\
+									\
+		.flags = PHY_RST_AFTER_CLK_EN,				\
 	}
 
 static struct phy_driver dp83848_driver[] = {
@@ -368,15 +368,16 @@ struct receive_queue {
  * because table sizes may be differ according to the device configuration.
  */
 #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
-#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
 struct virtio_net_ctrl_rss {
 	u32 hash_types;
 	u16 indirection_table_mask;
 	u16 unclassified_queue;
-	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+	u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
 	u16 max_tx_vq;
 	u8 hash_key_length;
 	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+
+	u16 *indirection_table;
 };
 
 /* Control VQ buffers: protected by the rtnl lock */
@@ -512,6 +513,25 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
 					       struct page *page, void *buf,
 					       int len, int truesize);
 
+static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
+{
+	if (!indir_table_size) {
+		rss->indirection_table = NULL;
+		return 0;
+	}
+
+	rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
+	if (!rss->indirection_table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
+{
+	kfree(rss->indirection_table);
+}
+
 static bool is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -3374,15 +3394,59 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 }
 
+static bool virtnet_commit_rss_command(struct virtnet_info *vi);
+
+static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
+{
+	u32 indir_val = 0;
+	int i = 0;
+
+	for (; i < vi->rss_indir_table_size; ++i) {
+		indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
+		vi->rss.indirection_table[i] = indir_val;
+	}
+	vi->rss.max_tx_vq = queue_pairs;
+}
+
 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
 	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
-	struct scatterlist sg;
+	struct virtio_net_ctrl_rss old_rss;
 	struct net_device *dev = vi->dev;
+	struct scatterlist sg;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
+	/* Firstly check if we need update rss. Do updating if both (1) rss enabled and
+	 * (2) no user configuration.
+	 *
+	 * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is,
+	 * the device updates queue_pairs together with rss, so we can skip the sperate queue_pairs
+	 * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
+	 */
+	if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
+		memcpy(&old_rss, &vi->rss, sizeof(old_rss));
+		if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
+			vi->rss.indirection_table = old_rss.indirection_table;
+			return -ENOMEM;
+		}
+
+		virtnet_rss_update_by_qpairs(vi, queue_pairs);
+
+		if (!virtnet_commit_rss_command(vi)) {
+			/* restore ctrl_rss if commit_rss_command failed */
+			rss_indirection_table_free(&vi->rss);
+			memcpy(&vi->rss, &old_rss, sizeof(old_rss));
+
+			dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
+				 queue_pairs);
+			return -EINVAL;
+		}
+		rss_indirection_table_free(&old_rss);
+		goto succ;
+	}
+
 	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 	if (!mq)
 		return -ENOMEM;
@@ -3395,12 +3459,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
-	} else {
-		vi->curr_queue_pairs = queue_pairs;
-		/* virtnet_open() will refill when device is going to up. */
-		if (dev->flags & IFF_UP)
-			schedule_delayed_work(&vi->refill, 0);
 	}
 
+succ:
+	vi->curr_queue_pairs = queue_pairs;
+	/* virtnet_open() will refill when device is going to up. */
+	if (dev->flags & IFF_UP)
+		schedule_delayed_work(&vi->refill, 0);
+
 	return 0;
 }
@@ -3828,11 +3892,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 	/* prepare sgs */
 	sg_init_table(sgs, 4);
 
-	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
 	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
 
-	sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
-	sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
+	if (vi->has_rss) {
+		sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
+		sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
+	} else {
+		sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
+	}
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
@@ -3856,21 +3924,14 @@ err:
 
 static void virtnet_init_default_rss(struct virtnet_info *vi)
 {
-	u32 indir_val = 0;
-	int i = 0;
-
 	vi->rss.hash_types = vi->rss_hash_types_supported;
 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
 	vi->rss.indirection_table_mask = vi->rss_indir_table_size
 						? vi->rss_indir_table_size - 1 : 0;
 	vi->rss.unclassified_queue = 0;
 
-	for (; i < vi->rss_indir_table_size; ++i) {
-		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
-		vi->rss.indirection_table[i] = indir_val;
-	}
+	virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
 
-	vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
 	vi->rss.hash_key_length = vi->rss_key_size;
 
 	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
@@ -6420,10 +6481,19 @@ static int virtnet_probe(struct virtio_device *vdev)
 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
 				rss_max_indirection_table_length));
 	}
+	err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
+	if (err)
+		goto free;
 
 	if (vi->has_rss || vi->has_rss_hash_report) {
 		vi->rss_key_size =
 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+		if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+			dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
+				vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
+			err = -EINVAL;
+			goto free;
+		}
 
 		vi->rss_hash_types_supported =
 			virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
@@ -6551,6 +6621,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	if (vi->has_rss || vi->has_rss_hash_report) {
+		if (!virtnet_commit_rss_command(vi)) {
+			dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
+			dev->hw_features &= ~NETIF_F_RXHASH;
+			vi->has_rss_hash_report = false;
+			vi->has_rss = false;
+		}
+	}
+
 	virtnet_set_queues(vi, vi->curr_queue_pairs);
 
 	/* a random MAC address has been assigned, notify the device.
@@ -6674,6 +6753,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	remove_vq_common(vi);
 
+	rss_indirection_table_free(&vi->rss);
+
 	free_netdev(vi->dev);
 }
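
The virtnet_rss_update_by_qpairs() helper introduced above leans on ethtool_rxfh_indir_default() from <linux/ethtool.h>, which simply spreads indirection-table slots round-robin over the RX queues (slot i maps to queue i % n). A stand-alone sketch of that fill step, with an illustrative helper name and a caller-supplied table rather than the driver's fields:

  /* Sketch only: fill a dynamically sized RSS indirection table so that
   * slot i steers packets to queue (i % queue_pairs).
   */
  static void fill_default_indir(u16 *table, u16 size, u16 queue_pairs)
  {
  	u16 i;
  
  	for (i = 0; i < size; i++)
  		table[i] = ethtool_rxfh_indir_default(i, queue_pairs);
  }
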
@@ -226,7 +226,7 @@ int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
 	return 0;
 
 err_unmap_skbs:
-	while (--i > 0)
+	while (i--)
 		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
 
 	return ret;
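
The one-character loop change above matters because "while (--i > 0)" never unwinds index 0 and misbehaves when the very first allocation fails, while "while (i--)" visits every successfully prepared entry and stops cleanly. A tiny self-contained sketch of the difference (the helper and counts are illustrative, not the driver's):

  #include <stdio.h>
  
  static void undo(int idx) { printf("undo %d\n", idx); }
  
  int main(void)
  {
  	int i = 3;	/* entries 0..2 were set up before the failure */
  
  	while (i--)	/* unwinds 2, 1, 0 */
  		undo(i);
  	return 0;	/* "--i > 0" would have stopped after 2 and 1 */
  }
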
@@ -1103,6 +1103,7 @@ struct nft_rule_blob {
  * @name: name of the chain
  * @udlen: user data length
  * @udata: user data in the chain
+ * @rcu_head: rcu head for deferred release
  * @blob_next: rule blob pointer to the next in the chain
  */
 struct nft_chain {
@@ -1120,6 +1121,7 @@ struct nft_chain {
 	char *name;
 	u16 udlen;
 	u8 *udata;
+	struct rcu_head rcu_head;
 
 	/* Only used during control plane commit phase: */
 	struct nft_rule_blob *blob_next;
@@ -1263,6 +1265,7 @@ static inline void nft_use_inc_restore(u32 *use)
  * @sets: sets in the table
  * @objects: stateful objects in the table
  * @flowtables: flow tables in the table
+ * @net: netnamespace this table belongs to
  * @hgenerator: handle generator state
  * @handle: table handle
  * @use: number of chain references to this table
@@ -1282,6 +1285,7 @@ struct nft_table {
 	struct list_head sets;
 	struct list_head objects;
 	struct list_head flowtables;
+	possible_net_t net;
 	u64 hgenerator;
 	u64 handle;
 	u32 use;
@@ -287,6 +287,7 @@
 	EM(rxrpc_call_see_input,		"SEE input   ") \
 	EM(rxrpc_call_see_release,		"SEE release ") \
 	EM(rxrpc_call_see_userid_exists,	"SEE u-exists") \
+	EM(rxrpc_call_see_waiting_call,		"SEE q-conn  ") \
 	E_(rxrpc_call_see_zap,			"SEE zap     ")
 
 #define rxrpc_txqueue_traces \
@@ -112,7 +112,6 @@ const struct genl_ops mptcp_pm_nl_ops[11] = {
 		.dumpit = mptcp_pm_nl_get_addr_dumpit,
 		.policy = mptcp_pm_get_addr_nl_policy,
 		.maxattr = MPTCP_PM_ATTR_TOKEN,
-		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
@@ -91,6 +91,7 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
 						struct mptcp_pm_addr_entry *addr)
 {
 	struct mptcp_pm_addr_entry *entry, *tmp;
+	struct sock *sk = (struct sock *)msk;
 
 	list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
 		if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
@@ -98,7 +99,7 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
 			 * be used multiple times (e.g. fullmesh mode).
 			 */
 			list_del_rcu(&entry->list);
-			kfree(entry);
+			sock_kfree_s(sk, entry, sizeof(*entry));
 			msk->pm.local_addr_used--;
 			return 0;
 		}
@@ -1495,6 +1495,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
 	INIT_LIST_HEAD(&table->sets);
 	INIT_LIST_HEAD(&table->objects);
 	INIT_LIST_HEAD(&table->flowtables);
+	write_pnet(&table->net, net);
 	table->family = family;
 	table->flags = flags;
 	table->handle = ++nft_net->table_handle;
@@ -11430,22 +11431,48 @@ int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
 }
 EXPORT_SYMBOL_GPL(nft_data_dump);
 
-int __nft_release_basechain(struct nft_ctx *ctx)
+static void __nft_release_basechain_now(struct nft_ctx *ctx)
 {
 	struct nft_rule *rule, *nr;
 
-	if (WARN_ON(!nft_is_base_chain(ctx->chain)))
-		return 0;
-
-	nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
 	list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
 		list_del(&rule->list);
-		nft_use_dec(&ctx->chain->use);
 		nf_tables_rule_release(ctx, rule);
 	}
+	nf_tables_chain_destroy(ctx->chain);
+}
+
+static void nft_release_basechain_rcu(struct rcu_head *head)
+{
+	struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+	struct nft_ctx ctx = {
+		.family	= chain->table->family,
+		.chain	= chain,
+		.net	= read_pnet(&chain->table->net),
+	};
+
+	__nft_release_basechain_now(&ctx);
+	put_net(ctx.net);
+}
+
+int __nft_release_basechain(struct nft_ctx *ctx)
+{
+	struct nft_rule *rule;
+
+	if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain)))
+		return 0;
+
+	nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
+	list_for_each_entry(rule, &ctx->chain->rules, list)
+		nft_use_dec(&ctx->chain->use);
+
 	nft_chain_del(ctx->chain);
 	nft_use_dec(&ctx->table->use);
-	nf_tables_chain_destroy(ctx->chain);
+
+	if (maybe_get_net(ctx->net))
+		call_rcu(&ctx->chain->rcu_head, nft_release_basechain_rcu);
+	else
+		__nft_release_basechain_now(ctx);
 
 	return 0;
 }
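
The new nft_release_basechain_rcu() above is an instance of the usual deferred-release idiom: embed an rcu_head in the object and destroy it only from a call_rcu() callback, so concurrent RCU readers see either the live object or nothing, never freed memory. A generic sketch of that idiom with illustrative names (not the nf_tables ones):

  struct thing {
  	struct rcu_head rcu_head;
  	/* ... payload ... */
  };
  
  /* Sketch only: free the object after a grace period has elapsed. */
  static void thing_free_rcu(struct rcu_head *head)
  {
  	struct thing *t = container_of(head, struct thing, rcu_head);
  
  	kfree(t);
  }
  
  static void thing_release(struct thing *t)
  {
  	call_rcu(&t->rcu_head, thing_free_rcu);
  }
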
@@ -516,6 +516,7 @@ void rxrpc_connect_client_calls(struct rxrpc_local *local)
 
 		spin_lock(&local->client_call_lock);
 		list_move_tail(&call->wait_link, &bundle->waiting_calls);
+		rxrpc_see_call(call, rxrpc_call_see_waiting_call);
 		spin_unlock(&local->client_call_lock);
 
 		if (rxrpc_bundle_has_space(bundle))
@@ -586,7 +587,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
 		_debug("call is waiting");
 		ASSERTCMP(call->call_id, ==, 0);
 		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
+		/* May still be on ->new_client_calls. */
+		spin_lock(&local->client_call_lock);
 		list_del_init(&call->wait_link);
+		spin_unlock(&local->client_call_lock);
 		return;
 	}
@@ -3751,7 +3751,7 @@ enum sctp_disposition sctp_sf_ootb(struct net *net,
 		}
 
 		ch = (struct sctp_chunkhdr *)ch_end;
-	} while (ch_end < skb_tail_pointer(skb));
+	} while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
 
 	if (ootb_shut_ack)
 		return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
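
The tightened loop condition above keeps walking only while a complete chunk header still fits before the tail of the skb, which is what prevents the out-of-bounds read on a truncated final chunk. A small sketch of the bound being enforced (names are illustrative, not the SCTP ones):

  #include <stddef.h>
  
  struct hdr { unsigned char type, flags; unsigned short length; };
  
  /* Sketch only: step to the next TLV chunk only if a full header fits
   * in the bytes that remain before tail.
   */
  static int next_chunk_fits(const unsigned char *ch_end,
  			   const unsigned char *tail)
  {
  	return ch_end + sizeof(struct hdr) < tail;
  }
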
@@ -3359,8 +3359,10 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
 	else
 		rc = smc_create_clcsk(net, sk, family);
 
-	if (rc)
+	if (rc) {
 		sk_common_release(sk);
+		sock->sk = NULL;
+	}
 out:
 	return rc;
 }
@@ -899,9 +899,7 @@ static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
 	struct ib_device *ibdev = smcibdev->ibdev;
 	struct net_device *ndev;
 
-	if (!ibdev->ops.get_netdev)
-		return;
-	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
+	ndev = ib_device_get_netdev(ibdev, port + 1);
 	if (ndev) {
 		smcibdev->ndev_ifidx[port] = ndev->ifindex;
 		dev_put(ndev);
@@ -921,9 +919,7 @@ void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
 	port_cnt = smcibdev->ibdev->phys_port_cnt;
 	for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
 		libdev = smcibdev->ibdev;
-		if (!libdev->ops.get_netdev)
-			continue;
-		lndev = libdev->ops.get_netdev(libdev, i + 1);
+		lndev = ib_device_get_netdev(libdev, i + 1);
 		dev_put(lndev);
 		if (lndev != ndev)
 			continue;
@@ -1054,9 +1054,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
 	for (i = 1; i <= SMC_MAX_PORTS; i++) {
 		if (!rdma_is_port_valid(ibdev->ibdev, i))
 			continue;
-		if (!ibdev->ibdev->ops.get_netdev)
-			continue;
-		ndev = ibdev->ibdev->ops.get_netdev(ibdev->ibdev, i);
+		ndev = ib_device_get_netdev(ibdev->ibdev, i);
 		if (!ndev)
 			continue;
 		dev_put(ndev);