net: hns3: default enable tx bounce buffer when smmu enabled
The SMMU engine on the HIP09 chip has a hardware issue: its pagetable
prefetch feature may prefetch and use an invalid PTE even though the
PTE is valid at that time, causing the device to trigger spurious page
faults. Prefetching can be avoided by adding a SYNC command when the
SMMU maps an iova, but that makes NIC performance drop sharply. Work
around this instead by always enabling the tx bounce buffer, which
avoids DMA mapping/unmapping on the TX path.

This issue only affects HNS3, so always enable the tx bounce buffer
when the SMMU is enabled, to improve performance.

Fixes: 295ba232a8 ("net: hns3: add device version to replace pci revision")
Signed-off-by: Peiyang Wang <wangpeiyang1@huawei.com>
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Jijie Shao <shaojijie@huawei.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit e6ab19443b
parent 637f414763
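
Background for the diff below: the workaround only kicks in when the
device's DMA is actually translated by an IOMMU DMA domain, i.e. when
the SMMU is enabled for this device. As a minimal sketch of that gate
(the helper name is invented for illustration; the patch open-codes the
same check inside hns3_update_tx_spare_buf_config(), using the kernel's
existing iommu_get_domain_for_dev() and iommu_is_dma_domain() APIs):

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * True iff DMA for this device goes through an IOMMU (SMMU) DMA
 * domain. Without translation the bounce-buffer floors are never
 * applied and driver behaviour is unchanged.
 */
static bool hns3_dev_behind_smmu(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return domain && iommu_is_dma_domain(domain);
}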
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
@@ -1032,6 +1033,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 {
 	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+	struct net_device *netdev = ring_to_netdev(ring);
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hns3_tx_spare *tx_spare;
 	struct page *page;
 	dma_addr_t dma;
@@ -1073,6 +1076,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
 	tx_spare->buf = page_address(page);
 	tx_spare->len = PAGE_SIZE << order;
 	ring->tx_spare = tx_spare;
+	ring->tx_copybreak = priv->tx_copybreak;
 	return;
 
 dma_mapping_error:
@@ -4868,6 +4872,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
 	devm_kfree(&pdev->dev, priv->tqp_vector);
 }
 
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+	struct hnae3_handle *handle = priv->ae_handle;
+
+	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+		return;
+
+	if (!(domain && iommu_is_dma_domain(domain)))
+		return;
+
+	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+	if (priv->tx_copybreak < priv->min_tx_copybreak)
+		priv->tx_copybreak = priv->min_tx_copybreak;
+	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 			      unsigned int ring_type)
 {
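The two floors set above are worth a word. HNS3_MAX_PACKET_SIZE
(64 KiB) matches the largest frame the stack will hand the driver, so
raising tx_copybreak to it makes every skb take the copy-into-spare-
buffer path, removing per-packet DMA map/unmap from TX entirely.
HNS3_MIN_SPARE_BUF_SIZE (2 MiB) then keeps the pre-mapped spare ring
large enough to be useful; the exact figure looks sized to hold a
queue's worth of in-flight copies, though the commit does not say so
explicitly.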
@@ -5101,6 +5129,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 	int i, j;
 	int ret;
 
+	hns3_update_tx_spare_buf_config(priv);
 	for (i = 0; i < ring_num; i++) {
 		ret = hns3_alloc_ring_memory(&priv->ring[i]);
 		if (ret) {
@@ -5305,6 +5334,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	priv->ae_handle = handle;
 	priv->tx_timeout_count = 0;
 	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+	priv->min_tx_copybreak = 0;
+	priv->min_tx_spare_buf_size = 0;
 	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 
 	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
 	struct hns3_enet_coalesce rx_coal;
 	u32 tx_copybreak;
 	u32 rx_copybreak;
+	u32 min_tx_copybreak;
+	u32 min_tx_spare_buf_size;
 };
 
 union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1933,6 +1933,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
 	return ret;
 }
 
+static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	if (copybreak < priv->min_tx_copybreak) {
+		netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
+			   copybreak, priv->min_tx_copybreak);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	if (buf_size < priv->min_tx_spare_buf_size) {
+		netdev_err(netdev,
+			   "tx spare buf size %u should be no less than %u!\n",
+			   buf_size, priv->min_tx_spare_buf_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int hns3_set_tunable(struct net_device *netdev,
 			    const struct ethtool_tunable *tuna,
 			    const void *data)
@@ -1949,6 +1974,10 @@ static int hns3_set_tunable(struct net_device *netdev,
 
 	switch (tuna->id) {
 	case ETHTOOL_TX_COPYBREAK:
+		ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
+		if (ret)
+			return ret;
+
 		priv->tx_copybreak = *(u32 *)data;
 
 		for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1963,6 +1992,10 @@ static int hns3_set_tunable(struct net_device *netdev,
 
 		break;
 	case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+		ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
+		if (ret)
+			return ret;
+
 		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
 		new_tx_spare_buf_size = *(u32 *)data;
 		netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
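
One user-visible consequence of the ethtool changes above: when the
SMMU floors are active, lowering ETHTOOL_TX_COPYBREAK below 64 KiB or
ETHTOOL_TX_COPYBREAK_BUF_SIZE below 2 MiB now fails with -EINVAL and an
explanatory netdev_err message, instead of silently re-enabling the
per-packet map/unmap path the workaround exists to avoid. (With a
recent ethtool these should correspond to the standard tx-copybreak and
tx-buf-size tunables of --set-tunable, assuming the usual tunable
names.)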