
Merge branch 'am65-cpsw-rx-mq'

Roger Quadros says:

====================
net: ethernet: ti: am65-cpsw: Add multi queue RX support

am65-cpsw can support up to 8 RX queues. So far we have been
using only one queue (i.e. the default flow) for all RX traffic.

This series adds multi-queue RX support. The driver starts with
one RX queue by default; the user can increase the number of RX
queues via ethtool, e.g. 'ethtool -L ethx rx <N>'.
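
For example, to inspect the current queue counts and then switch to
four RX queues (interface name and counts below are illustrative):

    ethtool -l eth0
    ethtool -L eth0 rx 4

Because interrupt pacing is now tracked per RX flow, individual queues
can also be tuned through ethtool's per-queue interface, e.g.
'ethtool --per-queue eth0 queue_mask 0x02 --coalesce rx-usecs 50'
(the driver rejects non-zero values below 20us).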

The series also adds regmap and regfield support for some of the
ALE registers, and defines the Policer/Classifier registers and fields.

Converting the existing ALE control APIs to regfields can be a separate
exercise.
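
For readers unfamiliar with the pattern, below is a condensed,
illustrative sketch of the regmap/regfield approach the series applies
to the ALE registers (the register offset and names here are
placeholders, not the exact driver code):

    #include <linux/device.h>
    #include <linux/regmap.h>

    static const struct regmap_config ale_regmap_cfg = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
    };

    static int ale_read_major_ver(struct device *dev, void __iomem *regs)
    {
            /* describe bits 8..15 of a (placeholder) IDVER register once */
            struct reg_field major = REG_FIELD(0x0, 8, 15);
            struct regmap_field *field;
            struct regmap *map;
            u32 val;

            map = devm_regmap_init_mmio(dev, regs, &ale_regmap_cfg);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            field = devm_regmap_field_alloc(dev, map, major);
            if (IS_ERR(field))
                    return PTR_ERR(field);

            regmap_field_read(field, &val); /* reads reg, extracts the field */
            dev_info(dev, "ALE major version %u\n", val);
            return 0;
    }

Once a field is allocated, regmap_field_read()/regmap_field_write()
replace open-coded readl()/mask/shift sequences, which is what makes
converting the remaining ALE control APIs a mechanical follow-up.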

Helper functions are added to read and write the Policer/Classifier
registers, and a default Classifier setup function is added that
routes packets to different RX queues based on their PCP/DSCP priority.
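
Concretely, the default setup programs one classifier entry per
PCP/DSCP priority (0-7) and steers it to a thread (RX flow) picked
from a priority-to-thread table indexed by the number of channels in
use; with 4 RX channels, for instance, priorities 1-2 land on flow 0,
0 and 3 on flow 1, 4-5 on flow 2, and 6-7 on flow 3. On the driver
side this is a single call once the flows exist (sketch mirroring
am65_cpsw_nuss_init_rx_chns() in this series):

    /* route the 8 priorities across the configured RX flows */
    cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);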

Signed-off-by: Roger Quadros <rogerq@kernel.org>
---
Changes in v4:
- Use single macro AM65_CPSW_MAX_QUEUES for both TX and RX queues
  to simplify code
- reuse am65_cpsw_get/set_per_queue_coalesce for am65_cpsw_get/set_coalesce.
- return -EINVAL if unsupported tx/rx_coalesce_usecs in
  am65_cpsw_set_coalesce.
- reverse Xmas tree declaration order fixes in cpsw_ale
- Link to v3: https://lore.kernel.org/r/20240703-am65-cpsw-multi-rx-v3-0-f11cd860fd72@kernel.org

Changes in v3:
- code style fixes
- squashed patches 5 and 6
- added a comment about the priority-to-thread mapping table.
- added Reviewed-by from Simon Horman.
- Link to v2: https://lore.kernel.org/r/20240628-am65-cpsw-multi-rx-v2-0-c399cb77db56@kernel.org

Changes in v2:
- rebase to net/next
- fixed RX stall issue during iperf
- Link to v1: https://lore.kernel.org/r/20240606-am65-cpsw-multi-rx-v1-0-0704b0cb6fdc@kernel.org
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller <davem@davemloft.net> on 2024-09-13 10:49:01 +01:00
commit bdf2ba157e

5 changed files with 610 additions and 273 deletions

drivers/net/ethernet/ti/am65-cpsw-ethtool.c

@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
 {
     struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 
-    ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
-    ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
-    ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+    ch->max_rx = AM65_CPSW_MAX_QUEUES;
+    ch->max_tx = AM65_CPSW_MAX_QUEUES;
+    ch->rx_count = common->rx_ch_num_flows;
     ch->tx_count = common->tx_ch_num;
 }
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
     if (common->usage_count)
         return -EBUSY;
 
-    am65_cpsw_nuss_remove_tx_chns(common);
-
-    return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+    return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+                                            chs->rx_count);
 }
 
 static void
@@ -913,34 +912,54 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
     s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
 }
 
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
-                                  struct kernel_ethtool_coalesce *kernel_coal,
-                                  struct netlink_ext_ack *extack)
-{
-    struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-    struct am65_cpsw_tx_chn *tx_chn;
-
-    tx_chn = &common->tx_chns[0];
-
-    coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
-    coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
-    return 0;
-}
-
 static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
                                             struct ethtool_coalesce *coal)
 {
     struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+    struct am65_cpsw_rx_flow *rx_flow;
     struct am65_cpsw_tx_chn *tx_chn;
 
-    if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+    if (queue >= AM65_CPSW_MAX_QUEUES)
         return -EINVAL;
 
     tx_chn = &common->tx_chns[queue];
     coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
 
+    rx_flow = &common->rx_chns.flows[queue];
+    coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
+    return 0;
+}
+
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+                                  struct kernel_ethtool_coalesce *kernel_coal,
+                                  struct netlink_ext_ack *extack)
+{
+    return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
+}
+
+static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+                                            struct ethtool_coalesce *coal)
+{
+    struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+    struct am65_cpsw_rx_flow *rx_flow;
+    struct am65_cpsw_tx_chn *tx_chn;
+
+    if (queue >= AM65_CPSW_MAX_QUEUES)
+        return -EINVAL;
+
+    tx_chn = &common->tx_chns[queue];
+    if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+        return -EINVAL;
+
+    tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+    rx_flow = &common->rx_chns.flows[queue];
+    if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+        return -EINVAL;
+
+    rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
 
     return 0;
 }
@@ -948,43 +967,7 @@ static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coales
                                   struct kernel_ethtool_coalesce *kernel_coal,
                                   struct netlink_ext_ack *extack)
 {
-    struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-    struct am65_cpsw_tx_chn *tx_chn;
-
-    tx_chn = &common->tx_chns[0];
-
-    if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
-        return -EINVAL;
-
-    if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
-        return -EINVAL;
-
-    common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
-    tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
-    return 0;
-}
-
-static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
-                                            struct ethtool_coalesce *coal)
-{
-    struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-    struct am65_cpsw_tx_chn *tx_chn;
-
-    if (queue >= AM65_CPSW_MAX_TX_QUEUES)
-        return -EINVAL;
-
-    tx_chn = &common->tx_chns[queue];
-
-    if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
-        dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
-                 queue);
-        coal->tx_coalesce_usecs = 20;
-    }
-
-    tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
-    return 0;
+    return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
 }
 
 const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {

drivers/net/ethernet/ti/am65-cpsw-nuss.c

@@ -138,7 +138,7 @@
                      AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
 
 #define AM65_CPSW_ALE_AGEOUT_DEFAULT    30
-/* Number of TX/RX descriptors */
+/* Number of TX/RX descriptors per channel/flow */
 #define AM65_CPSW_MAX_TX_DESC   500
 #define AM65_CPSW_MAX_RX_DESC   500
@@ -150,6 +150,7 @@
              NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
 #define AM65_CPSW_DEFAULT_TX_CHNS   8
+#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS  1
 
 /* CPPI streaming packet interface */
 #define AM65_CPSW_CPPI_TX_FLOW_ID   0x3FFF
@@ -331,7 +332,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
 }
 
 static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
-                  struct page *page)
+                  struct page *page, u32 flow_idx)
 {
     struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
     struct cppi5_host_desc_t *desc_rx;
@@ -364,7 +365,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
     swdata = cppi5_hdesc_get_swdata(desc_rx);
     *((void **)swdata) = page_address(page);
 
-    return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+    return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
+                                    desc_rx, desc_dma);
 }
 
 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
@@ -399,22 +401,27 @@ static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
 static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
 {
     struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+    struct am65_cpsw_rx_flow *flow;
     struct xdp_rxq_info *rxq;
-    int i;
+    int id, port;
+
+    for (id = 0; id < common->rx_ch_num_flows; id++) {
+        flow = &rx_chn->flows[id];
 
-    for (i = 0; i < common->port_num; i++) {
-        if (!common->ports[i].ndev)
-            continue;
+        for (port = 0; port < common->port_num; port++) {
+            if (!common->ports[port].ndev)
+                continue;
 
-        rxq = &common->ports[i].xdp_rxq;
+            rxq = &common->ports[port].xdp_rxq[id];
 
-        if (xdp_rxq_info_is_reg(rxq))
-            xdp_rxq_info_unreg(rxq);
-    }
+            if (xdp_rxq_info_is_reg(rxq))
+                xdp_rxq_info_unreg(rxq);
+        }
 
-    if (rx_chn->page_pool) {
-        page_pool_destroy(rx_chn->page_pool);
-        rx_chn->page_pool = NULL;
+        if (flow->page_pool) {
+            page_pool_destroy(flow->page_pool);
+            flow->page_pool = NULL;
+        }
     }
 }
@@ -428,32 +435,45 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
         .nid = dev_to_node(common->dev),
         .dev = common->dev,
         .dma_dir = DMA_BIDIRECTIONAL,
-        .napi = &common->napi_rx,
+        /* .napi set dynamically */
     };
+    struct am65_cpsw_rx_flow *flow;
     struct xdp_rxq_info *rxq;
     struct page_pool *pool;
-    int i, ret;
+    int id, port, ret;
+
+    for (id = 0; id < common->rx_ch_num_flows; id++) {
+        flow = &rx_chn->flows[id];
+        pp_params.napi = &flow->napi_rx;
+        pool = page_pool_create(&pp_params);
+        if (IS_ERR(pool)) {
+            ret = PTR_ERR(pool);
+            goto err;
+        }
 
-    pool = page_pool_create(&pp_params);
-    if (IS_ERR(pool))
-        return PTR_ERR(pool);
+        flow->page_pool = pool;
 
-    rx_chn->page_pool = pool;
-
-    for (i = 0; i < common->port_num; i++) {
-        if (!common->ports[i].ndev)
-            continue;
-
-        rxq = &common->ports[i].xdp_rxq;
-
-        ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
-        if (ret)
-            goto err;
-
-        ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
-        if (ret)
-            goto err;
+        /* using same page pool is allowed as no running rx handlers
+         * simultaneously for both ndevs
+         */
+        for (port = 0; port < common->port_num; port++) {
+            if (!common->ports[port].ndev)
+                continue;
+
+            rxq = &common->ports[port].xdp_rxq[id];
+
+            ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+                                   id, flow->napi_rx.napi_id);
+            if (ret)
+                goto err;
+
+            ret = xdp_rxq_info_reg_mem_model(rxq,
+                                             MEM_TYPE_PAGE_POOL,
+                                             pool);
+            if (ret)
+                goto err;
+        }
     }
 
     return 0;
@@ -497,25 +517,27 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
                     desc_idx);
 }
 
-static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
                       struct page *page,
                       bool allow_direct,
                       int desc_idx)
 {
-    page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
-    rx_chn->pages[desc_idx] = NULL;
+    page_pool_put_full_page(flow->page_pool, page, allow_direct);
+    flow->pages[desc_idx] = NULL;
 }
 
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
-    struct am65_cpsw_rx_chn *rx_chn = data;
+    struct am65_cpsw_rx_flow *flow = data;
     struct cppi5_host_desc_t *desc_rx;
+    struct am65_cpsw_rx_chn *rx_chn;
     dma_addr_t buf_dma;
     u32 buf_dma_len;
    void *page_addr;
     void **swdata;
     int desc_idx;
 
+    rx_chn = &flow->common->rx_chns;
     desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
     swdata = cppi5_hdesc_get_swdata(desc_rx);
     page_addr = *swdata;
@@ -526,7 +548,7 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
     desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
                        rx_chn->dsize_log2);
 
-    am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
+    am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
 }
 
 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -602,7 +624,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
     struct am65_cpsw_host *host_p = am65_common_get_host(common);
     struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
     struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
-    int port_idx, i, ret, tx;
+    int port_idx, i, ret, tx, flow_idx;
+    struct am65_cpsw_rx_flow *flow;
     u32 val, port_mask;
     struct page *page;
@@ -670,27 +693,26 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
             return ret;
     }
 
-    for (i = 0; i < rx_chn->descs_num; i++) {
-        page = page_pool_dev_alloc_pages(rx_chn->page_pool);
-        if (!page) {
-            ret = -ENOMEM;
-            if (i)
+    for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+        flow = &rx_chn->flows[flow_idx];
+        for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+            page = page_pool_dev_alloc_pages(flow->page_pool);
+            if (!page) {
+                dev_err(common->dev, "cannot allocate page in flow %d\n",
+                        flow_idx);
+                ret = -ENOMEM;
                 goto fail_rx;
+            }
 
-            return ret;
-        }
-        rx_chn->pages[i] = page;
+            flow->pages[i] = page;
 
-        ret = am65_cpsw_nuss_rx_push(common, page);
-        if (ret < 0) {
-            dev_err(common->dev,
-                "cannot submit page to channel rx: %d\n",
-                ret);
-            am65_cpsw_put_page(rx_chn, page, false, i);
-            if (i)
+            ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
+            if (ret < 0) {
+                dev_err(common->dev,
+                    "cannot submit page to rx channel flow %d, error %d\n",
+                    flow_idx, ret);
+                am65_cpsw_put_page(flow, page, false, i);
                 goto fail_rx;
-
-            return ret;
+            }
         }
     }
@@ -700,6 +722,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
         goto fail_rx;
     }
 
+    for (i = 0; i < common->rx_ch_num_flows; i++) {
+        napi_enable(&rx_chn->flows[i].napi_rx);
+        if (rx_chn->flows[i].irq_disabled) {
+            rx_chn->flows[i].irq_disabled = false;
+            enable_irq(rx_chn->flows[i].irq);
+        }
+    }
+
     for (tx = 0; tx < common->tx_ch_num; tx++) {
         ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
         if (ret) {
@@ -711,12 +741,6 @@
         napi_enable(&tx_chn[tx].napi_tx);
     }
 
-    napi_enable(&common->napi_rx);
-    if (common->rx_irq_disabled) {
-        common->rx_irq_disabled = false;
-        enable_irq(rx_chn->irq);
-    }
-
     dev_dbg(common->dev, "cpsw_nuss started\n");
     return 0;
@@ -727,11 +751,24 @@ fail_tx:
         tx--;
     }
 
+    for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
+        flow = &rx_chn->flows[flow_idx];
+        if (!flow->irq_disabled) {
+            disable_irq(flow->irq);
+            flow->irq_disabled = true;
+        }
+        napi_disable(&flow->napi_rx);
+    }
+
     k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
 
 fail_rx:
-    k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
-                  am65_cpsw_nuss_rx_cleanup, 0);
+    for (i = 0; i < common->rx_ch_num_flows; i--)
+        k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+                      am65_cpsw_nuss_rx_cleanup, 0);
+
+    am65_cpsw_destroy_xdp_rxqs(common);
 
     return ret;
 }
@@ -780,12 +817,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
         dev_err(common->dev, "rx teardown timeout\n");
     }
 
-    napi_disable(&common->napi_rx);
-    hrtimer_cancel(&common->rx_hrtimer);
-
-    for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
-        k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
-                      am65_cpsw_nuss_rx_cleanup, !!i);
+    for (i = 0; i < common->rx_ch_num_flows; i++) {
+        napi_disable(&rx_chn->flows[i].napi_rx);
+        hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
+        k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+                      am65_cpsw_nuss_rx_cleanup, 0);
+    }
 
     k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -794,10 +831,6 @@
     writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
     writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
 
-    for (i = 0; i < rx_chn->descs_num; i++) {
-        if (rx_chn->pages[i])
-            am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
-    }
     am65_cpsw_destroy_xdp_rxqs(common);
 
     dev_dbg(common->dev, "cpsw_nuss stopped\n");
@@ -868,7 +901,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
         goto runtime_put;
     }
 
-    ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+    ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
     if (ret) {
         dev_err(common->dev, "cannot set real number of rx queues\n");
         goto runtime_put;
@@ -992,12 +1025,12 @@ pool_free:
     return ret;
 }
 
-static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
                  struct am65_cpsw_port *port,
                  struct xdp_buff *xdp,
                  int desc_idx, int cpu, int *len)
 {
-    struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+    struct am65_cpsw_common *common = flow->common;
     struct am65_cpsw_ndev_priv *ndev_priv;
     struct net_device *ndev = port->ndev;
     struct am65_cpsw_ndev_stats *stats;
@@ -1026,7 +1059,7 @@
         ret = AM65_CPSW_XDP_PASS;
         goto out;
     case XDP_TX:
-        tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+        tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
         netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
 
         xdpf = xdp_convert_buff_to_frame(xdp);
@@ -1068,7 +1101,8 @@ drop:
     }
 
     page = virt_to_head_page(xdp->data);
-    am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+    am65_cpsw_put_page(flow, page, true, desc_idx);
+
 out:
     return ret;
 }
@@ -1106,11 +1140,12 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
     }
 }
 
-static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
-                     u32 flow_idx, int cpu, int *xdp_state)
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+                     int cpu, int *xdp_state)
 {
-    struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+    struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
     u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+    struct am65_cpsw_common *common = flow->common;
     struct am65_cpsw_ndev_priv *ndev_priv;
     struct am65_cpsw_ndev_stats *stats;
     struct cppi5_host_desc_t *desc_rx;
@@ -1120,6 +1155,7 @@
     struct am65_cpsw_port *port;
     int headroom, desc_idx, ret;
     struct net_device *ndev;
+    u32 flow_idx = flow->id;
     struct sk_buff *skb;
     struct xdp_buff xdp;
     void *page_addr;
@@ -1174,10 +1210,10 @@
     }
 
     if (port->xdp_prog) {
-        xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+        xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
         xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
                  pkt_len, false);
-        *xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+        *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
                            cpu, &pkt_len);
         if (*xdp_state != AM65_CPSW_XDP_PASS)
             goto allocate;
@@ -1195,7 +1231,7 @@
     skb_mark_for_recycle(skb);
     skb->protocol = eth_type_trans(skb, ndev);
     am65_cpsw_nuss_rx_csum(skb, csum_info);
-    napi_gro_receive(&common->napi_rx, skb);
+    napi_gro_receive(&flow->napi_rx, skb);
 
     stats = this_cpu_ptr(ndev_priv->stats);
@@ -1205,24 +1241,24 @@
     u64_stats_update_end(&stats->syncp);
 
 allocate:
-    new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+    new_page = page_pool_dev_alloc_pages(flow->page_pool);
     if (unlikely(!new_page)) {
         dev_err(dev, "page alloc failed\n");
         return -ENOMEM;
     }
 
-    rx_chn->pages[desc_idx] = new_page;
+    flow->pages[desc_idx] = new_page;
 
     if (netif_dormant(ndev)) {
-        am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+        am65_cpsw_put_page(flow, new_page, true, desc_idx);
         ndev->stats.rx_dropped++;
         return 0;
     }
 
 requeue:
-    ret = am65_cpsw_nuss_rx_push(common, new_page);
+    ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
     if (WARN_ON(ret < 0)) {
-        am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+        am65_cpsw_put_page(flow, new_page, true, desc_idx);
         ndev->stats.rx_errors++;
         ndev->stats.rx_dropped++;
     }
@@ -1232,54 +1268,48 @@ requeue:
 static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
 {
-    struct am65_cpsw_common *common =
-            container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+    struct am65_cpsw_rx_flow *flow = container_of(timer,
+                              struct am65_cpsw_rx_flow,
+                              rx_hrtimer);
 
-    enable_irq(common->rx_chns.irq);
+    enable_irq(flow->irq);
     return HRTIMER_NORESTART;
 }
 
 static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 {
-    struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
-    int flow = AM65_CPSW_MAX_RX_FLOWS;
+    struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
+    struct am65_cpsw_common *common = flow->common;
     int cpu = smp_processor_id();
     int xdp_state_or = 0;
     int cur_budget, ret;
     int xdp_state;
     int num_rx = 0;
 
-    /* process every flow */
-    while (flow--) {
-        cur_budget = budget - num_rx;
-
-        while (cur_budget--) {
-            ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
-                            &xdp_state);
-            xdp_state_or |= xdp_state;
-            if (ret)
-                break;
-            num_rx++;
-        }
-
-        if (num_rx >= budget)
-            break;
+    /* process only this flow */
+    cur_budget = budget;
+
+    while (cur_budget--) {
+        ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+        xdp_state_or |= xdp_state;
+        if (ret)
+            break;
+        num_rx++;
     }
 
     if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
         xdp_do_flush();
 
     dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
 
     if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
-        if (common->rx_irq_disabled) {
-            common->rx_irq_disabled = false;
-            if (unlikely(common->rx_pace_timeout)) {
-                hrtimer_start(&common->rx_hrtimer,
-                          ns_to_ktime(common->rx_pace_timeout),
+        if (flow->irq_disabled) {
+            flow->irq_disabled = false;
+            if (unlikely(flow->rx_pace_timeout)) {
+                hrtimer_start(&flow->rx_hrtimer,
+                          ns_to_ktime(flow->rx_pace_timeout),
                           HRTIMER_MODE_REL_PINNED);
             } else {
-                enable_irq(common->rx_chns.irq);
+                enable_irq(flow->irq);
             }
         }
     }
@@ -1527,11 +1557,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
 static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
 {
-    struct am65_cpsw_common *common = dev_id;
+    struct am65_cpsw_rx_flow *flow = dev_id;
 
-    common->rx_irq_disabled = true;
+    flow->irq_disabled = true;
     disable_irq_nosync(irq);
-    napi_schedule(&common->napi_rx);
+    napi_schedule(&flow->napi_rx);
 
     return IRQ_HANDLED;
 }
@@ -2176,7 +2206,7 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
     }
 }
 
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
 {
     struct device *dev = common->dev;
     int i;
@@ -2191,15 +2221,9 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
         devm_free_irq(dev, tx_chn->irq, tx_chn);
 
         netif_napi_del(&tx_chn->napi_tx);
-
-        if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
-            k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
-        if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
-            k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
-        memset(tx_chn, 0, sizeof(*tx_chn));
     }
+
+    am65_cpsw_nuss_free_tx_chns(common);
 }
 
 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
@@ -2331,19 +2355,22 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
     k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
 }
 
-static void am65_cpsw_nuss_remove_rx_chns(void *data)
+static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
 {
-    struct am65_cpsw_common *common = data;
     struct device *dev = common->dev;
     struct am65_cpsw_rx_chn *rx_chn;
+    struct am65_cpsw_rx_flow *flows;
+    int i;
 
     rx_chn = &common->rx_chns;
+    flows = rx_chn->flows;
     devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
 
-    if (!(rx_chn->irq < 0))
-        devm_free_irq(dev, rx_chn->irq, common);
-
-    netif_napi_del(&common->napi_rx);
+    for (i = 0; i < common->rx_ch_num_flows; i++) {
+        if (!(flows[i].irq < 0))
+            devm_free_irq(dev, flows[i].irq, &flows[i]);
+        netif_napi_del(&flows[i].napi_rx);
+    }
 
     am65_cpsw_nuss_free_rx_chns(common);
@@ -2356,6 +2383,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
     struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
     u32  max_desc_num = AM65_CPSW_MAX_RX_DESC;
     struct device *dev = common->dev;
+    struct am65_cpsw_rx_flow *flow;
     u32 hdesc_size, hdesc_size_out;
     u32 fdqring_id;
     int i, ret = 0;
@@ -2364,12 +2392,21 @@
                    AM65_CPSW_NAV_SW_DATA_SIZE);
 
     rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
-    rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+    rx_cfg.flow_id_num = common->rx_ch_num_flows;
    rx_cfg.flow_id_base = common->rx_flow_id_base;
 
     /* init all flows */
     rx_chn->dev = dev;
-    rx_chn->descs_num = max_desc_num;
+    rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
+
+    for (i = 0; i < common->rx_ch_num_flows; i++) {
+        flow = &rx_chn->flows[i];
+        flow->page_pool = NULL;
+        flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
+                       sizeof(*flow->pages), GFP_KERNEL);
+        if (!flow->pages)
+            return -ENOMEM;
+    }
 
     rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
     if (IS_ERR(rx_chn->rx_chn)) {
@@ -2392,13 +2429,6 @@
     rx_chn->dsize_log2 = __fls(hdesc_size_out);
     WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
 
-    rx_chn->page_pool = NULL;
-
-    rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
-                     sizeof(*rx_chn->pages), GFP_KERNEL);
-    if (!rx_chn->pages)
-        return -ENOMEM;
-
     common->rx_flow_id_base =
             k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
     dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2422,6 +2452,10 @@
             K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
         };
 
+        flow = &rx_chn->flows[i];
+        flow->id = i;
+        flow->common = common;
+
         rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
         rx_flow_cfg.rx_cfg.size = max_desc_num;
         rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
@@ -2438,29 +2472,36 @@
                 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
                                 i);
 
-        rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-
-        if (rx_chn->irq < 0) {
+        flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+        if (flow->irq <= 0) {
             dev_err(dev, "Failed to get rx dma irq %d\n",
-                rx_chn->irq);
-            ret = rx_chn->irq;
+                flow->irq);
+            ret = flow->irq;
             goto err;
         }
-    }
 
-    netif_napi_add(common->dma_ndev, &common->napi_rx,
-               am65_cpsw_nuss_rx_poll);
-    hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-    common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+        snprintf(flow->name,
+             sizeof(flow->name), "%s-rx%d",
+             dev_name(dev), i);
+        netif_napi_add(common->dma_ndev, &flow->napi_rx,
+                   am65_cpsw_nuss_rx_poll);
+        hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+                 HRTIMER_MODE_REL_PINNED);
+        flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
 
-    ret = devm_request_irq(dev, rx_chn->irq,
-                   am65_cpsw_nuss_rx_irq,
-                   IRQF_TRIGGER_HIGH, dev_name(dev), common);
-    if (ret) {
-        dev_err(dev, "failure requesting rx irq %u, %d\n",
-            rx_chn->irq, ret);
-        goto err;
+        ret = devm_request_irq(dev, flow->irq,
+                       am65_cpsw_nuss_rx_irq,
+                       IRQF_TRIGGER_HIGH,
+                       flow->name, flow);
+        if (ret) {
+            dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+                i, flow->irq, ret);
+            goto err;
+        }
     }
 
+    /* setup classifier to route priorities to flows */
+    cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+
 err:
     i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
@@ -2705,8 +2746,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
     /* alloc netdev */
     port->ndev = devm_alloc_etherdev_mqs(common->dev,
                          sizeof(struct am65_cpsw_ndev_priv),
-                         AM65_CPSW_MAX_TX_QUEUES,
-                         AM65_CPSW_MAX_RX_QUEUES);
+                         AM65_CPSW_MAX_QUEUES,
+                         AM65_CPSW_MAX_QUEUES);
     if (!port->ndev) {
         dev_err(dev, "error allocating slave net_device %u\n",
             port->port_id);
@@ -3303,9 +3344,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
         k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
     }
 
-    for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
-        k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
-                      am65_cpsw_nuss_rx_cleanup, !!i);
+    for (i = 0; i < common->rx_ch_num_flows; i++)
+        k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
+                      &rx_chan->flows[i],
+                      am65_cpsw_nuss_rx_cleanup, 0);
 
     k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -3346,12 +3388,21 @@ err_cleanup_ndev:
     return ret;
 }
 
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+                     int num_tx, int num_rx)
 {
     int ret;
 
+    am65_cpsw_nuss_remove_tx_chns(common);
+    am65_cpsw_nuss_remove_rx_chns(common);
+
     common->tx_ch_num = num_tx;
+    common->rx_ch_num_flows = num_rx;
     ret = am65_cpsw_nuss_init_tx_chns(common);
+    if (ret)
+        return ret;
+
+    ret = am65_cpsw_nuss_init_rx_chns(common);
 
     return ret;
 }
@@ -3481,6 +3532,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
     common->rx_flow_id_base = -1;
     init_completion(&common->tdown_complete);
     common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+    common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
     common->pf_p0_rx_ptype_rrobin = false;
     common->default_vlan = 1;
@@ -3672,8 +3724,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
         return ret;
 
     /* If RX IRQ was disabled before suspend, keep it disabled */
-    if (common->rx_irq_disabled)
-        disable_irq(common->rx_chns.irq);
+    for (i = 0; i < common->rx_ch_num_flows; i++) {
+        if (common->rx_chns.flows[i].irq_disabled)
+            disable_irq(common->rx_chns.flows[i].irq);
+    }
 
     am65_cpts_resume(common->cpts);

drivers/net/ethernet/ti/am65-cpsw-nuss.h

@@ -21,9 +21,7 @@ struct am65_cpts;
 
 #define HOST_PORT_NUM       0
 
-#define AM65_CPSW_MAX_TX_QUEUES 8
-#define AM65_CPSW_MAX_RX_QUEUES 1
-#define AM65_CPSW_MAX_RX_FLOWS  1
+#define AM65_CPSW_MAX_QUEUES    8   /* both TX & RX */
 
 #define AM65_CPSW_PORT_VLAN_REG_OFFSET  0x014
@@ -58,7 +56,7 @@ struct am65_cpsw_port {
     struct am65_cpsw_qos        qos;
     struct devlink_port     devlink_port;
     struct bpf_prog         *xdp_prog;
-    struct xdp_rxq_info     xdp_rxq;
+    struct xdp_rxq_info     xdp_rxq[AM65_CPSW_MAX_QUEUES];
     /* Only for suspend resume context */
     u32             vid_context;
 };
@@ -94,16 +92,27 @@ struct am65_cpsw_tx_chn {
     u32 rate_mbps;
 };
 
+struct am65_cpsw_rx_flow {
+    u32 id;
+    struct napi_struct napi_rx;
+    struct am65_cpsw_common *common;
+    int irq;
+    bool irq_disabled;
+    struct hrtimer rx_hrtimer;
+    unsigned long rx_pace_timeout;
+    struct page_pool *page_pool;
+    struct page **pages;
+    char name[32];
+};
+
 struct am65_cpsw_rx_chn {
     struct device *dev;
     struct device *dma_dev;
     struct k3_cppi_desc_pool *desc_pool;
     struct k3_udma_glue_rx_channel *rx_chn;
-    struct page_pool *page_pool;
-    struct page **pages;
     u32 descs_num;
     unsigned char dsize_log2;
-    int irq;
+    struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
 };
 
 #define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
@@ -145,16 +154,12 @@ struct am65_cpsw_common {
     u32         tx_ch_rate_msk;
     u32         rx_flow_id_base;
 
-    struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+    struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES];
     struct completion   tdown_complete;
     atomic_t        tdown_cnt;
 
+    int         rx_ch_num_flows;
     struct am65_cpsw_rx_chn rx_chns;
-    struct napi_struct  napi_rx;
-
-    bool            rx_irq_disabled;
-    struct hrtimer      rx_hrtimer;
-    unsigned long       rx_pace_timeout;
 
     u32         nuss_ver;
     u32         cpsw_ver;
@@ -203,8 +208,8 @@ struct am65_cpsw_ndev_priv {
 #define am65_common_get_host(common) (&(common)->host)
 #define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
 
-#define am65_cpsw_napi_to_common(pnapi) \
-    container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_rx_flow(pnapi) \
+    container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx)
 #define am65_cpsw_napi_to_tx_chn(pnapi) \
     container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
@@ -215,8 +220,8 @@ struct am65_cpsw_ndev_priv {
 extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
 
 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+                     int num_tx, int num_rx);
 
 bool am65_cpsw_port_dev_check(const struct net_device *dev);

drivers/net/ethernet/ti/cpsw_ale.c

@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -45,6 +46,24 @@
 #define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS  0x9C
 #define ALE_VLAN_MASK_MUX(reg)          (0xc0 + (0x4 * (reg)))
 
+#define ALE_POLICER_PORT_OUI        0x100
+#define ALE_POLICER_DA_SA       0x104
+#define ALE_POLICER_VLAN        0x108
+#define ALE_POLICER_ETHERTYPE_IPSA  0x10c
+#define ALE_POLICER_IPDA        0x110
+#define ALE_POLICER_PIR         0x118
+#define ALE_POLICER_CIR         0x11c
+#define ALE_POLICER_TBL_CTL     0x120
+#define ALE_POLICER_CTL         0x124
+#define ALE_POLICER_TEST_CTL        0x128
+#define ALE_POLICER_HIT_STATUS      0x12c
+#define ALE_THREAD_DEF          0x134
+#define ALE_THREAD_CTL          0x138
+#define ALE_THREAD_VAL          0x13c
+
+#define ALE_POLICER_TBL_WRITE_ENABLE    BIT(31)
+#define ALE_POLICER_TBL_INDEX_MASK  GENMASK(4, 0)
+
 #define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
 
 /* ALE_AGING_TIMER */
@@ -76,7 +95,7 @@ enum {
  * @dev_id: ALE version/SoC id
  * @features: features supported by ALE
  * @tbl_entries: number of ALE entries
- * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @reg_fields: pointer to array of register field configuration
  * @nu_switch_ale: NU Switch ALE
  * @vlan_entry_tbl: ALE vlan entry fields description tbl
  */
@@ -84,7 +103,7 @@ struct cpsw_ale_dev_id {
     const char *dev_id;
     u32 features;
     u32 tbl_entries;
-    u32 major_ver_mask;
+    const struct reg_field *reg_fields;
     bool nu_switch_ale;
     const struct ale_entry_fld *vlan_entry_tbl;
 };
@@ -102,7 +121,7 @@
 #define ALE_UCAST_TOUCHED       3
 
 #define ALE_TABLE_SIZE_MULTIPLIER   1024
-#define ALE_STATUS_SIZE_MASK        0x1f
+#define ALE_POLICER_SIZE_MULTIPLIER 8
 
 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
 {
@@ -1292,25 +1311,108 @@ void cpsw_ale_stop(struct cpsw_ale *ale)
     cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
 }
 
+static const struct reg_field ale_fields_cpsw[] = {
+    /* CPSW_ALE_IDVER_REG */
+    [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+    [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15),
+};
+
+static const struct reg_field ale_fields_cpsw_nu[] = {
+    /* CPSW_ALE_IDVER_REG */
+    [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+    [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10),
+    /* CPSW_ALE_STATUS_REG */
+    [ALE_ENTRIES]   = REG_FIELD(ALE_STATUS, 0, 7),
+    [ALE_POLICERS]  = REG_FIELD(ALE_STATUS, 8, 15),
+    /* CPSW_ALE_POLICER_PORT_OUI_REG */
+    [POL_PORT_MEN]  = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31),
+    [POL_TRUNK_ID]  = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30),
+    [POL_PORT_NUM]  = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25),
+    [POL_PRI_MEN]   = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19),
+    [POL_PRI_VAL]   = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18),
+    [POL_OUI_MEN]   = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15),
+    [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5),
+    /* CPSW_ALE_POLICER_DA_SA_REG */
+    [POL_DST_MEN]   = REG_FIELD(ALE_POLICER_DA_SA, 31, 31),
+    [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21),
+    [POL_SRC_MEN]   = REG_FIELD(ALE_POLICER_DA_SA, 15, 15),
+    [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5),
+    /* CPSW_ALE_POLICER_VLAN_REG */
+    [POL_OVLAN_MEN]     = REG_FIELD(ALE_POLICER_VLAN, 31, 31),
+    [POL_OVLAN_INDEX]   = REG_FIELD(ALE_POLICER_VLAN, 16, 21),
+    [POL_IVLAN_MEN]     = REG_FIELD(ALE_POLICER_VLAN, 15, 15),
+    [POL_IVLAN_INDEX]   = REG_FIELD(ALE_POLICER_VLAN, 0, 5),
+    /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */
+    [POL_ETHERTYPE_MEN]     = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31),
+    [POL_ETHERTYPE_INDEX]   = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21),
+    [POL_IPSRC_MEN]     = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15),
+    [POL_IPSRC_INDEX]   = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5),
+    /* CPSW_ALE_POLICER_IPDA_REG */
+    [POL_IPDST_MEN]     = REG_FIELD(ALE_POLICER_IPDA, 31, 31),
+    [POL_IPDST_INDEX]   = REG_FIELD(ALE_POLICER_IPDA, 16, 21),
+    /* CPSW_ALE_POLICER_TBL_CTL_REG */
+    /**
+     * REG_FIELDS not defined for this as fields cannot be correctly
+     * used independently
+     */
+    /* CPSW_ALE_POLICER_CTL_REG */
+    [POL_EN]        = REG_FIELD(ALE_POLICER_CTL, 31, 31),
+    [POL_RED_DROP_EN]   = REG_FIELD(ALE_POLICER_CTL, 29, 29),
+    [POL_YELLOW_DROP_EN]    = REG_FIELD(ALE_POLICER_CTL, 28, 28),
+    [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26),
+    [POL_POL_MATCH_MODE]    = REG_FIELD(ALE_POLICER_CTL, 22, 23),
+    [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21),
+    [POL_MAC_ONLY_DEF_DIS]  = REG_FIELD(ALE_POLICER_CTL, 20, 20),
+    /* CPSW_ALE_POLICER_TEST_CTL_REG */
+    [POL_TEST_CLR]      = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31),
+    [POL_TEST_CLR_RED]  = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30),
+    [POL_TEST_CLR_YELLOW]   = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29),
+    [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28),
+    [POL_TEST_ENTRY]    = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4),
+    /* CPSW_ALE_POLICER_HIT_STATUS_REG */
+    [POL_STATUS_HIT]    = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31),
+    [POL_STATUS_HIT_RED]    = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30),
+    [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29),
+    /* CPSW_ALE_THREAD_DEF_REG */
+    [ALE_DEFAULT_THREAD_EN]  = REG_FIELD(ALE_THREAD_DEF, 15, 15),
+    [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5),
+    /* CPSW_ALE_THREAD_CTL_REG */
+    [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4),
+    /* CPSW_ALE_THREAD_VAL_REG */
+    [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15),
+    [ALE_THREAD_VALUE]  = REG_FIELD(ALE_THREAD_VAL, 0, 5),
+};
+
 static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
     {
         /* am3/4/5, dra7. dm814x, 66ak2hk-gbe */
         .dev_id = "cpsw",
         .tbl_entries = 1024,
-        .major_ver_mask = 0xff,
+        .reg_fields = ale_fields_cpsw,
         .vlan_entry_tbl = vlan_entry_cpsw,
     },
     {
         /* 66ak2h_xgbe */
         .dev_id = "66ak2h-xgbe",
         .tbl_entries = 2048,
-        .major_ver_mask = 0xff,
+        .reg_fields = ale_fields_cpsw,
         .vlan_entry_tbl = vlan_entry_cpsw,
     },
     {
         .dev_id = "66ak2el",
         .features = CPSW_ALE_F_STATUS_REG,
-        .major_ver_mask = 0x7,
+        .reg_fields = ale_fields_cpsw_nu,
         .nu_switch_ale = true,
         .vlan_entry_tbl = vlan_entry_nu,
     },
@@ -1318,7 +1420,7 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
         .dev_id = "66ak2g",
         .features = CPSW_ALE_F_STATUS_REG,
         .tbl_entries = 64,
-        .major_ver_mask = 0x7,
+        .reg_fields = ale_fields_cpsw_nu,
         .nu_switch_ale = true,
         .vlan_entry_tbl = vlan_entry_nu,
     },
@@ -1326,20 +1428,20 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
         .dev_id = "am65x-cpsw2g",
         .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
         .tbl_entries = 64,
-        .major_ver_mask = 0x7,
+        .reg_fields = ale_fields_cpsw_nu,
         .nu_switch_ale = true,
         .vlan_entry_tbl = vlan_entry_nu,
     },
     {
         .dev_id = "j721e-cpswxg",
         .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
-        .major_ver_mask = 0x7,
+        .reg_fields = ale_fields_cpsw_nu,
         .vlan_entry_tbl = vlan_entry_k3_cpswxg,
     },
     {
         .dev_id = "am64-cpswxg",
         .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
-        .major_ver_mask = 0x7,
+        .reg_fields = ale_fields_cpsw_nu,
         .vlan_entry_tbl = vlan_entry_k3_cpswxg,
         .tbl_entries = 512,
     },
@@ -1361,47 +1463,80 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
     return NULL;
 }
 
+static const struct regmap_config ale_regmap_cfg = {
+    .reg_bits = 32,
+    .val_bits = 32,
+    .reg_stride = 4,
+    .name = "cpsw-ale",
+};
+
+static int cpsw_ale_regfield_init(struct cpsw_ale *ale)
+{
+    const struct reg_field *reg_fields = ale->params.reg_fields;
+    struct device *dev = ale->params.dev;
+    struct regmap *regmap = ale->regmap;
+    int i;
+
+    for (i = 0; i < ALE_FIELDS_MAX; i++) {
+        ale->fields[i] = devm_regmap_field_alloc(dev, regmap,
+                             reg_fields[i]);
+        if (IS_ERR(ale->fields[i])) {
+            dev_err(dev, "Unable to allocate regmap field %d\n", i);
+            return PTR_ERR(ale->fields[i]);
+        }
+    }
+
+    return 0;
+}
+
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
 {
+    u32 ale_entries, rev_major, rev_minor, policers;
     const struct cpsw_ale_dev_id *ale_dev_id;
     struct cpsw_ale *ale;
-    u32 rev, ale_entries;
+    int ret;
 
     ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
     if (!ale_dev_id)
         return ERR_PTR(-EINVAL);
 
     params->ale_entries = ale_dev_id->tbl_entries;
-    params->major_ver_mask = ale_dev_id->major_ver_mask;
     params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+    params->reg_fields = ale_dev_id->reg_fields;
 
     ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
     if (!ale)
         return ERR_PTR(-ENOMEM);
+
+    ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs,
+                        &ale_regmap_cfg);
+    if (IS_ERR(ale->regmap)) {
+        dev_err(params->dev, "Couldn't create CPSW ALE regmap\n");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    ale->params = *params;
+    ret = cpsw_ale_regfield_init(ale);
+    if (ret)
+        return ERR_PTR(ret);
+
     ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
                             GFP_KERNEL);
     if (!ale->p0_untag_vid_mask)
         return ERR_PTR(-ENOMEM);
 
-    ale->params = *params;
     ale->ageout = ale->params.ale_ageout * HZ;
     ale->features = ale_dev_id->features;
     ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
 
-    rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
-    ale->version =
-        (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
-         ALE_VERSION_MINOR(rev);
+    regmap_field_read(ale->fields[MINOR_VER], &rev_minor);
+    regmap_field_read(ale->fields[MAJOR_VER], &rev_major);
+    ale->version = rev_major << 8 | rev_minor;
     dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
-         ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
-         ALE_VERSION_MINOR(rev));
+         rev_major, rev_minor);
 
     if (ale->features & CPSW_ALE_F_STATUS_REG &&
         !ale->params.ale_entries) {
-        ale_entries =
-            readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
-            ALE_STATUS_SIZE_MASK;
+        regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries);
         /* ALE available on newer NetCP switches has introduced
          * a register, ALE_STATUS, to indicate the size of ALE
         * table which shows the size as a multiple of 1024 entries.
@@ -1415,8 +1550,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
         ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
         ale->params.ale_entries = ale_entries;
     }
+
+    if (ale->features & CPSW_ALE_F_STATUS_REG &&
+        !ale->params.num_policers) {
+        regmap_field_read(ale->fields[ALE_POLICERS], &policers);
+        if (!policers)
+            return ERR_PTR(-EINVAL);
+
+        policers *= ALE_POLICER_SIZE_MULTIPLIER;
+        ale->params.num_policers = policers;
+    }
+
     dev_info(ale->params.dev,
-         "ALE Table size %ld\n", ale->params.ale_entries);
+         "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries,
+         ale->params.num_policers);
 
     /* set default bits for existing h/w */
     ale->port_mask_bits = ale->params.ale_ports;
@@ -1480,3 +1627,97 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
 {
     return ale ? ale->params.ale_entries : 0;
 }
+
+/* Reads the specified policer index into ALE POLICER registers */
+static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx)
+{
+    idx &= ALE_POLICER_TBL_INDEX_MASK;
+    writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* Writes the ALE POLICER registers into the specified policer index */
+static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx)
+{
+    idx &= ALE_POLICER_TBL_INDEX_MASK;
+    idx |= ALE_POLICER_TBL_WRITE_ENABLE;
+    writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* enables/disables the custom thread value for the specified policer index */
+static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx,
+                           u32 thread_id, bool enable)
+{
+    regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx);
+    regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id);
+    regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0);
+}
+
+/* Disable all policer entries and thread mappings */
+static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
+{
+    int i;
+
+    for (i = 0; i < ale->params.num_policers; i++) {
+        cpsw_ale_policer_read_idx(ale, i);
+        regmap_field_write(ale->fields[POL_PORT_MEN], 0);
+        regmap_field_write(ale->fields[POL_PRI_MEN], 0);
+        regmap_field_write(ale->fields[POL_OUI_MEN], 0);
+        regmap_field_write(ale->fields[POL_DST_MEN], 0);
+        regmap_field_write(ale->fields[POL_SRC_MEN], 0);
+        regmap_field_write(ale->fields[POL_OVLAN_MEN], 0);
+        regmap_field_write(ale->fields[POL_IVLAN_MEN], 0);
+        regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0);
+        regmap_field_write(ale->fields[POL_IPSRC_MEN], 0);
+        regmap_field_write(ale->fields[POL_IPDST_MEN], 0);
+        regmap_field_write(ale->fields[POL_EN], 0);
+        regmap_field_write(ale->fields[POL_RED_DROP_EN], 0);
+        regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0);
+        regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0);
+
+        cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0);
+    }
+}
+
+/* Default classifier is to map 8 user priorities to N receive channels */
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
+{
+    int pri, idx;
+    /* IEEE802.1D-2004, Standard for Local and metropolitan area networks
+     * Table G-2 - Traffic type acronyms
+     * Table G-3 - Defining traffic types
+     * User priority values 1 and 2 effectively communicate a lower
+     * priority than 0. In the below table 0 is assigned to higher priority
+     * thread than 1 and 2 wherever possible.
+     * The below table maps which thread the user priority needs to be
+     * sent to for a given number of threads (RX channels). Upper threads
+     * have higher priority.
+     * e.g. if number of threads is 8 then user priority 0 will map to
+     * pri_thread_map[8-1][0] i.e. thread 2
+     */
+    int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+                     { 0, 0, 0, 0, 1, 1, 1, 1, },
+                     { 0, 0, 0, 0, 1, 1, 2, 2, },
+                     { 1, 0, 0, 1, 2, 2, 3, 3, },
+                     { 1, 0, 0, 1, 2, 3, 4, 4, },
+                     { 1, 0, 0, 2, 3, 4, 5, 5, },
+                     { 1, 0, 0, 2, 3, 4, 5, 6, },
+                     { 2, 0, 1, 3, 4, 5, 6, 7, } };
+
+    cpsw_ale_policer_reset(ale);
+
+    /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */
+    for (pri = 0; pri < 8; pri++) {
+        idx = pri;
+
+        /* Classifier 'idx' match on priority 'pri' */
+        cpsw_ale_policer_read_idx(ale, idx);
+        regmap_field_write(ale->fields[POL_PRI_VAL], pri);
+        regmap_field_write(ale->fields[POL_PRI_MEN], 1);
+        cpsw_ale_policer_write_idx(ale, idx);
+
+        /* Map Classifier 'idx' to thread provided by the map */
+        cpsw_ale_policer_thread_idx_enable(ale, idx,
+                           pri_thread_map[num_rx_ch - 1][pri],
+                           1);
+    }
+}

drivers/net/ethernet/ti/cpsw_ale.h

@@ -8,11 +8,14 @@
 #ifndef __TI_CPSW_ALE_H__
 #define __TI_CPSW_ALE_H__
 
+struct reg_fields;
+
 struct cpsw_ale_params {
     struct device       *dev;
     void __iomem        *ale_regs;
     unsigned long       ale_ageout; /* in secs */
     unsigned long       ale_entries;
+    unsigned long       num_policers;
     unsigned long       ale_ports;
     /* NU Switch has specific handling as number of bits in ALE entries
      * are different than other versions of ALE. Also there are specific
@@ -20,19 +23,69 @@ struct cpsw_ale_params {
      * to identify this hardware.
      */
     bool            nu_switch_ale;
-    /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
-     * pass it from caller.
-     */
-    u32         major_ver_mask;
+    const struct reg_field *reg_fields;
     const char      *dev_id;
     unsigned long       bus_freq;
 };
 
 struct ale_entry_fld;
+struct regmap;
+
+enum ale_fields {
+    MINOR_VER,
+    MAJOR_VER,
+    ALE_ENTRIES,
+    ALE_POLICERS,
+    POL_PORT_MEN,
+    POL_TRUNK_ID,
+    POL_PORT_NUM,
+    POL_PRI_MEN,
+    POL_PRI_VAL,
+    POL_OUI_MEN,
+    POL_OUI_INDEX,
+    POL_DST_MEN,
+    POL_DST_INDEX,
+    POL_SRC_MEN,
+    POL_SRC_INDEX,
+    POL_OVLAN_MEN,
+    POL_OVLAN_INDEX,
+    POL_IVLAN_MEN,
+    POL_IVLAN_INDEX,
+    POL_ETHERTYPE_MEN,
+    POL_ETHERTYPE_INDEX,
+    POL_IPSRC_MEN,
+    POL_IPSRC_INDEX,
+    POL_IPDST_MEN,
+    POL_IPDST_INDEX,
+    POL_EN,
+    POL_RED_DROP_EN,
+    POL_YELLOW_DROP_EN,
+    POL_YELLOW_THRESH,
+    POL_POL_MATCH_MODE,
+    POL_PRIORITY_THREAD_EN,
+    POL_MAC_ONLY_DEF_DIS,
+    POL_TEST_CLR,
+    POL_TEST_CLR_RED,
+    POL_TEST_CLR_YELLOW,
+    POL_TEST_CLR_SELECTED,
+    POL_TEST_ENTRY,
+    POL_STATUS_HIT,
+    POL_STATUS_HIT_RED,
+    POL_STATUS_HIT_YELLOW,
+    ALE_DEFAULT_THREAD_EN,
+    ALE_DEFAULT_THREAD_VAL,
+    ALE_THREAD_CLASS_INDEX,
+    ALE_THREAD_ENABLE,
+    ALE_THREAD_VALUE,
+    /* terminator */
+    ALE_FIELDS_MAX,
+};
 
 struct cpsw_ale {
     struct cpsw_ale_params  params;
     struct timer_list   timer;
+    struct regmap       *regmap;
+    struct regmap_field *fields[ALE_FIELDS_MAX];
     unsigned long       ageout;
     u32         version;
     u32         features;
@@ -140,5 +193,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
 int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
 void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
                   bool add);
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch);
 
 #endif