
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: iavf: add support for TC U32 filters on VFs

Ahmed Zaki says:

The Intel Ethernet 800 Series is designed with a pipeline that has
an on-chip programmable capability called Dynamic Device Personalization
(DDP). A DDP package is loaded by the driver at probe time. The DDP
package programs functionality in both the parser and switching blocks of
the pipeline, allowing dynamic support for new and existing protocols.
Once the pipeline is configured, the driver can identify the protocol and
apply any HW action at different stages, for example, directing packets
to desired hardware queues (Flow Director) or queue groups, or dropping
them.
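
For orientation, a hedged sketch of the probe-time package load, using
ice_copy_and_init_pkg() as declared in ice_ddp.h further down; the
request_firmware() plumbing, device handle and warning text are
illustrative:

	/* "fw" stands in for a DDP image obtained via request_firmware() */
	enum ice_ddp_state state;

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
	if (state != ICE_DDP_PKG_SUCCESS)
		dev_warn(dev, "DDP package not loaded (state %d), flow filtering will be limited\n",
			 state);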

Patches 1-8 introduce a DDP package parser API that enables different
pipeline stages in the driver to learn the HW parser capabilities from
the DDP package that is downloaded to HW. The parser library takes raw
packet patterns and masks (in binary) indicating the packet protocol fields
to be matched and generates the final HW profiles that can be applied at
the required stage. With this API, raw flow filtering for FDIR or RSS
could be done on new protocols or headers without any driver or Kernel
updates (only need to update the DDP package). These patches were submitted
before [1] but were not accepted mainly due to lack of a user.
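
A hedged sketch of that flow, built only from functions declared later in
this series (ice_parser.h and ice_flow.h); the packet/mask buffers and
the VSI ids are illustrative:

	struct ice_parser_profile prof;
	struct ice_parser_result rslt;
	struct ice_parser *psr;
	int err;

	psr = ice_parser_create(hw);
	if (IS_ERR(psr))
		return PTR_ERR(psr);

	/* parse the raw pattern, then turn the result into a HW profile */
	err = ice_parser_run(psr, pkt, pkt_len, &rslt);
	if (!err)
		err = ice_parser_profile_init(&rslt, pkt, msk, pkt_len,
					      ICE_BLK_FD, &prof);
	/* program the profile into the FDIR stage of the VSI pair */
	if (!err)
		err = ice_flow_set_parser_prof(hw, dest_vsi, ctrl_vsi,
					       &prof, ICE_BLK_FD);

	ice_parser_destroy(psr);
	return err;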

Patches 9-11 extend the virtchnl support to allow the VF to request raw
flow director filters. Upon receiving a raw FDIR filter request, the PF
driver allocates and runs a parser library instance and generates the
hardware profile definitions required to program the FDIR stage. These
were also submitted before [2].
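
What distinguishes a raw rule on the wire is sketched below (hedged; the
field names come from the virtchnl changes in this series, and pkt, msk
and pkt_len stand for the user-supplied pattern, mask and length). A raw
rule leaves proto_hdrs.count at zero and carries the binary pattern in
the new raw member, which is what iavf_is_raw_fdir() tests further down
in this diff:

	struct virtchnl_proto_hdrs *hdrs = &fltr->vc_add_msg.rule_cfg.proto_hdrs;

	hdrs->count = 0;                      /* count == 0 marks the rule as raw */
	memcpy(hdrs->raw.spec, pkt, pkt_len); /* binary packet pattern to match */
	memcpy(hdrs->raw.mask, msk, pkt_len); /* per-byte mask over the pattern */
	hdrs->raw.pkt_len = pkt_len;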

Finally, patches 12 and 13 add TC U32 filter support to the iavf driver.
Using the parser API, the ice driver runs the raw patterns sent by the
user through the parser and then adds the resulting profile to the FDIR
stage associated with the VF's VSI. Refer to the examples in the commit
message of patch 13.

[1]: https://lore.kernel.org/netdev/20230904021455.3944605-1-junfeng.guo@intel.com/
[2]: https://lore.kernel.org/intel-wired-lan/20230818064703.154183-1-junfeng.guo@intel.com/

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  iavf: add support for offloading tc U32 cls filters
  iavf: refactor add/del FDIR filters
  ice: enable FDIR filters from raw binary patterns for VFs
  ice: add method to disable FDIR SWAP option
  virtchnl: support raw packet in protocol header
  ice: add API for parser profile initialization
  ice: add UDP tunnels support to the parser
  ice: support turning on/off the parser's double vlan mode
  ice: add parser execution main loop
  ice: add parser internal helper functions
  ice: add debugging functions for the parser sections
  ice: parse and init various DDP parser sections
  ice: add parser create and destroy skeleton
====================

Link: https://patch.msgid.link/20240813222249.3708070-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2024-08-16 11:28:50 -07:00
commit 2dce239099
22 changed files with 4792 additions and 90 deletions

File: drivers/net/ethernet/intel/iavf/iavf.h

@@ -33,6 +33,7 @@
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_skbedit.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
@@ -393,6 +394,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC)
#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_TC_U32)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -437,6 +440,7 @@ struct iavf_adapter {
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
u16 raw_fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
@@ -444,6 +448,32 @@ struct iavf_adapter {
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
/* Must be called with fdir_fltr_lock lock held */
static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
{
return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
IAVF_MAX_FDIR_FILTERS;
}
static inline void
iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr)
{
if (iavf_is_raw_fdir(fltr))
adapter->raw_fdir_active_fltr++;
else
adapter->fdir_active_fltr++;
}
static inline void
iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr)
{
if (iavf_is_raw_fdir(fltr))
adapter->raw_fdir_active_fltr--;
else
adapter->fdir_active_fltr--;
}
/* Ethtool Private Flags */

File: drivers/net/ethernet/intel/iavf/iavf_ethtool.c

@@ -927,7 +927,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
@@ -1072,6 +1072,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
if (iavf_is_raw_fdir(fltr))
continue;
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
@@ -1263,15 +1266,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return -EINVAL;
spin_lock_bh(&adapter->fdir_fltr_lock);
if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
spin_unlock_bh(&adapter->fdir_fltr_lock);
dev_err(&adapter->pdev->dev,
"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
IAVF_MAX_FDIR_FILTERS);
return -ENOSPC;
}
if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
@@ -1291,23 +1286,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
}
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (!err)
err = iavf_fdir_add_fltr(adapter, fltr);
if (err)
goto ret;
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
if (adapter->link_up)
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
else
fltr->state = IAVF_FDIR_FLTR_INACTIVE;
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (adapter->link_up)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
ret:
if (err && fltr)
kfree(fltr);
mutex_unlock(&adapter->crit_lock);
@@ -1324,34 +1306,11 @@ ret:
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
list_del(&fltr->list);
kfree(fltr);
adapter->fdir_active_fltr--;
fltr = NULL;
} else {
err = -EBUSY;
}
} else if (adapter->fdir_active_fltr) {
err = -EINVAL;
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
return err;
return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
/**

File: drivers/net/ethernet/intel/iavf/iavf_fdir.c

@@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
if (iavf_is_raw_fdir(fltr))
continue;
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
}
/**
* iavf_find_fdir_fltr_by_loc - find filter with location
* iavf_find_fdir_fltr - find FDIR filter
* @adapter: pointer to the VF adapter structure
* @loc: location to find.
* @is_raw: filter type, is raw (tc u32) or not (ethtool)
* @data: data to ID the filter, type dependent
*
* Returns pointer to Flow Director filter if found or null
* Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
*/
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
bool is_raw, u32 data)
{
struct iavf_fdir_fltr *rule;
list_for_each_entry(rule, &adapter->fdir_list_head, list)
if (rule->loc == loc)
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
if ((is_raw && rule->cls_u32_handle == data) ||
(!is_raw && rule->loc == data))
return rule;
}
return NULL;
}
/**
* iavf_fdir_list_add_fltr - add a new node to the flow director filter list
* iavf_fdir_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
*
* Return: 0 on success or negative errno on failure.
*/
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
spin_lock_bh(&adapter->fdir_fltr_lock);
if (iavf_fdir_max_reached(adapter)) {
spin_unlock_bh(&adapter->fdir_fltr_lock);
dev_err(&adapter->pdev->dev,
"Unable to add Flow Director filter (limit (%u) reached)\n",
IAVF_MAX_FDIR_FILTERS);
return -ENOSPC;
}
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
if (iavf_is_raw_fdir(fltr))
break;
if (rule->loc >= fltr->loc)
break;
parent = rule;
@@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
iavf_inc_fdir_active_fltr(adapter, fltr);
if (adapter->link_up)
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
else
fltr->state = IAVF_FDIR_FLTR_INACTIVE;
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (adapter->link_up)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
return 0;
}
/**
* iavf_fdir_del_fltr - delete a flow director filter from the list
* @adapter: pointer to the VF adapter structure
* @is_raw: filter type, is raw (tc u32) or not (ethtool)
* @data: data to ID the filter, type dependent
*
* Return: 0 on success or negative errno on failure.
*/
int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
{
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
spin_lock_bh(&adapter->fdir_fltr_lock);
fltr = iavf_find_fdir_fltr(adapter, is_raw, data);
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
list_del(&fltr->list);
iavf_dec_fdir_active_fltr(adapter, fltr);
kfree(fltr);
fltr = NULL;
} else {
err = -EBUSY;
}
} else if (adapter->fdir_active_fltr) {
err = -EINVAL;
}
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
spin_unlock_bh(&adapter->fdir_fltr_lock);
return err;
}

File: drivers/net/ethernet/intel/iavf/iavf_fdir.h

@@ -117,17 +117,26 @@ struct iavf_fdir_fltr {
u32 flow_id;
u32 cls_u32_handle; /* for FDIR added via tc u32 */
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr)
{
return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count;
}
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data);
struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
bool is_raw, u32 data);
#endif /* _IAVF_FDIR_H_ */

File: drivers/net/ethernet/intel/iavf/iavf_main.c

@@ -4013,7 +4013,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
* @adapter: board private structure
* @adapter: pointer to iavf adapter structure
* @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4031,6 +4031,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
}
}
/**
* iavf_add_cls_u32 - Add U32 classifier offloads
* @adapter: pointer to iavf adapter structure
* @cls_u32: pointer to tc_cls_u32_offload struct with flow info
*
* Return: 0 on success or negative errno on failure.
*/
static int iavf_add_cls_u32(struct iavf_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
struct netlink_ext_ack *extack = cls_u32->common.extack;
struct virtchnl_fdir_rule *rule_cfg;
struct virtchnl_filter_action *vact;
struct virtchnl_proto_hdrs *hdrs;
struct ethhdr *spec_h, *mask_h;
const struct tc_action *act;
struct iavf_fdir_fltr *fltr;
struct tcf_exts *exts;
unsigned int q_index;
int i, status = 0;
int off_base = 0;
if (cls_u32->knode.link_handle) {
NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
return -EOPNOTSUPP;
}
fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
if (!fltr)
return -ENOMEM;
rule_cfg = &fltr->vc_add_msg.rule_cfg;
hdrs = &rule_cfg->proto_hdrs;
hdrs->count = 0;
/* The parser lib at the PF expects the packet starting with MAC hdr */
switch (ntohs(cls_u32->common.protocol)) {
case ETH_P_802_3:
break;
case ETH_P_IP:
spec_h = (struct ethhdr *)hdrs->raw.spec;
mask_h = (struct ethhdr *)hdrs->raw.mask;
spec_h->h_proto = htons(ETH_P_IP);
mask_h->h_proto = htons(0xFFFF);
off_base += ETH_HLEN;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
status = -EOPNOTSUPP;
goto free_alloc;
}
for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
__be32 val, mask;
int off;
off = off_base + cls_u32->knode.sel->keys[i].off;
val = cls_u32->knode.sel->keys[i].val;
mask = cls_u32->knode.sel->keys[i].mask;
if (off >= sizeof(hdrs->raw.spec)) {
NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
status = -EINVAL;
goto free_alloc;
}
memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
hdrs->raw.pkt_len = off + sizeof(val);
}
/* Only one action is allowed */
rule_cfg->action_set.count = 1;
vact = &rule_cfg->action_set.actions[0];
exts = cls_u32->knode.exts;
tcf_exts_for_each_action(i, act, exts) {
/* FDIR queue */
if (is_tcf_skbedit_rx_queue_mapping(act)) {
q_index = tcf_skbedit_rx_queue_mapping(act);
if (q_index >= adapter->num_active_queues) {
status = -EINVAL;
goto free_alloc;
}
vact->type = VIRTCHNL_ACTION_QUEUE;
vact->act_conf.queue.index = q_index;
break;
}
/* Drop */
if (is_tcf_gact_shot(act)) {
vact->type = VIRTCHNL_ACTION_DROP;
break;
}
/* Unsupported action */
NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
status = -EOPNOTSUPP;
goto free_alloc;
}
fltr->vc_add_msg.vsi_id = adapter->vsi.id;
fltr->cls_u32_handle = cls_u32->knode.handle;
return iavf_fdir_add_fltr(adapter, fltr);
free_alloc:
kfree(fltr);
return status;
}
/**
* iavf_del_cls_u32 - Delete U32 classifier offloads
* @adapter: pointer to iavf adapter structure
* @cls_u32: pointer to tc_cls_u32_offload struct with flow info
*
* Return: 0 on success or negative errno on failure.
*/
static int iavf_del_cls_u32(struct iavf_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
}
/**
* iavf_setup_tc_cls_u32 - U32 filter offloads
* @adapter: pointer to iavf adapter structure
* @cls_u32: pointer to tc_cls_u32_offload struct with flow info
*
* Return: 0 on success or negative errno on failure.
*/
static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
return -EOPNOTSUPP;
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
return iavf_add_cls_u32(adapter, cls_u32);
case TC_CLSU32_DELETE_KNODE:
return iavf_del_cls_u32(adapter, cls_u32);
default:
return -EOPNOTSUPP;
}
}
/**
* iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
@@ -4050,6 +4198,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
case TC_SETUP_CLSU32:
return iavf_setup_tc_cls_u32(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4332,8 +4482,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4843,9 +4993,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
/* Enable HW TC offload if ADQ or tc U32 is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
TC_U32_SUPPORT(adapter))
hw_features |= NETIF_F_HW_TC;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;

File: drivers/net/ethernet/intel/iavf/iavf_virtchnl.c

@@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
@@ -1961,8 +1962,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
* list on PF is already cleared after a reset
*/
list_del(&f->list);
iavf_dec_fdir_active_fltr(adapter, f);
kfree(f);
adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2135,8 +2136,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2451,8 +2452,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
fdir->loc);
if (!iavf_is_raw_fdir(fdir))
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
fdir->loc);
else
dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
TC_U32_USERHTID(fdir->cls_u32_handle));
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
@@ -2460,8 +2465,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
adapter->fdir_active_fltr--;
}
}
}
@@ -2479,11 +2484,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
if (!iavf_is_raw_fdir(fdir))
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
else
dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
TC_U32_USERHTID(fdir->cls_u32_handle));
list_del(&fdir->list);
iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",

File: drivers/net/ethernet/intel/ice/Makefile

@@ -28,6 +28,8 @@ ice-y := ice_main.o \
ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
ice_parser.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \

File: drivers/net/ethernet/intel/ice/ice_common.h

@@ -10,6 +10,7 @@
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
#include "ice_parser.h"
#include <linux/avf/virtchnl.h>
#include "ice_switch.h"
#include "ice_fdir.h"

View File

@@ -289,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
struct ice_pkg_enum *state, u32 sect_type,
u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset))
void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
struct ice_pkg_enum *state, u32 sect_type,
u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset))
{
void *entry;

File: drivers/net/ethernet/intel/ice/ice_ddp.h

@@ -261,10 +261,17 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -276,6 +283,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
@@ -451,6 +459,11 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type, u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);

File: drivers/net/ethernet/intel/ice/ice_flex_pipe.c

@@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
}
/**
* ice_disable_fd_swap - set register appropriately to disable FD SWAP
* @hw: pointer to the HW struct
* @prof_id: profile ID
*/
static void
ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
{
u16 swap_val, fvw_num;
unsigned int i;
swap_val = ICE_SWAP_VALID;
fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
/* Since the SWAP Flag in the Programming Desc doesn't work,
* here add method to disable the SWAP Option via setting
* certain SWAP and INSET register sets.
*/
for (i = 0; i < fvw_num; i++) {
u32 raw_swap, raw_in;
unsigned int j;
raw_swap = 0;
raw_in = 0;
for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
}
/* write the FDIR swap register set */
wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
/* write the FDIR inset register set */
wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
}
}
/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
* @fd_swap: enable/disable FDIR paired src/dst fields swap option
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks, bool symm)
struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
if (blk == ICE_BLK_FD) {
if (blk == ICE_BLK_FD && fd_swap) {
/* For Flow Director block, the extraction sequence may
* need to be altered in the case where there are paired
* fields that have no match. This is necessary because
@@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_update_fd_swap(hw, prof_id, es);
if (status)
goto err_ice_add_prof;
} else if (blk == ICE_BLK_FD) {
ice_disable_fd_swap(hw, prof_id);
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
@@ -4098,6 +4145,54 @@ err_ice_add_prof_id_flow:
return status;
}
/**
* ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
* @hw: pointer to the HW struct
* @blk: HW block
* @dest_vsi: dest VSI
* @fdir_vsi: fdir programming VSI
* @hdl: profile handle
*
* Update the hardware tables to enable the FDIR profile indicated by @hdl for
* the VSI specified by @dest_vsi. On success, the flow will be enabled.
*
* Return: 0 on success or negative errno on failure.
*/
int
ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
u16 dest_vsi, u16 fdir_vsi, u64 hdl)
{
u16 vsi_num;
int status;
if (blk != ICE_BLK_FD)
return -EINVAL;
vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
status);
return status;
}
vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
status);
goto err;
}
return 0;
err:
vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
return status;
}
/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct

File: drivers/net/ethernet/intel/ice/ice_flex_pipe.h

@@ -6,6 +6,8 @@
#include "ice_type.h"
#define ICE_FDIR_REG_SET_SIZE 4
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
@@ -42,13 +44,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks, bool symm);
struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);

File: drivers/net/ethernet/intel/ice/ice_flow.c

@@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = {
};
/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
};
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
@@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm);
params->mask, symm, true);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
#define FLAG_GTP_EH_PDU BIT_ULL(14)
#define HI_BYTE_IN_WORD GENMASK(15, 8)
#define LO_BYTE_IN_WORD GENMASK(7, 0)
#define FLAG_GTPU_MSK \
(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
#define FLAG_GTPU_UP \
(FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
#define FLAG_GTPU_DW FLAG_GTP_EH_PDU
/**
* ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
* @hw: pointer to the HW struct
* @dest_vsi: dest VSI
* @fdir_vsi: fdir programming VSI
* @prof: stores parsed profile info from raw flow
* @blk: classification blk
*
* Return: 0 on success or negative errno on failure.
*/
int
ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
struct ice_parser_profile *prof, enum ice_block blk)
{
u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
struct ice_flow_prof_params *params __free(kfree);
u8 fv_words = hw->blk[blk].es.fvw;
int status;
int i, idx;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
params->es[i].prot_id = ICE_PROT_INVALID;
params->es[i].off = ICE_FV_OFFSET_INVAL;
}
for (i = 0; i < prof->fv_num; i++) {
if (hw->blk[blk].es.reverse)
idx = fv_words - i - 1;
else
idx = i;
params->es[idx].prot_id = prof->fv[i].proto_id;
params->es[idx].off = prof->fv[i].offset;
params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
HI_BYTE_IN_WORD) |
(((prof->fv[i].msk) >> BITS_PER_BYTE) &
LO_BYTE_IN_WORD);
}
switch (prof->flags) {
case FLAG_GTPU_DW:
params->attr = ice_attr_gtpu_down;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
break;
case FLAG_GTPU_UP:
params->attr = ice_attr_gtpu_up;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
break;
default:
if (prof->flags_msk & FLAG_GTPU_MSK) {
params->attr = ice_attr_gtpu_session;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
}
break;
}
status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)
return status;
status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
if (status)
ice_rem_prof(hw, blk, id);
return status;
}
/**
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
* @hw: pointer to the HW struct

File: drivers/net/ethernet/intel/ice/ice_flow.h

@@ -5,6 +5,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
#include "ice_parser.h"
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type {
ICE_RSS_ANY_HEADERS
};
struct ice_vsi;
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
@@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
struct ice_parser_profile *prof, enum ice_block blk);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);

File: drivers/net/ethernet/intel/ice/ice_parser.c (diff suppressed because it is too large)

File: drivers/net/ethernet/intel/ice/ice_parser.h

@@ -0,0 +1,540 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2024 Intel Corporation */
#ifndef _ICE_PARSER_H_
#define _ICE_PARSER_H_
#define ICE_SEC_DATA_OFFSET 4
#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
#define ICE_SEC_LBL_DATA_OFFSET 2
#define ICE_SID_LBL_ENTRY_SIZE 66
/*** ICE_SID_RXPARSER_IMEM section ***/
#define ICE_IMEM_TABLE_SIZE 192
/* TCAM boost Master; if bit is set, and TCAM hit, TCAM output overrides iMEM
* output.
*/
struct ice_bst_main {
bool alu0;
bool alu1;
bool alu2;
bool pg;
};
struct ice_bst_keybuilder {
u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */
bool tsr_ctrl; /* TCAM Search Register control */
};
/* Next protocol Key builder */
struct ice_np_keybuilder {
u8 opc;
u8 start_reg0;
u8 len_reg1;
};
enum ice_np_keybuilder_opcode {
ICE_NPKB_OPC_EXTRACT = 0,
ICE_NPKB_OPC_BUILD = 1,
ICE_NPKB_OPC_BYPASS = 2,
};
/* Parse Graph Key builder */
struct ice_pg_keybuilder {
bool flag0_ena;
bool flag1_ena;
bool flag2_ena;
bool flag3_ena;
u8 flag0_idx;
u8 flag1_idx;
u8 flag2_idx;
u8 flag3_idx;
u8 alu_reg_idx;
};
enum ice_alu_idx {
ICE_ALU0_IDX = 0,
ICE_ALU1_IDX = 1,
ICE_ALU2_IDX = 2,
};
enum ice_alu_opcode {
ICE_ALU_PARK = 0,
ICE_ALU_MOV_ADD = 1,
ICE_ALU_ADD = 2,
ICE_ALU_MOV_AND = 4,
ICE_ALU_AND = 5,
ICE_ALU_AND_IMM = 6,
ICE_ALU_MOV_OR = 7,
ICE_ALU_OR = 8,
ICE_ALU_MOV_XOR = 9,
ICE_ALU_XOR = 10,
ICE_ALU_NOP = 11,
ICE_ALU_BR = 12,
ICE_ALU_BREQ = 13,
ICE_ALU_BRNEQ = 14,
ICE_ALU_BRGT = 15,
ICE_ALU_BRLT = 16,
ICE_ALU_BRGEQ = 17,
ICE_ALU_BRLEG = 18,
ICE_ALU_SETEQ = 19,
ICE_ALU_ANDEQ = 20,
ICE_ALU_OREQ = 21,
ICE_ALU_SETNEQ = 22,
ICE_ALU_ANDNEQ = 23,
ICE_ALU_ORNEQ = 24,
ICE_ALU_SETGT = 25,
ICE_ALU_ANDGT = 26,
ICE_ALU_ORGT = 27,
ICE_ALU_SETLT = 28,
ICE_ALU_ANDLT = 29,
ICE_ALU_ORLT = 30,
ICE_ALU_MOV_SUB = 31,
ICE_ALU_SUB = 32,
ICE_ALU_INVALID = 64,
};
enum ice_proto_off_opcode {
ICE_PO_OFF_REMAIN = 0,
ICE_PO_OFF_HDR_ADD = 1,
ICE_PO_OFF_HDR_SUB = 2,
};
struct ice_alu {
enum ice_alu_opcode opc;
u8 src_start;
u8 src_len;
bool shift_xlate_sel;
u8 shift_xlate_key;
u8 src_reg_id;
u8 dst_reg_id;
bool inc0;
bool inc1;
u8 proto_offset_opc;
u8 proto_offset;
u8 branch_addr;
u16 imm;
bool dedicate_flags_ena;
u8 dst_start;
u8 dst_len;
bool flags_extr_imm;
u8 flags_start_imm;
};
/* Parser program code (iMEM) */
struct ice_imem_item {
u16 idx;
struct ice_bst_main b_m;
struct ice_bst_keybuilder b_kb;
u8 pg_prio;
struct ice_np_keybuilder np_kb;
struct ice_pg_keybuilder pg_kb;
struct ice_alu alu0;
struct ice_alu alu1;
struct ice_alu alu2;
};
/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
#define ICE_METAINIT_TABLE_SIZE 16
/* Metadata Initialization item */
struct ice_metainit_item {
u16 idx;
u8 tsr; /* TCAM Search key Register */
u16 ho; /* Header Offset register */
u16 pc; /* Program Counter register */
u16 pg_rn; /* Parse Graph Root Node */
u8 cd; /* Control Domain ID */
/* General Purpose Registers */
bool gpr_a_ctrl;
u8 gpr_a_data_mdid;
u8 gpr_a_data_start;
u8 gpr_a_data_len;
u8 gpr_a_id;
bool gpr_b_ctrl;
u8 gpr_b_data_mdid;
u8 gpr_b_data_start;
u8 gpr_b_data_len;
u8 gpr_b_id;
bool gpr_c_ctrl;
u8 gpr_c_data_mdid;
u8 gpr_c_data_start;
u8 gpr_c_data_len;
u8 gpr_c_id;
bool gpr_d_ctrl;
u8 gpr_d_data_mdid;
u8 gpr_d_data_start;
u8 gpr_d_data_len;
u8 gpr_d_id;
u64 flags; /* Initial value for all flags */
};
/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
* ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
* sections ***/
#define ICE_PG_CAM_TABLE_SIZE 2048
#define ICE_PG_SP_CAM_TABLE_SIZE 128
#define ICE_PG_NM_CAM_TABLE_SIZE 1024
#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
struct ice_pg_cam_key {
bool valid;
struct_group_attr(val, __packed,
u16 node_id; /* Node ID of protocol in parse graph */
bool flag0;
bool flag1;
bool flag2;
bool flag3;
u8 boost_idx; /* Boost TCAM match index */
u16 alu_reg;
u32 next_proto; /* next Protocol value (must be last) */
);
};
struct ice_pg_nm_cam_key {
bool valid;
struct_group_attr(val, __packed,
u16 node_id;
bool flag0;
bool flag1;
bool flag2;
bool flag3;
u8 boost_idx;
u16 alu_reg;
);
};
struct ice_pg_cam_action {
u16 next_node; /* Parser Node ID for the next round */
u8 next_pc; /* next Program Counter */
bool is_pg; /* is protocol group */
u8 proto_id; /* protocol ID or proto group ID */
bool is_mg; /* is marker group */
u8 marker_id; /* marker ID or marker group ID */
bool is_last_round;
bool ho_polarity; /* header offset polarity */
u16 ho_inc;
};
/* Parse Graph item */
struct ice_pg_cam_item {
u16 idx;
struct ice_pg_cam_key key;
struct ice_pg_cam_action action;
};
/* Parse Graph No Match item */
struct ice_pg_nm_cam_item {
u16 idx;
struct ice_pg_nm_cam_key key;
struct ice_pg_cam_action action;
};
struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
int size, struct ice_pg_cam_key *key);
struct ice_pg_nm_cam_item *
ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
struct ice_pg_cam_key *key);
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
u16 addr;
u8 key[ICE_BST_TCAM_KEY_SIZE];
u8 key_inv[ICE_BST_TCAM_KEY_SIZE];
u8 hit_idx_grp;
u8 pg_prio;
struct ice_np_keybuilder np_kb;
struct ice_pg_keybuilder pg_kb;
struct ice_alu alu0;
struct ice_alu alu1;
struct ice_alu alu2;
};
#define ICE_LBL_LEN 64
#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM"
#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM"
#define ICE_LBL_TNL_VXLAN "TNL_VXLAN"
#define ICE_LBL_TNL_GENEVE "TNL_GENEVE"
#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI"
enum ice_lbl_type {
ICE_LBL_BST_TYPE_UNKNOWN,
ICE_LBL_BST_TYPE_DVM,
ICE_LBL_BST_TYPE_SVM,
ICE_LBL_BST_TYPE_VXLAN,
ICE_LBL_BST_TYPE_GENEVE,
ICE_LBL_BST_TYPE_UDP_ECPRI,
};
struct ice_lbl_item {
u16 idx;
char label[ICE_LBL_LEN];
/* must be at the end, not part of the DDP section */
enum ice_lbl_type type;
};
struct ice_bst_tcam_item *
ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
struct ice_bst_tcam_item *
ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
struct ice_lbl_item *lbl_table,
enum ice_lbl_type type, u16 *start);
/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024
#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10
struct ice_ptype_mk_tcam_item {
u16 address;
u16 ptype;
u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE];
u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE];
} __packed;
struct ice_ptype_mk_tcam_item *
ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
u8 *pat, int len);
/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
#define ICE_MK_GRP_TABLE_SIZE 128
#define ICE_MK_COUNT_PER_GRP 8
/* Marker Group item */
struct ice_mk_grp_item {
int idx;
u8 markers[ICE_MK_COUNT_PER_GRP];
};
/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
#define ICE_PROTO_COUNT_PER_GRP 8
#define ICE_PROTO_GRP_TABLE_SIZE 192
#define ICE_PROTO_GRP_ITEM_SIZE 22
struct ice_proto_off {
bool polarity; /* true: positive, false: negative */
u8 proto_id;
u16 offset; /* 10 bit protocol offset */
};
/* Protocol Group item */
struct ice_proto_grp_item {
u16 idx;
struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
};
/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
#define ICE_FLG_RD_TABLE_SIZE 64
#define ICE_FLG_RDT_SIZE 64
/* Flags Redirection item */
struct ice_flg_rd_item {
u16 idx;
bool expose;
u8 intr_flg_id; /* Internal Flag ID */
};
u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
* ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
* sections ***/
#define ICE_XLT_KB_FLAG0_14_CNT 15
#define ICE_XLT_KB_TBL_CNT 8
#define ICE_XLT_KB_TBL_ENTRY_SIZE 24
struct ice_xlt_kb_entry {
u8 xlt1_ad_sel;
u8 xlt2_ad_sel;
u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT];
u8 xlt1_md_sel;
u8 xlt2_md_sel;
};
/* XLT Key Builder */
struct ice_xlt_kb {
u8 xlt1_pm; /* XLT1 Partition Mode */
u8 xlt2_pm; /* XLT2 Partition Mode */
u8 prof_id_pm; /* Profile ID Partition Mode */
u64 flag15;
struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT];
};
u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
/*** Parser API ***/
#define ICE_GPR_HV_IDX 64
#define ICE_GPR_HV_SIZE 32
#define ICE_GPR_ERR_IDX 84
#define ICE_GPR_FLG_IDX 104
#define ICE_GPR_FLG_SIZE 16
#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */
#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */
#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */
#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */
#define ICE_PARSER_MAX_PKT_LEN 504
#define ICE_PARSER_PKT_REV 32
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
#define ICE_MARKER_ID_NUM 8
#define ICE_PO_PAIR_SIZE 256
struct ice_gpr_pu {
/* array of flags to indicate if GRP needs to be updated */
bool gpr_val_upd[ICE_PARSER_GPR_NUM];
u16 gpr_val[ICE_PARSER_GPR_NUM];
u64 flg_msk;
u64 flg_val;
u16 err_msk;
u16 err_val;
};
enum ice_pg_prio {
ICE_PG_P0 = 0,
ICE_PG_P1 = 1,
ICE_PG_P2 = 2,
ICE_PG_P3 = 3,
};
struct ice_parser_rt {
struct ice_parser *psr;
u16 gpr[ICE_PARSER_GPR_NUM];
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
u8 bst_key[ICE_BST_KEY_SIZE];
struct ice_pg_cam_key pg_key;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
u16 offsets[ICE_PO_PAIR_SIZE];
};
struct ice_parser_proto_off {
u8 proto_id; /* hardware protocol ID */
u16 offset; /* offset from the start of the protocol header */
};
#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16
#define ICE_PARSER_FLAG_PSR_SIZE 8
#define ICE_PARSER_FV_SIZE 48
#define ICE_PARSER_FV_MAX 24
#define ICE_BT_TUN_PORT_OFF_H 16
#define ICE_BT_TUN_PORT_OFF_L 15
#define ICE_BT_VM_OFF 0
#define ICE_UDP_PORT_OFF_H 1
#define ICE_UDP_PORT_OFF_L 0
struct ice_parser_result {
u16 ptype; /* 16 bits hardware PTYPE */
/* array of protocol and header offset pairs */
struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE];
int po_num; /* # of protocol-offset pairs must <= 16 */
u64 flags_psr; /* parser flags */
u64 flags_pkt; /* packet flags */
u16 flags_sw; /* key builder flags for SW */
u16 flags_acl; /* key builder flags for ACL */
u16 flags_fd; /* key builder flags for FD */
u16 flags_rss; /* key builder flags for RSS */
};
void ice_parser_rt_reset(struct ice_parser_rt *rt);
void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
int pkt_len);
int ice_parser_rt_execute(struct ice_parser_rt *rt,
struct ice_parser_result *rslt);
struct ice_parser {
struct ice_hw *hw; /* pointer to the hardware structure */
struct ice_imem_item *imem_table;
struct ice_metainit_item *mi_table;
struct ice_pg_cam_item *pg_cam_table;
struct ice_pg_cam_item *pg_sp_cam_table;
struct ice_pg_nm_cam_item *pg_nm_cam_table;
struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
struct ice_bst_tcam_item *bst_tcam_table;
struct ice_lbl_item *bst_lbl_table;
struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
struct ice_mk_grp_item *mk_grp_table;
struct ice_proto_grp_item *proto_grp_table;
struct ice_flg_rd_item *flg_rd_table;
struct ice_xlt_kb *xlt_kb_sw;
struct ice_xlt_kb *xlt_kb_acl;
struct ice_xlt_kb *xlt_kb_fd;
struct ice_xlt_kb *xlt_kb_rss;
struct ice_parser_rt rt;
};
struct ice_parser *ice_parser_create(struct ice_hw *hw);
void ice_parser_destroy(struct ice_parser *psr);
void ice_parser_dvm_set(struct ice_parser *psr, bool on);
int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
int pkt_len, struct ice_parser_result *rslt);
void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
struct ice_parser_fv {
u8 proto_id; /* hardware protocol ID */
u16 offset; /* offset from the start of the protocol header */
u16 spec; /* pattern to match */
u16 msk; /* pattern mask */
};
struct ice_parser_profile {
/* array of field vectors */
struct ice_parser_fv fv[ICE_PARSER_FV_SIZE];
int fv_num; /* # of field vectors must <= 48 */
u16 flags; /* key builder flags */
u16 flags_msk; /* key builder flag mask */
DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */
};
int ice_parser_profile_init(struct ice_parser_result *rslt,
const u8 *pkt_buf, const u8 *msk_buf,
int buf_len, enum ice_block blk,
struct ice_parser_profile *prof);
void ice_parser_profile_dump(struct ice_hw *hw,
struct ice_parser_profile *prof);
#endif /* _ICE_PARSER_H_ */

File: drivers/net/ethernet/intel/ice/ice_parser_rt.c

@@ -0,0 +1,861 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2024 Intel Corporation */
#include "ice_common.h"
static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
{
rt->gpr[ICE_GPR_TSR_IDX] = tsr;
}
static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho)
{
rt->gpr[ICE_GPR_HO_IDX] = ho;
memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
}
static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc)
{
rt->gpr[ICE_GPR_NP_IDX] = pc;
}
static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
{
rt->gpr[ICE_GPR_NN_IDX] = node;
}
static void
ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
{
struct ice_hw *hw = rt->psr->hw;
unsigned int word, id;
word = idx / ICE_GPR_FLG_SIZE;
id = idx % ICE_GPR_FLG_SIZE;
if (set) {
rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id);
ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx);
} else {
rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id);
ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx);
}
}
static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
{
struct ice_hw *hw = rt->psr->hw;
if (idx == ICE_GPR_HO_IDX)
ice_rt_ho_set(rt, val);
else
rt->gpr[idx] = val;
ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val);
}
static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
{
struct ice_hw *hw = rt->psr->hw;
if (set) {
rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx);
ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx);
} else {
rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx);
ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx);
}
}
/**
* ice_parser_rt_reset - reset the parser runtime
* @rt: pointer to the parser runtime
*/
void ice_parser_rt_reset(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_metainit_item *mi;
unsigned int i;
mi = &psr->mi_table[0];
memset(rt, 0, sizeof(*rt));
rt->psr = psr;
ice_rt_tsr_set(rt, mi->tsr);
ice_rt_ho_set(rt, mi->ho);
ice_rt_np_set(rt, mi->pc);
ice_rt_nn_set(rt, mi->pg_rn);
for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
if (mi->flags & BIT(i))
ice_rt_flag_set(rt, i, true);
}
}
/**
* ice_parser_rt_pktbuf_set - set a packet into parser runtime
* @rt: pointer to the parser runtime
* @pkt_buf: buffer with packet data
* @pkt_len: packet buffer length
*/
void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
int pkt_len)
{
int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
u16 ho = rt->gpr[ICE_GPR_HO_IDX];
memcpy(rt->pkt_buf, pkt_buf, len);
rt->pkt_len = pkt_len;
memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
}
static void ice_bst_key_init(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX];
u16 ho = rt->gpr[ICE_GPR_HO_IDX];
u8 *key = rt->bst_key;
int idd, i;
idd = ICE_BST_TCAM_KEY_SIZE - 1;
if (imem->b_kb.tsr_ctrl)
key[idd] = tsr;
else
key[idd] = imem->b_kb.prio;
idd = ICE_BST_KEY_TCAM_SIZE - 1;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
key[i] = rt->pkt_buf[ho + idd - i];
else
key[i] = 0;
}
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
key[0], key[1], key[2], key[3], key[4],
key[5], key[6], key[7], key[8], key[9]);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
}
static u16 ice_bit_rev_u16(u16 v, int len)
{
return bitrev16(v) >> (BITS_PER_TYPE(v) - len);
}
static u32 ice_bit_rev_u32(u32 v, int len)
{
return bitrev32(v) >> (BITS_PER_TYPE(v) - len);
}
static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
{
int offset;
u32 buf[2];
u64 val;
offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
memcpy(buf, &rt->gpr[offset], sizeof(buf));
buf[0] = bitrev8x4(buf[0]);
buf[1] = bitrev8x4(buf[1]);
val = *(u64 *)buf;
val >>= start % BITS_PER_TYPE(u16);
return ice_bit_rev_u32(val, len);
}
static u32 ice_pk_build(struct ice_parser_rt *rt,
struct ice_np_keybuilder *kb)
{
if (kb->opc == ICE_NPKB_OPC_EXTRACT)
return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1);
else if (kb->opc == ICE_NPKB_OPC_BUILD)
return rt->gpr[kb->start_reg0] |
((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16));
else if (kb->opc == ICE_NPKB_OPC_BYPASS)
return 0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n",
kb->opc);
return U32_MAX;
}
static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index)
{
int word = index / ICE_GPR_FLG_SIZE;
int id = index % ICE_GPR_FLG_SIZE;
return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id));
}
static int ice_imem_pgk_init(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
memset(&rt->pg_key, 0, sizeof(rt->pg_key));
rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb);
if (rt->pg_key.next_proto == U32_MAX)
return -EINVAL;
if (imem->pg_kb.flag0_ena)
rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx);
if (imem->pg_kb.flag1_ena)
rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx);
if (imem->pg_kb.flag2_ena)
rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx);
if (imem->pg_kb.flag3_ena)
rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx);
rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
rt->pg_key.node_id,
rt->pg_key.flag0,
rt->pg_key.flag1,
rt->pg_key.flag2,
rt->pg_key.flag3,
rt->pg_key.boost_idx,
rt->pg_key.alu_reg,
rt->pg_key.next_proto);
return 0;
}
static void ice_imem_alu0_set(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
rt->alu0 = &imem->alu0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
imem->idx);
}
static void ice_imem_alu1_set(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
rt->alu1 = &imem->alu1;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
imem->idx);
}
static void ice_imem_alu2_set(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
rt->alu2 = &imem->alu2;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
imem->idx);
}
static void ice_imem_pgp_set(struct ice_parser_rt *rt,
struct ice_imem_item *imem)
{
rt->pg_prio = imem->pg_prio;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
rt->pg_prio, imem->idx);
}
static int ice_bst_pgk_init(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
memset(&rt->pg_key, 0, sizeof(rt->pg_key));
rt->pg_key.boost_idx = bst->hit_idx_grp;
rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb);
if (rt->pg_key.next_proto == U32_MAX)
return -EINVAL;
if (bst->pg_kb.flag0_ena)
rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx);
if (bst->pg_kb.flag1_ena)
rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx);
if (bst->pg_kb.flag2_ena)
rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx);
if (bst->pg_kb.flag3_ena)
rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx);
rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
rt->pg_key.node_id,
rt->pg_key.flag0,
rt->pg_key.flag1,
rt->pg_key.flag2,
rt->pg_key.flag3,
rt->pg_key.boost_idx,
rt->pg_key.alu_reg,
rt->pg_key.next_proto);
return 0;
}
static void ice_bst_alu0_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu0 = &bst->alu0;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
bst->addr);
}
static void ice_bst_alu1_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu1 = &bst->alu1;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
bst->addr);
}
static void ice_bst_alu2_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->alu2 = &bst->alu2;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
bst->addr);
}
static void ice_bst_pgp_set(struct ice_parser_rt *rt,
struct ice_bst_tcam_item *bst)
{
rt->pg_prio = bst->pg_prio;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
rt->pg_prio, bst->addr);
}
static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_pg_cam_item *item;
item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
&rt->pg_key);
if (!item)
item = ice_pg_cam_match(psr->pg_sp_cam_table,
ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key);
return item;
}
static
struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
struct ice_pg_nm_cam_item *item;
item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
if (!item)
item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
ICE_PG_NM_SP_CAM_TABLE_SIZE,
&rt->pg_key);
return item;
}
static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
{
rt->pu.gpr_val_upd[idx] = true;
rt->pu.gpr_val[idx] = val;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
idx, val);
}
static void ice_pg_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc);
ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
}
static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val)
{
rt->pu.flg_msk |= BIT(idx);
if (val)
rt->pu.flg_val |= BIT(idx);
else
rt->pu.flg_val &= ~BIT(idx);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
idx, val);
}
static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
u32 hv_bit_sel;
int i;
if (!alu->dedicate_flags_ena)
return;
if (alu->flags_extr_imm) {
for (i = 0; i < alu->dst_len; i++)
ice_flg_add(rt, alu->dst_start + i,
!!(alu->flags_start_imm & BIT(i)));
} else {
for (i = 0; i < alu->dst_len; i++) {
hv_bit_sel = ice_hv_bit_sel(rt,
alu->flags_start_imm + i,
1);
ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
}
}
}
static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
{
if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
rt->po = rt->gpr[ICE_GPR_HO_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
rt->po);
}
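/* Extract @len bits starting at bit @start from the 16-bit GPR window that
 * begins at register @reg_idx: a 32-bit window is read, each byte is
 * bit-reversed (bitrev8x4), the field is shifted into place and the
 * selected bits are reversed back into a plain u16.
 */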
static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
int start, int len)
{
int offset;
u32 val;
offset = reg_idx + (start / BITS_PER_TYPE(u16));
memcpy(&val, &rt->gpr[offset], sizeof(val));
val = bitrev8x4(val);
val >>= start % BITS_PER_TYPE(u16);
return ice_bit_rev_u16(val, len);
}
static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
{
rt->pu.err_msk |= (u16)BIT(idx);
if (val)
rt->pu.err_val |= BIT_ULL(idx);
else
rt->pu.err_val &= ~BIT_ULL(idx);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
idx, val);
}
static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
bool val)
{
u16 flg_idx;
if (alu->dedicate_flags_ena) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
alu->opc);
return;
}
if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
alu->dst_start);
return;
}
ice_err_add(rt, alu->dst_start, val);
} else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
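/* each flag GPR holds 16 flag bits; compute the absolute flag index */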
flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
alu->dst_start);
if (flg_idx >= ICE_PARSER_FLG_NUM) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
flg_idx);
return;
}
ice_flg_add(rt, flg_idx, val);
} else {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
alu->dst_reg_id, alu->dst_start);
}
}
static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
{
u16 dst, src, shift, imm;
if (alu->shift_xlate_sel) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
return;
}
ice_po_update(rt, alu);
ice_flg_update(rt, alu);
dst = rt->gpr[alu->dst_reg_id];
src = ice_reg_bit_sel(rt, alu->src_reg_id,
alu->src_start, alu->src_len);
shift = alu->shift_xlate_key;
imm = alu->imm;
switch (alu->opc) {
case ICE_ALU_PARK:
break;
case ICE_ALU_MOV_ADD:
dst = (src << shift) + imm;
ice_gpr_add(rt, alu->dst_reg_id, dst);
break;
case ICE_ALU_ADD:
dst += (src << shift) + imm;
ice_gpr_add(rt, alu->dst_reg_id, dst);
break;
case ICE_ALU_ORLT:
if (src < imm)
ice_dst_reg_bit_set(rt, alu, true);
ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_OREQ:
if (src == imm)
ice_dst_reg_bit_set(rt, alu, true);
ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_SETEQ:
ice_dst_reg_bit_set(rt, alu, src == imm);
ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
break;
case ICE_ALU_MOV_XOR:
dst = (src << shift) ^ imm;
ice_gpr_add(rt, alu->dst_reg_id, dst);
break;
default:
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
alu->opc);
break;
}
}
static void ice_alu0_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
ice_alu_exe(rt, rt->alu0);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
}
static void ice_alu1_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
ice_alu_exe(rt, rt->alu1);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
}
static void ice_alu2_exe(struct ice_parser_rt *rt)
{
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
ice_alu_exe(rt, rt->alu2);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
}
static void ice_pu_exe(struct ice_parser_rt *rt)
{
struct ice_gpr_pu *pu = &rt->pu;
unsigned int i;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
if (pu->gpr_val_upd[i])
ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
}
for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
if (pu->flg_msk & BIT(i))
ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
}
for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
if (pu->err_msk & BIT(i))
ice_rt_err_set(rt, i, pu->err_val & BIT(i));
}
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
}
static void ice_alu_pg_exe(struct ice_parser_rt *rt)
{
memset(&rt->pu, 0, sizeof(rt->pu));
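/* pg_prio selects when the ParseGraph action runs relative to the three
 * ALUs: priority N executes the PG action after N ALUs have completed.
 */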
switch (rt->pg_prio) {
case ICE_PG_P0:
ice_pg_exe(rt);
ice_alu0_exe(rt);
ice_alu1_exe(rt);
ice_alu2_exe(rt);
break;
case ICE_PG_P1:
ice_alu0_exe(rt);
ice_pg_exe(rt);
ice_alu1_exe(rt);
ice_alu2_exe(rt);
break;
case ICE_PG_P2:
ice_alu0_exe(rt);
ice_alu1_exe(rt);
ice_pg_exe(rt);
ice_alu2_exe(rt);
break;
case ICE_PG_P3:
ice_alu0_exe(rt);
ice_alu1_exe(rt);
ice_alu2_exe(rt);
ice_pg_exe(rt);
break;
}
ice_pu_exe(rt);
if (rt->action->ho_inc == 0)
return;
if (rt->action->ho_polarity)
ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
else
ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
}
static void ice_proto_off_update(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
if (rt->action->is_pg) {
struct ice_proto_grp_item *proto_grp =
&psr->proto_grp_table[rt->action->proto_id];
u16 po;
int i;
for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
struct ice_proto_off *entry = &proto_grp->po[i];
if (entry->proto_id == U8_MAX)
break;
if (!entry->polarity)
po = rt->po + entry->offset;
else
po = rt->po - entry->offset;
rt->protocols[entry->proto_id] = true;
rt->offsets[entry->proto_id] = po;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
entry->proto_id, po);
}
} else {
rt->protocols[rt->action->proto_id] = true;
rt->offsets[rt->action->proto_id] = rt->po;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
rt->action->proto_id, rt->po);
}
}
static void ice_marker_set(struct ice_parser_rt *rt, int idx)
{
unsigned int byte = idx / BITS_PER_BYTE;
unsigned int bit = idx % BITS_PER_BYTE;
rt->markers[byte] |= (u8)BIT(bit);
}
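/* Markers accumulated across rounds form the lookup key that
 * ice_ptype_resolve() matches against the ptype marker TCAM to produce
 * the final packet type.
 */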
static void ice_marker_update(struct ice_parser_rt *rt)
{
struct ice_parser *psr = rt->psr;
if (rt->action->is_mg) {
struct ice_mk_grp_item *mk_grp =
&psr->mk_grp_table[rt->action->marker_id];
int i;
for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
u8 marker = mk_grp->markers[i];
if (marker == ICE_MARKER_MAX_SIZE)
break;
ice_marker_set(rt, marker);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
marker);
}
} else {
if (rt->action->marker_id != ICE_MARKER_MAX_SIZE)
ice_marker_set(rt, rt->action->marker_id);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
rt->action->marker_id);
}
}
static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
{
struct ice_ptype_mk_tcam_item *item;
struct ice_parser *psr = rt->psr;
item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
rt->markers, ICE_MARKER_ID_SIZE);
if (item)
return item->ptype;
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
return U16_MAX;
}
static void ice_proto_off_resolve(struct ice_parser_rt *rt,
struct ice_parser_result *rslt)
{
int i;
for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
if (rt->protocols[i]) {
rslt->po[rslt->po_num].proto_id = (u8)i;
rslt->po[rslt->po_num].offset = rt->offsets[i];
rslt->po_num++;
}
}
}
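/* Resolve the final result: parser flags are first redirected to packet
 * flags, which are then translated into per-block (switch, FD, RSS) flag
 * words via the XLT key builders.
 */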
static void ice_result_resolve(struct ice_parser_rt *rt,
struct ice_parser_result *rslt)
{
struct ice_parser *psr = rt->psr;
memset(rslt, 0, sizeof(*rslt));
memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
ICE_PARSER_FLAG_PSR_SIZE);
rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
ice_proto_off_resolve(rt, rslt);
rslt->ptype = ice_ptype_resolve(rt);
}
/**
* ice_parser_rt_execute - parser execution routine
* @rt: pointer to the parser runtime
* @rslt: input/output parameter to save parser result
*
* Return: 0 on success or errno.
*/
int ice_parser_rt_execute(struct ice_parser_rt *rt,
struct ice_parser_result *rslt)
{
struct ice_pg_nm_cam_item *pg_nm_cam;
struct ice_parser *psr = rt->psr;
struct ice_pg_cam_item *pg_cam;
int status = 0;
u16 node;
u16 pc;
node = rt->gpr[ICE_GPR_NN_IDX];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
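/* Each round: fetch the imem instruction at the current program counter,
 * overlay any Boost TCAM hit, resolve the ParseGraph action from the
 * (no-match) CAMs and execute it. Parsing stops on a last-round action,
 * on a CAM miss, or when the header offset passes the packet end.
 */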
while (true) {
struct ice_bst_tcam_item *bst;
struct ice_imem_item *imem;
pc = rt->gpr[ICE_GPR_NP_IDX];
imem = &psr->imem_table[pc];
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
pc);
ice_bst_key_init(rt, imem);
bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
if (!bst) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
status = ice_imem_pgk_init(rt, imem);
if (status)
break;
ice_imem_alu0_set(rt, imem);
ice_imem_alu1_set(rt, imem);
ice_imem_alu2_set(rt, imem);
ice_imem_pgp_set(rt, imem);
} else {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
bst->addr);
if (imem->b_m.pg) {
status = ice_bst_pgk_init(rt, bst);
if (status)
break;
ice_bst_pgp_set(rt, bst);
} else {
status = ice_imem_pgk_init(rt, imem);
if (status)
break;
ice_imem_pgp_set(rt, imem);
}
if (imem->b_m.alu0)
ice_bst_alu0_set(rt, bst);
else
ice_imem_alu0_set(rt, imem);
if (imem->b_m.alu1)
ice_bst_alu1_set(rt, bst);
else
ice_imem_alu1_set(rt, imem);
if (imem->b_m.alu2)
ice_bst_alu2_set(rt, bst);
else
ice_imem_alu2_set(rt, imem);
}
rt->action = NULL;
pg_cam = ice_rt_pg_cam_match(rt);
if (!pg_cam) {
pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
if (pg_nm_cam) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
pg_nm_cam->idx);
rt->action = &pg_nm_cam->action;
}
} else {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
pg_cam->idx);
rt->action = &pg_cam->action;
}
if (!rt->action) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
status = -EINVAL;
break;
}
ice_alu_pg_exe(rt);
ice_marker_update(rt);
ice_proto_off_update(rt);
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
rt->action->next_node);
if (rt->action->is_last_round) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
break;
}
if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) is at or past packet len (%u), stop parsing\n",
rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
break;
}
}
ice_result_resolve(rt, rslt);
return status;
}

View File

@ -61,6 +61,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)

View File

@ -12,6 +12,7 @@
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
#include "ice_virtchnl_fdir.h"
#include "ice_vsi_vlan_ops.h"
@ -52,6 +53,12 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
/* Structure to store FDIR profile field vector (fv) entry */
struct ice_fdir_prof_info {
struct ice_parser_profile prof;
u64 fdir_active_cnt;
};
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@ -91,6 +98,7 @@ struct ice_vf {
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */

View File

@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

View File

@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
ICE_FDIR_TUNNEL_TYPE_ECPRI,
ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
ICE_FDIR_TUNNEL_TYPE_GRE,
ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
ICE_FDIR_TUNNEL_TYPE_L2TPV2,
ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};
struct virtchnl_fdir_fltr_conf {
@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf {
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
struct ice_parser_profile *prof;
bool parser_ena;
u8 *pkt_buf;
u8 pkt_len;
};
struct virtchnl_fdir_inset_map {
@ -786,6 +800,107 @@ err_exit:
return ret;
}
/**
* ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
* @proto: virtchnl protocol headers
*
* Check whether the FDIR rule is a raw (protocol-agnostic) flow. A common
* FDIR rule always has a non-zero proto->count, so tunnel_level and
* proto->count serve as the indicators: if both are zero, the rule is
* regarded as a raw flow.
*
* Return: true if the headers describe a raw flow, false otherwise.
*/
static bool
ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
{
return (proto->tunnel_level == 0 && proto->count == 0);
}
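/* Illustrative example (hypothetical interface and values): a raw request
 * can originate from a tc u32 filter on the VF, e.g.
 *
 *   tc filter add dev <vf-netdev> ingress protocol ip \
 *       u32 match u32 0xc0a80102 0xffffffff at 16 \
 *       action skbedit queue_mapping 2
 *
 * iavf encodes the packet pattern and mask into proto->raw and sends
 * VIRTCHNL_OP_ADD_FDIR_FILTER with tunnel_level == 0 and count == 0,
 * which is the case handled here.
 */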
/**
* ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
* @vf: pointer to the VF info
* @proto: virtchnl protocol headers
* @conf: FDIR configuration for each filter
*
* Parse the virtual channel filter's raw flow and store it in @conf
*
* Return: 0 on success or negative errno on failure.
*/
static int
ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
u8 *msk_buf __free(kfree) = NULL;
u8 *pkt_buf;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
u16 udp_port = 0;
pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
hw = &pf->hw;
/* Get raw profile info via Parser Lib */
psr = ice_parser_create(hw);
if (IS_ERR(psr)) {
status = PTR_ERR(psr);
goto err_mem_alloc;
}
ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
if (status)
goto err_parser_destroy;
if (hw->debug_mask & ICE_DBG_PARSER)
ice_parser_result_dump(hw, &rslt);
conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
if (!conf->prof) {
status = -ENOMEM;
goto err_parser_destroy;
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
proto->raw.pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
if (hw->debug_mask & ICE_DBG_PARSER)
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
conf->pkt_len = proto->raw.pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
ice_parser_destroy(psr);
return 0;
err_parser_profile_init:
kfree(conf->prof);
err_parser_destroy:
ice_parser_destroy(psr);
err_mem_alloc:
kfree(pkt_buf);
return status;
}
/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
@ -813,6 +928,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
return -EINVAL;
}
/* For raw FDIR filters created by the parser */
if (ice_vc_fdir_is_raw_flow(proto))
return ice_vc_fdir_parse_raw(vf, proto, conf);
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
@ -1101,8 +1220,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
/* For raw FDIR filters created by the parser */
if (!ice_vc_fdir_is_raw_flow(proto))
if (!ice_vc_validate_pattern(vf, proto))
return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@ -1295,11 +1416,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
if (conf->parser_ena) {
memcpy(pkt, conf->pkt_buf, conf->pkt_len);
} else {
ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
goto err_free_pkt;
}
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@ -1521,6 +1646,16 @@ err_exit:
return ret;
}
static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
{
return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
}
/**
* ice_vc_add_fdir_fltr_post
* @vf: pointer to the VF structure
@ -1781,6 +1916,158 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
/**
* ice_vc_parser_fv_check_diff - compare two parsed FDIR profile fv contexts
* @fv_a: first parsed FDIR profile field vector
* @fv_b: second parsed FDIR profile field vector
*
* Check whether the two parsed FDIR profile field vector contexts differ in
* proto_id, offset or mask.
*
* Return: true if the contexts differ, false otherwise.
*/
static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
struct ice_parser_fv *fv_b)
{
return (fv_a->proto_id != fv_b->proto_id ||
fv_a->offset != fv_b->offset ||
fv_a->msk != fv_b->msk);
}
/**
* ice_vc_parser_fv_save - save a parsed FDIR profile fv context
* @fv: destination parsed FDIR profile field vector
* @fv_src: source field vector context to save
*
* Save the parsed FDIR profile field vector context, including proto_id,
* offset and mask.
*
* Return: Void.
*/
static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
struct ice_parser_fv *fv_src)
{
fv->proto_id = fv_src->proto_id;
fv->offset = fv_src->offset;
fv->msk = fv_src->msk;
fv->spec = 0;
}
/**
* ice_vc_add_fdir_raw - add a raw FDIR filter for VF
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
* @v_ret: the final VIRTCHNL code
* @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER reply status
* @len: length of the @stat reply buffer
*
* Return: 0 on success or negative errno on failure.
*/
static int
ice_vc_add_fdir_raw(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf,
enum virtchnl_status_code *v_ret,
struct virtchnl_fdir_add *stat, int len)
{
struct ice_vsi *vf_vsi, *ctrl_vsi;
struct ice_fdir_prof_info *pi;
struct ice_pf *pf = vf->pf;
int ret, ptg, id, i;
struct device *dev;
struct ice_hw *hw;
bool fv_found;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
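/* Map the first parsed ptype to its profile type group (PTG) through the
 * FD block's XLT1 table; per-VF profile info is tracked per PTG.
 */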
id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
return -ENODEV;
}
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
if (!ctrl_vsi) {
dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
vf->vf_id);
return -ENODEV;
}
fv_found = false;
/* Check if profile info already exists, then update the counter */
pi = &vf->fdir_prof_info[ptg];
if (pi->fdir_active_cnt != 0) {
for (i = 0; i < ICE_MAX_FV_WORDS; i++)
if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
&conf->prof->fv[i]))
break;
if (i == ICE_MAX_FV_WORDS) {
fv_found = true;
pi->fdir_active_cnt++;
}
}
/* HW profile setting is only required for the first time */
if (!fv_found) {
ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
ctrl_vsi->idx, conf->prof,
ICE_BLK_FD);
if (ret) {
*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: insert hw prof failed\n",
vf->vf_id);
return ret;
}
}
ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
if (ret) {
*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: insert FDIR list failed\n",
vf->vf_id);
return ret;
}
ret = ice_vc_fdir_set_irq_ctx(vf, conf,
VIRTCHNL_OP_ADD_FDIR_FILTER);
if (ret) {
dev_dbg(dev, "VF %d: set FDIR context failed\n",
vf->vf_id);
goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
if (ret) {
dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
vf->vf_id, ret);
goto err_clr_irq;
}
/* Save parsed profile fv info of the FDIR rule for the first time */
if (!fv_found) {
for (i = 0; i < conf->prof->fv_num; i++)
ice_vc_parser_fv_save(&pi->prof.fv[i],
&conf->prof->fv[i]);
pi->prof.fv_num = conf->prof->fv_num;
pi->fdir_active_cnt = 1;
}
return 0;
err_clr_irq:
ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
return ret;
}
/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
@ -1846,7 +2133,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
@ -1861,6 +2148,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto exit;
}
/* For raw FDIR filters created by the parser */
if (conf->parser_ena) {
ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
if (ret)
goto err_free_conf;
goto exit;
}
is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@ -1921,6 +2217,78 @@ err_exit:
return ret;
}
/**
* ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
* @v_ret: the final VIRTCHNL code
* @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER reply status
* @len: length of the @stat reply buffer
*
* Return: 0 on success or negative errno on failure.
*/
static int
ice_vc_del_fdir_raw(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf,
enum virtchnl_status_code *v_ret,
struct virtchnl_fdir_del *stat, int len)
{
struct ice_vsi *vf_vsi, *ctrl_vsi;
enum ice_block blk = ICE_BLK_FD;
struct ice_fdir_prof_info *pi;
struct ice_pf *pf = vf->pf;
struct device *dev;
struct ice_hw *hw;
unsigned long id;
u16 vsi_num;
int ptg;
int ret;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
if (ret) {
dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
vf->vf_id, ret);
return ret;
}
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
return -ENODEV;
}
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
if (!ctrl_vsi) {
dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
vf->vf_id);
return -ENODEV;
}
pi = &vf->fdir_prof_info[ptg];
if (pi->fdir_active_cnt != 0) {
pi->fdir_active_cnt--;
/* Remove the profile id flow if no active FDIR rule left */
if (!pi->fdir_active_cnt) {
vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
ice_rem_prof_id_flow(hw, blk, vsi_num, id);
vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
ice_rem_prof_id_flow(hw, blk, vsi_num, id);
}
}
conf->parser_ena = false;
return 0;
}
/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
@ -1933,7 +2301,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
struct ice_vf_fdir *fdir = &vf->fdir;
enum virtchnl_status_code v_ret;
struct ice_fdir_fltr *input;
enum ice_fltr_ptype flow;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@ -1983,6 +2354,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_exit;
}
/* For raw FDIR filters created by the parser */
if (conf->parser_ena) {
ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
if (ret)
goto err_del_tmr;
goto exit;
}
is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@ -1992,6 +2372,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_del_tmr;
}
/* Remove unused profiles to avoid unexpected behaviors */
input = &conf->input;
flow = input->flow_type;
if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
ice_vc_fdir_rem_prof(vf, flow, is_tun);
exit:
kfree(stat);
return ret;

View File

@ -247,6 +247,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@ -1121,6 +1122,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
@ -1266,13 +1268,22 @@ struct virtchnl_proto_hdrs {
u8 pad[3];
/**
* specify where the protocol headers start from.
* must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
**/
int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
union {
struct virtchnl_proto_hdr
proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
struct {
u16 pkt_len;
u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
} raw;
};
};
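/* Adding the union does not grow the structure: the raw member takes
 * 2 + 2 * VIRTCHNL_MAX_SIZE_RAW_PACKET = 2050 bytes, less than the
 * 32 * 72 = 2304 bytes of the proto_hdr array, so the ABI size check
 * below stays at 2312 (8-byte header + 2304-byte union).
 */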
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);