From patchwork Tue Nov 3 08:28:04 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83496
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:04 +0000
Message-Id: <20201103082809.41149-2-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 1/6] net/i40e: add mirror rule config and add/del rule APIs

Define the i40e_mirror_rule_conf structure that is used to set a mirror flow rule in the i40e registers, and relocate the mirror-related macros to the header file.
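For illustration (not part of this patch), a caller could fill the new structure for a VLAN mirror rule and hand it to the now-exported admin-queue helpers; the SEID and VLAN values are placeholders, and hw/veb_seid are assumed to be in scope:

	/* Hypothetical usage: mirror VLANs 4 and 6 to the VSI with SEID 392. */
	struct i40e_mirror_rule_conf conf = {
		.rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN,
		.dst_vsi_seid = 392,
		.num_entries = 2,
		.entries = { 4, 6 },
	};
	uint16_t rule_id;
	enum i40e_status_code status;

	status = i40e_aq_add_mirror_rule(hw, veb_seid, conf.dst_vsi_seid,
					 conf.rule_type, conf.entries,
					 conf.num_entries, &rule_id);
	if (status == I40E_SUCCESS)
		conf.rule_id = rule_id; /* firmware-assigned id, needed later for delete */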
Signed-off-by: Steve Yang --- drivers/net/i40e/i40e_ethdev.c | 13 +++++-------- drivers/net/i40e/i40e_ethdev.h | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index f54769c29..b17c05eda 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -329,12 +329,6 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); -static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, - uint16_t seid, - uint16_t rule_type, - uint16_t *entries, - uint16_t count, - uint16_t rule_id); static int i40e_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t sw_id, uint8_t on); @@ -1742,6 +1736,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* initialize RSS rule list */ TAILQ_INIT(&pf->rss_config_list); + /* initialize mirror filter list */ + TAILQ_INIT(&pf->mirror_filter_list); + /* initialize Traffic Manager configuration */ i40e_tm_conf_init(dev); @@ -10223,7 +10220,7 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) * Add a mirror rule for a given veb. * **/ -static enum i40e_status_code +enum i40e_status_code i40e_aq_add_mirror_rule(struct i40e_hw *hw, uint16_t seid, uint16_t dst_id, uint16_t rule_type, uint16_t *entries, @@ -10274,7 +10271,7 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw, * Delete a mirror rule for a given veb. * **/ -static enum i40e_status_code +enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, uint16_t seid, uint16_t rule_type, uint16_t *entries, uint16_t count, uint16_t rule_id) diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index ea59a3e60..290a54daa 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -1075,6 +1075,30 @@ struct i40e_rss_filter { struct i40e_rte_flow_rss_conf rss_filter_info; }; +/** + * Mirror rule configuration + */ +struct i40e_mirror_rule_conf { + uint8_t rule_type; + uint16_t rule_id; /* the rule id assigned by firmware */ + uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */ + uint16_t num_entries; + /** + * the info stores depend on the rule type. + * If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here. + * If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored. 
+ */ + uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE]; +}; + +TAILQ_HEAD(i40e_mirror_filter_list, i40e_mirror_filter); + +/* Mirror rule list structure */ +struct i40e_mirror_filter { + TAILQ_ENTRY(i40e_mirror_filter) next; + struct i40e_mirror_rule_conf conf; +}; + struct i40e_vf_msg_cfg { /* maximal VF message during a statistic period */ uint32_t max_msg; @@ -1145,6 +1169,7 @@ struct i40e_pf { struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */ struct i40e_rte_flow_rss_conf rss_info; /* RSS info */ struct i40e_rss_conf_list rss_config_list; /* RSS rule list */ + struct i40e_mirror_filter_list mirror_filter_list; struct i40e_queue_regions queue_region; /* queue region info */ struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; @@ -1310,6 +1335,7 @@ union i40e_filter_t { struct rte_eth_tunnel_filter_conf tunnel_filter; struct i40e_tunnel_filter_conf consistent_tunnel_filter; struct i40e_rte_flow_rss_conf rss_conf; + struct i40e_mirror_rule_conf mirror_conf; }; typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1460,6 +1486,14 @@ int i40e_config_rss_filter(struct i40e_pf *pf, int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev); +enum i40e_status_code i40e_aq_add_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t dst_id, + uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t *rule_id); +enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t rule_id); + #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device)

From patchwork Tue Nov 3 08:28:05 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83497
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:05 +0000
Message-Id: <20201103082809.41149-3-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 2/6] net/i40e: define the mirror filter parser

Define the sample filter parser for mirroring. It works in two phases: the first parses the attributes and the pattern, and fills in the mirror config according to the pattern type (PF/VF/VLAN) when the sample ratio is 1; the second parses the sample action, and fills in the destination port of the mirror config according to the action type (PF/VF).
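As an illustrative sketch (not part of this patch) of a flow this parser accepts, mirroring the ingress traffic of VF 0 to the PF with ratio 1, the equivalent rte_flow objects could be built as follows; port_id is a placeholder and error handling is elided:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vf vf_spec = { .id = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VF, .spec = &vf_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_sample sample = { .ratio = 1 }; /* ratio 1 => mirror */
	struct rte_flow_action_port_id dst = { .id = 0 };      /* id 0 selects the PF */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);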
Signed-off-by: Steve Yang --- drivers/net/i40e/i40e_flow.c | 264 +++++++++++++++++++++++++- lib/librte_ethdev/rte_ethdev_driver.h | 1 + 2 files changed, 258 insertions(+), 7 deletions(-) diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 5bec0c7a8..7928871bf 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -1871,15 +1871,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter }, }; -#define NEXT_ITEM_OF_ACTION(act, actions, index) \ - do { \ - act = actions + index; \ - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ - index++; \ - act = actions + index; \ - } \ +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (index)++; \ + act = (actions) + (index); \ + } \ } while (0) +#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \ + ((vlan_item) ?
ntohs(vlan_item->tci) & 0x0fff : (default_vid)) + /* Find the first VOID or non-VOID item pointer */ static const struct rte_flow_item * i40e_find_first_item(const struct rte_flow_item *item, bool is_void) @@ -5267,6 +5270,253 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev, return 0; } +static int +i40e_flow_parse_sample_attr_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item *next_item = pattern + 1; + enum rte_flow_item_type item_type, next_item_type; + const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *entries = mirror_config->entries; + uint8_t *rule_type = &mirror_config->rule_type; + uint16_t vf_id, vf_id_last, vlan_id, vlan_id_mask, vlan_id_last; + uint16_t i, j = 0, k = 0; + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "Not support group."); + return -rte_errno; + } + + item_type = item->type; + next_item_type = next_item->type; + if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END && + (item_type == RTE_FLOW_ITEM_TYPE_PF || + item_type == RTE_FLOW_ITEM_TYPE_VF || + item_type == RTE_FLOW_ITEM_TYPE_VLAN))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support a pattern item that is pf or vf or vlan."); + return -rte_errno; + } + + if (item_type == RTE_FLOW_ITEM_TYPE_PF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for PF mirror."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for VF mirror."); + return -rte_errno; + } + + vf_spec = item->spec; + vf_last = item->last; + vf_mask = item->mask; + if (item->spec || item->last) { + vf_id = (vf_spec ? vf_spec->id : 0); + vf_id_last = (vf_last ? 
vf_last->id : vf_id); + if (vf_id >= pf->vf_num || + vf_id_last >= pf->vf_num || + vf_id_last < vf_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item, + "VF ID is out of range."); + return -rte_errno; + } + for (i = vf_id; i <= vf_id_last; i++, k++) + if (!vf_mask || (vf_mask->id & (1 << k))) + entries[j++] = pf->vfs[i].vsi->seid; + mirror_config->num_entries = j; + } else if (item->mask) { + if (vf_mask->id >= (uint32_t)(1 << pf->vf_num)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, + "VF ID mask is out of range."); + return -rte_errno; + } + for (i = 0; i < pf->vf_num; i++) { + if (vf_mask->id & (1 << i)) + entries[j++] = pf->vfs[i].vsi->seid; + } + mirror_config->num_entries = j; + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not valid VF ID."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, attr, + "Only support ingress attribute for VLAN mirror."); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_last = item->last; + vlan_mask = item->mask; + if (item->spec || item->last) { + vlan_id = GET_VLAN_ID_FROM_TCI(vlan_spec, 0); + vlan_id_last = GET_VLAN_ID_FROM_TCI(vlan_last, vlan_id); + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + if (vlan_id >= ETH_MIRROR_MAX_VLANS || + vlan_id_last >= ETH_MIRROR_MAX_VLANS || + vlan_id_last < vlan_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, item, + "VLAN ID is out of range."); + return -rte_errno; + } + for (i = vlan_id; i <= vlan_id_last; i++, k++) + if (vlan_id_mask & (1 << k)) + entries[j++] = i; + mirror_config->num_entries = j; + } else if (item->mask) { + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (vlan_id_mask & (1 << i)) + entries[j++] = i; + mirror_config->num_entries = j; + } + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not valid VLAN ID."); + return -rte_errno; + } + } + + return 0; +} + +static int +i40e_flow_parse_sample_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *dst_vsi_seid = &mirror_config->dst_vsi_seid; + uint32_t index = 0; + + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_SAMPLE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not supported action."); + return -rte_errno; + } + + if (((const struct rte_flow_action_sample *)act->conf)->ratio != 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid ratio for mirror action"); + return -rte_errno; + } + + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + + if (act->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { + const struct rte_flow_action_port_id *conf = + (const struct rte_flow_action_port_id *)act->conf; + + if (!conf->id) { + *dst_vsi_seid = pf->main_vsi_seid; + } else if (conf->id <= pf->vf_num) { + *dst_vsi_seid = pf->vfs[conf->id - 1].vsi->seid; + } else { + rte_flow_error_set(error, 
EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid port id mirror action"); + return -rte_errno; + } + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Only support pf or vf parameter item."); + return -rte_errno; + } + + return 0; +} + +static int +i40e_parse_sample_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + union i40e_filter_t *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = i40e_flow_parse_sample_attr_pattern(dev, attr, pattern, + error, filter); + if (ret) + return ret; + + ret = i40e_flow_parse_sample_action(dev, actions, error, filter); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_SAMPLE; + + return 0; +} + static int i40e_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h index 0eacfd842..9c45124d5 100644 --- a/lib/librte_ethdev/rte_ethdev_driver.h +++ b/lib/librte_ethdev/rte_ethdev_driver.h @@ -477,6 +477,7 @@ enum rte_filter_type { RTE_ETH_FILTER_TUNNEL, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_SAMPLE, RTE_ETH_FILTER_L2_TUNNEL, RTE_ETH_FILTER_GENERIC, };

From patchwork Tue Nov 3 08:28:06 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83498
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:06 +0000
Message-Id: <20201103082809.41149-4-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 3/6] net/i40e: use generic flow command to re-realize mirror rule

When a flow sample rule's ratio is set to one, its behavior is the same as a mirror rule, so "flow create * pattern * actions sample *" can now replace the old "set port * mirror-rule *" command. Examples of the mirror rule commands mapped to flow management commands (in the commands below, port 0 is the PF and ports 1-2 are VFs):

1) ingress: pf => pf
   set port 0 mirror-rule 0 uplink-mirror dst-pool 2 on
 or
   flow create 0 ingress pattern pf / end \
     actions sample ratio 1 / port_id id 0 / end

2) egress: pf => pf
   set port 0 mirror-rule 0 downlink-mirror dst-pool 2 on
 or
   flow create 0 egress pattern pf / end \
     actions sample ratio 1 / port_id id 0 / end

3) ingress: pf => vf 1
   set port 0 mirror-rule 0 uplink-mirror dst-pool 1 on
 or
   flow create 0 ingress pattern pf / end \
     actions sample ratio 1 / port_id id 2 / end

4) egress: pf => vf 1
   set port 0 mirror-rule 0 downlink-mirror dst-pool 1 on
 or
   flow create 0 egress pattern pf / end \
     actions sample ratio 1 / port_id id 2 / end

5) ingress: vf 0,1 => pf
   set port 0 mirror-rule 0 pool-mirror-up 0x3 dst-pool 2 on
 or
   flow create 0 ingress pattern vf id is 1 / end \
     actions sample ratio 1 / port_id id 0 / end
   flow create 0 ingress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 0 / end
 or
   flow create 0 ingress pattern vf id last 1 / end \
     actions sample ratio 1 / port_id id 0 / end

6) egress: vf 0,1 => pf
   set port 0 mirror-rule 0 pool-mirror-down 0x3 dst-pool 2 on
 or
   flow create 0 egress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 0 / end
   flow create 0 egress pattern vf id is 1 / end \
     actions sample ratio 1 / port_id id 0 / end
 or
   flow create 0 egress pattern vf id last 1 / end \
     actions sample ratio 1 / port_id id 0 / end

7) ingress: vf 0 => vf 1
   set port 0 mirror-rule 0 pool-mirror-up 0x1 dst-pool 1 on
 or
   flow create 0 ingress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 2 / end

8) egress: vf 0 => vf 1
   set port 0 mirror-rule 0 pool-mirror-down 0x1 dst-pool 1 on
 or
   flow create 0 egress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 2 / end

9) ingress: vlan 4,6 => vf 1
   set port 0 mirror-rule 0 vlan-mirror 4,6 dst-pool 1 on
 or
   flow create 0 ingress pattern vlan vid is 4 / end \
     actions sample ratio 1 / port_id id 2 / end
   flow create 0 ingress pattern vlan vid is 6 / end \
     actions sample ratio 1 / port_id id 2 / end
 or
   flow create 0 ingress pattern vlan vid is 4 vid last 6 \
     vid mask 0x5 / end \
     actions sample ratio 1 / port_id id 2 / end
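As a sketch (editor's illustration, not part of this patch) of what the parser from the previous patch produces for case 5 in its "vf id last 1" form, with purely illustrative SEID numbers:

	/* flow create 0 ingress pattern vf id last 1 / end \
	 *   actions sample ratio 1 / port_id id 0 / end
	 * parses roughly into:
	 */
	struct i40e_mirror_rule_conf conf = {
		/* VF ingress is mirrored as VEB egress */
		.rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS,
		.dst_vsi_seid = 388,        /* main VSI SEID, from port_id id 0 */
		.num_entries = 2,
		.entries = { 416, 417 },    /* VSI SEIDs of VF 0 and VF 1 */
	};

i40e_config_sample_filter_set() below passes this configuration straight to i40e_aq_add_mirror_rule() and keeps it on pf->mirror_filter_list so that it can be destroyed or flushed later.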
Signed-off-by: Steve Yang --- drivers/net/i40e/i40e_flow.c | 153 +++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 7928871bf..d6c95415c 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -119,6 +119,7 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); +static int i40e_flow_flush_sample_filter(struct rte_eth_dev *dev); static int i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -5517,6 +5518,104 @@ i40e_parse_sample_filter(struct rte_eth_dev *dev, return
0; } +static int +i40e_config_sample_filter_set(struct rte_eth_dev *dev, + struct i40e_mirror_rule_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_filter *it; + struct i40e_mirror_filter *mirror_filter; + uint16_t rule_id; + int ret; + + if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { + PMD_DRV_LOG(ERR, + "mirror rule can not be configured without veb or vfs."); + return -ENOSYS; + } + if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { + PMD_DRV_LOG(ERR, "mirror table is full."); + return -ENOSPC; + } + + TAILQ_FOREACH(it, &pf->mirror_filter_list, next) { + if (it->conf.dst_vsi_seid == conf->dst_vsi_seid && + it->conf.rule_type == conf->rule_type && + it->conf.num_entries == conf->num_entries && + !memcmp(it->conf.entries, conf->entries, + conf->num_entries * sizeof(conf->entries[0]))) { + PMD_DRV_LOG(ERR, "mirror rule exists."); + return -EEXIST; + } + } + + mirror_filter = rte_zmalloc("i40e_mirror_filter", + sizeof(*mirror_filter), 0); + if (mirror_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + mirror_filter->conf = *conf; + + ret = i40e_aq_add_mirror_rule(hw, + pf->main_vsi->veb->seid, + mirror_filter->conf.dst_vsi_seid, + mirror_filter->conf.rule_type, + mirror_filter->conf.entries, + mirror_filter->conf.num_entries, + &rule_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "failed to add mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + rte_free(mirror_filter); + return -ENOSYS; + } + + mirror_filter->conf.rule_id = rule_id; + + pf->nb_mirror_rule++; + + TAILQ_INSERT_TAIL(&pf->mirror_filter_list, mirror_filter, next); + + return 0; +} + +static int +i40e_config_sample_filter_del(struct rte_eth_dev *dev, + struct i40e_mirror_rule_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_filter *mirror_filter; + void *temp; + int ret; + + ret = i40e_aq_del_mirror_rule(hw, + pf->main_vsi->veb->seid, + conf->rule_type, + conf->entries, + conf->num_entries, + conf->rule_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + + TAILQ_FOREACH_SAFE(mirror_filter, &pf->mirror_filter_list, next, temp) { + if (!memcmp(&mirror_filter->conf, conf, + sizeof(struct i40e_mirror_rule_conf))) { + TAILQ_REMOVE(&pf->mirror_filter_list, + mirror_filter, next); + rte_free(mirror_filter); + } + } + return 0; +} + static int i40e_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -5562,6 +5661,12 @@ i40e_flow_validate(struct rte_eth_dev *dev, return ret; } + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_SAMPLE) { + ret = i40e_parse_sample_filter(dev, attr, pattern, + actions, &cons_filter, error); + return ret; + } + i = 0; /* Get the non-void item number of pattern */ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { @@ -5681,6 +5786,14 @@ i40e_flow_create(struct rte_eth_dev *dev, flow->rule = TAILQ_LAST(&pf->rss_config_list, i40e_rss_conf_list); break; + case RTE_ETH_FILTER_SAMPLE: + ret = i40e_config_sample_filter_set(dev, + &cons_filter.mirror_conf); + if (ret) + goto free_flow; + flow->rule = TAILQ_LAST(&pf->mirror_filter_list, + i40e_mirror_filter_list); + break; default: goto free_flow; } @@ -5735,6 +5848,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev, ret = 
i40e_config_rss_filter_del(dev, &((struct i40e_rss_filter *)flow->rule)->rss_filter_info); break; + case RTE_ETH_FILTER_SAMPLE: + ret = i40e_config_sample_filter_del(dev, + &((struct i40e_mirror_filter *)flow->rule)->conf); + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -5889,6 +6006,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) return -rte_errno; } + ret = i40e_flow_flush_sample_filter(dev); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush mirror flows."); + return -rte_errno; + } + return ret; } @@ -6035,6 +6160,34 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) return ret; } +/* remove the mirror filter */ +static int +i40e_flow_flush_sample_filter(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_flow *flow; + void *temp; + int32_t ret = -EINVAL; + + /* Delete mirror flows in flow list. */ + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { + struct i40e_mirror_filter *rule = flow->rule; + + if (flow->filter_type != RTE_ETH_FILTER_SAMPLE) + continue; + + if (rule) { + ret = i40e_config_sample_filter_del(dev, &rule->conf); + if (ret) + return ret; + } + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } + + return ret; +} + static int i40e_flow_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow,

From patchwork Tue Nov 3 08:28:07 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83499
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:07 +0000
Message-Id: <20201103082809.41149-5-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 4/6] net/ixgbe: add mirror rule config and add/del rule APIs

Define the ixgbe_flow_mirror_conf structure that is used to set a mirror flow rule in the ixgbe registers, and relocate the mirror-related macros to the header file.
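As a small usage sketch (not from this patch; filter_info is assumed to be in scope), the two new inline helpers manage the four hardware rule slots through the mirror_mask bitmap:

	/* Hypothetical: reserve a slot for a VLAN mirror, then release it. */
	struct ixgbe_flow_mirror_conf conf = {
		.rule_type = IXGBE_MRCTL_VLME,
		.dst_pool = 1,          /* mirror to pool/VF 1 */
		.vlan_mask = 0x3,       /* two valid entries in vlan_id[] */
		.vlan_id = { 4, 6 },
	};
	int8_t slot = ixgbe_mirror_filter_insert(filter_info, &conf);
	if (slot < 0)
		return -ENOSPC;         /* all IXGBE_MAX_MIRROR_RULES slots in use */
	/* ... program the MRCTL/VMRVLAN registers for this slot ... */
	ixgbe_mirror_filter_remove(filter_info, slot);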
Signed-off-by: Steve Yang --- drivers/net/ixgbe/ixgbe_ethdev.c | 8 ----- drivers/net/ixgbe/ixgbe_ethdev.h | 54 ++++++++++++++++++++++++++++++-- 2 files changed, 52 insertions(+), 10 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 9a47a8b26..cc07b0e31 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -5724,14 +5724,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) return new_val; } -#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ -#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ -#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ -#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ -#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ - ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ - ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) - static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index 3d35ea791..db95a53f1 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -216,6 +216,27 @@ struct ixgbe_rte_flow_rss_conf { uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ }; +#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */ +#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ +#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ +#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ +#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ +#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ + ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP |\ + ETH_MIRROR_UPLINK_PORT | \ + ETH_MIRROR_DOWNLINK_PORT | \ + ETH_MIRROR_VLAN)) + +struct ixgbe_flow_mirror_conf { + uint8_t rule_type; + uint16_t rule_id; + uint8_t dst_pool; /* Destination pool for this mirror rule. */ + uint64_t pool_mask; /* Bitmap of pool for virtual pool mirroring */ + uint64_t vlan_mask; /* mask for valid VLAN ID. */ + /* VLAN ID list for vlan mirroring. */ + uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; +}; + /* structure for interrupt relative data */ struct ixgbe_interrupt { uint32_t flags; @@ -250,8 +271,6 @@ struct ixgbe_uta_info { uint32_t uta_shadow[IXGBE_MAX_UTA]; }; -#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules.
*/ - struct ixgbe_mirror_info { struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES]; /**< store PF mirror rules configuration*/ @@ -337,6 +356,8 @@ struct ixgbe_filter_info { uint32_t syn_info; /* store the rss filter info */ struct ixgbe_rte_flow_rss_conf rss_info; + uint8_t mirror_mask; /* Bit mask for every used mirror filter */ + struct ixgbe_flow_mirror_conf mirror_filters[IXGBE_MAX_MIRROR_RULES]; }; struct ixgbe_l2_tn_key { @@ -830,4 +851,33 @@ ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, return idx; } +static inline int8_t +ixgbe_mirror_filter_insert(struct ixgbe_filter_info *filter_info, + struct ixgbe_flow_mirror_conf *mirror_conf) +{ + int i; + + for (i = 0; i < IXGBE_MAX_MIRROR_RULES; i++) { + if (!(filter_info->mirror_mask & (1 << i))) { + filter_info->mirror_mask |= 1 << i; + mirror_conf->rule_id = i; + filter_info->mirror_filters[i] = *mirror_conf; + return i; + } + } + return -1; +} + +static inline int +ixgbe_mirror_filter_remove(struct ixgbe_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= IXGBE_MAX_MIRROR_RULES) + return -1; + filter_info->mirror_mask &= ~(1 << idx); + memset(&filter_info->mirror_filters[idx], 0, + sizeof(filter_info->mirror_filters[0])); + return idx; +} + #endif /* _IXGBE_ETHDEV_H_ */

From patchwork Tue Nov 3 08:28:08 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83500
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:08 +0000
Message-Id: <20201103082809.41149-6-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 5/6] net/ixgbe: define the mirror filter parser

Define the sample filter parser for mirroring. It works in two phases: the first parses the attributes and the pattern, and fills in the mirror config according to the pattern type (PF/VF/VLAN) when the sample ratio is 1; the second parses the sample action, and fills in the destination port of the mirror config according to the action type (PF/VF).
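As an illustrative sketch (not part of this patch) of a flow this parser accepts, mirroring ingress VLAN 4 to VF 1 (port 2 in the numbering used above), the rte_flow objects could look like this; the TCI is given in network byte order because the parser extracts the VLAN ID with ntohs():

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(4) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_sample sample = { .ratio = 1 };
	struct rte_flow_action_port_id dst = { .id = 2 }; /* port 2 => dst_pool 1 */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};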
Signed-off-by: Steve Yang --- drivers/net/ixgbe/ixgbe_flow.c | 245 +++++++++++++++++++++++++++++++++ 1 file changed, 245 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index dff04c462..0ad49ca48 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -49,6 +49,18 @@ #define IXGBE_MAX_N_TUPLE_PRIO 7 #define IXGBE_MAX_FLX_SOURCE_OFF 62 +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (index)++; \ + act = (actions) + (index); \ + } \ + } while (0) + +#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \ + ((vlan_item) ? ntohs((vlan_item)->tci) & 0x0fff : (default_vid)) + /* ntuple filter list structure */ struct ixgbe_ntuple_filter_ele { TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries; @@ -79,6 +91,11 @@ struct ixgbe_rss_conf_ele { TAILQ_ENTRY(ixgbe_rss_conf_ele) entries; struct ixgbe_rte_flow_rss_conf filter_info; }; +/* mirror filter list structure */ +struct ixgbe_mirror_conf_ele { + TAILQ_ENTRY(ixgbe_mirror_conf_ele) entries; + struct ixgbe_flow_mirror_conf filter_info; +}; /* ixgbe_flow memory list structure */ struct ixgbe_flow_mem { TAILQ_ENTRY(ixgbe_flow_mem) entries; @@ -91,6 +108,7 @@ TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele); TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele); TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele); TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele); +TAILQ_HEAD(ixgbe_mirror_filter_list, ixgbe_mirror_conf_ele); TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem); static struct ixgbe_ntuple_filter_list filter_ntuple_list; @@ -2931,6 +2949,233 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev) ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); } +static int +ixgbe_flow_parse_sample_attr_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct ixgbe_flow_mirror_conf *conf) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item *next_item = pattern + 1; + enum rte_flow_item_type item_type, next_item_type; + const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last; + struct ixgbe_flow_mirror_conf *mirror_config = conf; + uint16_t vf_id, vf_id_last; + uint16_t vlan_id, vlan_id_mask, vlan_id_last; + uint16_t i, j = 0, k = 0; + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "Not support transfer."); + return -rte_errno; + } + + item_type = item->type; + next_item_type = next_item->type; + if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END &&
(item_type == RTE_FLOW_ITEM_TYPE_PF || + item_type == RTE_FLOW_ITEM_TYPE_VF || + item_type == RTE_FLOW_ITEM_TYPE_VLAN))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support pf or vf or vlan pattern."); + return -rte_errno; + } + if (item_type == RTE_FLOW_ITEM_TYPE_PF) { + if (!attr->ingress && attr->egress) { + mirror_config->rule_type = IXGBE_MRCTL_DPME; + } else if (attr->ingress && !attr->egress) { + mirror_config->rule_type = IXGBE_MRCTL_UPME; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute PF mirror."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VF) { + if (attr->ingress && !attr->egress) { + mirror_config->rule_type = IXGBE_MRCTL_VPME; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress attribute for VF mirror."); + return -rte_errno; + } + + vf_spec = item->spec; + vf_last = item->last; + vf_mask = item->mask; + if (item->spec || item->last) { + vf_id = (vf_spec ? vf_spec->id : 0); + vf_id_last = (vf_last ? vf_last->id : vf_id); + if (vf_id >= pci_dev->max_vfs || + vf_id_last >= pci_dev->max_vfs || + vf_id_last < vf_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item, + "VF ID is out of range."); + return -rte_errno; + } + mirror_config->pool_mask = 0; + for (i = vf_id, k = 0; i <= vf_id_last; i++, k++) + if (!vf_mask || (vf_mask->id & (1 << k))) + mirror_config->pool_mask |= (1ULL << i); + } else if (item->mask) { + if (vf_mask->id >= (uint32_t)(1 << pci_dev->max_vfs)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, + "VF ID mask is out of range."); + return -rte_errno; + } + mirror_config->pool_mask = vf_mask->id; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (attr->ingress && !attr->egress) { + mirror_config->rule_type = IXGBE_MRCTL_VLME; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress attribute for VLAN mirror."); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_last = item->last; + vlan_mask = item->mask; + if (item->spec || item->last) { + vlan_id = GET_VLAN_ID_FROM_TCI(vlan_spec, 0); + vlan_id_last = GET_VLAN_ID_FROM_TCI(vlan_last, vlan_id); + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + + if (vlan_id >= ETH_MIRROR_MAX_VLANS || + vlan_id_last >= ETH_MIRROR_MAX_VLANS || + vlan_id_last < vlan_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item, + "VLAN ID is out of range."); + return -rte_errno; + } + for (i = vlan_id; i <= vlan_id_last; i++, k++) + if (vlan_id_mask & (1 << k)) + mirror_config->vlan_id[j++] = i; + + mirror_config->vlan_mask = (1 << j) - 1; + } else if (item->mask) { + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0); + for (i = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (vlan_id_mask & (1 << i)) + mirror_config->vlan_id[j++] = i; + } + mirror_config->vlan_mask = (1 << j) - 1; + } + } + + return 0; +} + +static int +ixgbe_flow_parse_sample_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct ixgbe_flow_mirror_conf *conf) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_vf *act_q; + struct ixgbe_flow_mirror_conf *mirror_config = conf; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint32_t index = 0; + + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != 
RTE_FLOW_ACTION_TYPE_SAMPLE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not supported action."); + return -rte_errno; + } + + if (((const struct rte_flow_action_sample *)act->conf)->ratio != 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid ratio for mirror action"); + return -rte_errno; + } + + /* When the ratio equals one, the behavior is the same as a mirror rule. */ + /* Just support mirror behavior */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { + act_q = act->conf; + if (!act_q->id) { + mirror_config->dst_pool = pci_dev->max_vfs; + } else if (act_q->id <= pci_dev->max_vfs) { + act_q = act->conf; + mirror_config->dst_pool = act_q->id - 1; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid port id mirror action"); + return -rte_errno; + } + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Only support an action item that is pf or vf."); + return -rte_errno; + } + + return 0; +} + +static int +ixgbe_parse_sample_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct ixgbe_flow_mirror_conf *conf, + struct rte_flow_error *error) +{ + int ret; + + ret = ixgbe_flow_parse_sample_attr_pattern(dev, + attr, + pattern, + error, + conf); + if (ret) + return ret; + + return ixgbe_flow_parse_sample_action(dev, actions, error, conf); +} + void ixgbe_filterlist_init(void) {

From patchwork Tue Nov 3 08:28:09 2020
X-Patchwork-Submitter: Steve Yang
X-Patchwork-Id: 83501
From: Steve Yang
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang
Date: Tue, 3 Nov 2020 08:28:09 +0000
Message-Id: <20201103082809.41149-7-stevex.yang@intel.com>
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 6/6] net/ixgbe: use flow sample to re-realize mirror rule

When a flow sample rule's ratio is set to one, its behavior is the same as a mirror rule, so "flow create * pattern * actions sample *" can now replace the old "set port * mirror-rule *" command. Examples of the mirror rule commands mapped to flow management commands (in the commands below, port 0 is the PF and ports 1-2 are VFs):

1) ingress: pf => pf
   set port 0 mirror-rule 0 uplink-mirror dst-pool 2 on
 or
   flow create 0 ingress pattern pf / end \
     actions sample ratio 1 / port_id id 0 / end

2) egress: pf => pf
   set port 0 mirror-rule 0 downlink-mirror dst-pool 2 on
 or
   flow create 0 egress pattern pf / end \
     actions sample ratio 1 / port_id id 0 / end

3) ingress: pf => vf 2
   set port 0 mirror-rule 0 uplink-mirror dst-pool 1 on
 or
   flow create 0 ingress pattern pf / end \
     actions sample ratio 1 / port_id id 2 / end

4) egress: pf => vf 2
   set port 0 mirror-rule 0 downlink-mirror dst-pool 1 on
 or
   flow create 0 egress pattern pf / end \
     actions sample ratio 1 / port_id id 2 / end

5) ingress: vf 0,1 => pf
   set port 0 mirror-rule 0 pool-mirror-up 0x3 dst-pool 2 on
 or
   flow create 0 ingress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 0 / end
   flow create 0 ingress pattern vf id is 1 / end \
     actions sample ratio 1 / port_id id 0 / end

6) ingress: vf 0 => vf 1
   set port 0 mirror-rule 0 pool-mirror-up 0x1 dst-pool 1 on
 or
   flow create 0 ingress pattern vf id is 0 / end \
     actions sample ratio 1 / port_id id 2 / end

7) ingress: vlan 4,6 => vf 1
   rx_vlan add 4 port 0 vf 0xf
   rx_vlan add 6 port 0 vf 0xf
   set port 0 mirror-rule 0 vlan-mirror 4,6 dst-pool 1 on
 or
   rx_vlan add 4 port 0 vf 0xf
   rx_vlan add 6 port 0 vf 0xf
   flow create 0 ingress pattern vlan vid is 4 / end \
     actions sample ratio 1 / port_id id 2 / end
   flow create 0 ingress pattern vlan vid is 6 / end \
     actions sample ratio 1 / port_id id 2 / end
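For reference, a condensed sketch (editor's illustration, values are placeholders) of the register writes that ixgbe_config_mirror_filter_add() below ends up doing for rule slot 0 of a VLAN mirror to pool 1; hw and vlvf_slot_mask are assumed to be in scope:

	/* MRCTL(0): mirror type in the low nibble, destination pool at bit 8. */
	uint32_t mr_ctl = IXGBE_MRCTL_VLME | (1 << 8);  /* dst_pool = 1 */
	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(0), mr_ctl);
	/* VMRVLAN(0)/VMRVLAN(4): low/high 32 bits of the VLVF slot mask. */
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(0), vlvf_slot_mask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(4), vlvf_slot_mask >> 32);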
Signed-off-by: Steve Yang --- drivers/net/ixgbe/ixgbe_flow.c | 228 +++++++++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 0ad49ca48..5635bf585 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -117,6 +117,7 @@ static struct ixgbe_syn_filter_list filter_syn_list; static struct ixgbe_fdir_rule_filter_list filter_fdir_list; static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list; static struct ixgbe_rss_filter_list filter_rss_list; +static struct ixgbe_mirror_filter_list filter_mirror_list; static struct ixgbe_flow_mem_list ixgbe_flow_list; /** @@ -3176,6 +3177,185 @@ ixgbe_parse_sample_filter(struct rte_eth_dev *dev, return ixgbe_flow_parse_sample_action(dev, actions, error, conf); } +static int +ixgbe_config_mirror_filter_add(struct rte_eth_dev *dev, + struct ixgbe_flow_mirror_conf *mirror_conf) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint32_t mr_ctl, vlvf; + uint32_t mp_lsb = 0; + uint32_t mv_msb = 0; + uint32_t mv_lsb = 0; + uint32_t mp_msb = 0; + uint8_t i = 0; + int reg_index = 0; + uint64_t
vlan_mask = 0; + + const uint8_t pool_mask_offset = 32; + const uint8_t vlan_mask_offset = 32; + const uint8_t dst_pool_offset = 8; + const uint8_t rule_mr_offset = 4; + const uint8_t mirror_rule_mask = 0x0F; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_mirror_conf_ele *it; + int8_t rule_id; + uint8_t mirror_type = 0; + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { + PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", + mirror_conf->rule_type); + return -EINVAL; + } + + TAILQ_FOREACH(it, &filter_mirror_list, entries) { + if (it->filter_info.rule_type == mirror_conf->rule_type && + it->filter_info.dst_pool == mirror_conf->dst_pool && + it->filter_info.pool_mask == mirror_conf->pool_mask && + it->filter_info.vlan_mask == mirror_conf->vlan_mask && + !memcmp(it->filter_info.vlan_id, mirror_conf->vlan_id, + ETH_MIRROR_MAX_VLANS * sizeof(mirror_conf->vlan_id[0]))) { + PMD_DRV_LOG(ERR, "mirror rule exists."); + return -EEXIST; + } + } + + rule_id = ixgbe_mirror_filter_insert(filter_info, mirror_conf); + if (rule_id < 0) { + PMD_DRV_LOG(ERR, "more than maximum mirror count(%d).", + IXGBE_MAX_MIRROR_RULES); + return -EINVAL; + } + + + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + mirror_type |= IXGBE_MRCTL_VLME; + /* Check if vlan id is valid and find conresponding VLAN ID + * index in VLVF + */ + for (i = 0; i < pci_dev->max_vfs; i++) + if (mirror_conf->vlan_mask & (1ULL << i)) { + /* search vlan id related pool vlan filter + * index + */ + reg_index = ixgbe_find_vlvf_slot(hw, + mirror_conf->vlan_id[i], + false); + if (reg_index < 0) + return -EINVAL; + vlvf = IXGBE_READ_REG(hw, + IXGBE_VLVF(reg_index)); + if ((vlvf & IXGBE_VLVF_VIEN) && + ((vlvf & IXGBE_VLVF_VLANID_MASK) == + mirror_conf->vlan_id[i])) { + vlan_mask |= (1ULL << reg_index); + } else { + ixgbe_mirror_filter_remove(filter_info, + mirror_conf->rule_id); + return -EINVAL; + } + } + + mv_lsb = vlan_mask & 0xFFFFFFFF; + mv_msb = vlan_mask >> vlan_mask_offset; + } + + /** + * if enable pool mirror, write related pool mask register,if disable + * pool mirror, clear PFMRVM register + */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + mirror_type |= IXGBE_MRCTL_VPME; + mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; + mp_msb = mirror_conf->pool_mask >> pool_mask_offset; + } + if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) + mirror_type |= IXGBE_MRCTL_UPME; + if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) + mirror_type |= IXGBE_MRCTL_DPME; + + /* read mirror control register and recalculate it */ + mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); + mr_ctl |= mirror_type; + mr_ctl &= mirror_rule_mask; + mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; + + /* write mirrror control register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* write pool mirrror control register */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), + mp_msb); + } + /* write VLAN mirrror control register */ + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), + mv_msb); + } + + return 0; +} + +/* remove the mirror filter */ +static int +ixgbe_config_mirror_filter_del(struct 
rte_eth_dev *dev, + struct ixgbe_flow_mirror_conf *conf) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint8_t rule_id = conf->rule_id; + int mr_ctl = 0; + uint32_t lsb_val = 0; + uint32_t msb_val = 0; + const uint8_t rule_mr_offset = 4; + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + + /* clear PFVMCTL register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* clear pool mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + + /* clear vlan mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + + ixgbe_mirror_filter_remove(filter_info, rule_id); + return 0; +} + +static void +ixgbe_clear_all_mirror_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < IXGBE_MAX_MIRROR_RULES; i++) { + if (filter_info->mirror_mask & (1 << i)) { + ixgbe_config_mirror_filter_del(dev, + &filter_info->mirror_filters[i]); + } + } +} + void ixgbe_filterlist_init(void) { @@ -3185,6 +3365,7 @@ ixgbe_filterlist_init(void) TAILQ_INIT(&filter_fdir_list); TAILQ_INIT(&filter_l2_tunnel_list); TAILQ_INIT(&filter_rss_list); + TAILQ_INIT(&filter_mirror_list); TAILQ_INIT(&ixgbe_flow_list); } @@ -3198,6 +3379,7 @@ ixgbe_filterlist_flush(void) struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; struct ixgbe_rss_conf_ele *rss_filter_ptr; + struct ixgbe_mirror_conf_ele *mirror_filter_ptr; while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) { TAILQ_REMOVE(&filter_ntuple_list, @@ -3241,6 +3423,13 @@ ixgbe_filterlist_flush(void) rte_free(rss_filter_ptr); } + while ((mirror_filter_ptr = TAILQ_FIRST(&filter_mirror_list))) { + TAILQ_REMOVE(&filter_mirror_list, + mirror_filter_ptr, + entries); + rte_free(mirror_filter_ptr); + } + while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) { TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, @@ -3272,6 +3461,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); struct ixgbe_rte_flow_rss_conf rss_conf; + struct ixgbe_flow_mirror_conf mirror_conf; struct rte_flow *flow = NULL; struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr; struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; @@ -3279,6 +3469,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_rss_conf_ele *rss_filter_ptr; + struct ixgbe_mirror_conf_ele *mirror_filter_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; uint8_t first_mask = FALSE; @@ -3501,6 +3692,30 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } } + memset(&mirror_conf, 0, sizeof(struct ixgbe_flow_mirror_conf)); + ret = ixgbe_parse_sample_filter(dev, attr, pattern, + actions, &mirror_conf, error); + if (!ret) { + /* Just support mirror behavior */ + ret = ixgbe_config_mirror_filter_add(dev, &mirror_conf); + if (ret) { + PMD_DRV_LOG(ERR, "failed to add mirror filter"); + goto out; + } + + mirror_filter_ptr = rte_zmalloc("ixgbe_mirror_filter", + sizeof(struct ixgbe_mirror_conf_ele), 0); + if (!mirror_filter_ptr) { + PMD_DRV_LOG(ERR, 
"failed to allocate memory"); + goto out; + } + mirror_filter_ptr->filter_info = mirror_conf; + TAILQ_INSERT_TAIL(&filter_mirror_list, + mirror_filter_ptr, entries); + flow->rule = mirror_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_SAMPLE; + return flow; + } out: TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, entries); @@ -3592,6 +3807,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); struct ixgbe_rss_conf_ele *rss_filter_ptr; + struct ixgbe_mirror_conf_ele *mirror_filter_ptr; switch (filter_type) { case RTE_ETH_FILTER_NTUPLE: @@ -3671,6 +3887,17 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(rss_filter_ptr); } break; + case RTE_ETH_FILTER_SAMPLE: + mirror_filter_ptr = (struct ixgbe_mirror_conf_ele *) + pmd_flow->rule; + ret = ixgbe_config_mirror_filter_del(dev, + &mirror_filter_ptr->filter_info); + if (!ret) { + TAILQ_REMOVE(&filter_mirror_list, + mirror_filter_ptr, entries); + rte_free(mirror_filter_ptr); + } + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -3723,6 +3950,7 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, } ixgbe_clear_rss_filter(dev); + ixgbe_clear_all_mirror_filter(dev); ixgbe_filterlist_flush();