From patchwork Fri Jan 13 08:13:08 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Zhao1, Wei"
X-Patchwork-Id: 19320
X-Patchwork-Delegate: ferruh.yigit@amd.com
Return-Path: 
X-Original-To: patchwork@dpdk.org
Delivered-To: patchwork@dpdk.org
Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id D1CEBF966; Fri, 13 Jan 2017 09:18:28 +0100 (CET)
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id 2EB15F920 for ; Fri, 13 Jan 2017 09:18:06 +0100 (CET)
Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga103.fm.intel.com with ESMTP; 13 Jan 2017 00:18:05 -0800
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos; i="5.33,220,1477983600"; d="scan'208"; a="1111991246"
Received: from dpdk1.bj.intel.com ([172.16.182.84]) by fmsmga002.fm.intel.com with ESMTP; 13 Jan 2017 00:18:04 -0800
From: Wei Zhao 
To: dev@dpdk.org
Cc: Wei Zhao , Wenzhuo Lu 
Date: Fri, 13 Jan 2017 16:13:08 +0800
Message-Id: <1484295192-34009-15-git-send-email-wei.zhao1@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1484295192-34009-1-git-send-email-wei.zhao1@intel.com>
References: <1484212665-1635-1-git-send-email-wei.zhao1@intel.com> <1484295192-34009-1-git-send-email-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v6 14/18] net/ixgbe: parse L2 tunnel filter
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Check if the rule is an L2 tunnel rule, and get the L2 tunnel info.

Signed-off-by: Wei Zhao 
Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |   3 +-
 drivers/net/ixgbe/ixgbe_flow.c   | 216 +++++++++++++++++++++++++++++++++++++++
 lib/librte_ether/rte_flow.h      |  48 +++++++++
 3 files changed, 266 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2a67462..14a88ee 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -8089,7 +8089,8 @@ ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
 }
 
 /* remove all the L2 tunnel filters */
-int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
 {
 	struct ixgbe_l2_tn_info *l2_tn_info =
 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 7557dfa..4006084 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -114,6 +114,19 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
 			struct rte_eth_syn_filter *filter,
 			struct rte_flow_error *error);
 static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *filter,
+			struct rte_flow_error *error);
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *rule,
+			struct rte_flow_error *error);
+static int
 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
@@ -1033,6 +1046,204 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a L2 tunnel rule.
rule. + * And get the L2 tunnel filter info BTW. + * Only support E-tag now. + * pattern: + * The first not void item can be E_TAG. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * E_TAG grp 0x1 0x3 + e_cid_base 0x309 0xFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_l2_tunnel_conf *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_e_tag *e_tag_spec; + const struct rte_flow_item_e_tag *e_tag_mask; + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + uint32_t index; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + /* parse pattern */ + index = 0; + + /* The first not void item should be e-tag. */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + if (!item->spec || !item->mask) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec; + e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask; + + /* Only care about GRP and E cid base. */ + if (e_tag_mask->epcp_edei_in_ecid_b || + e_tag_mask->in_ecid_e || + e_tag_mask->ecid_e || + e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + /** + * grp and e_cid_base are bit fields and only use 14 bits. + * e-tag id is taken as little endian by HW. 
+	 */
+	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+			attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+			attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+			attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* parse action */
+	index = 0;
+
+	/* check if the first not void action is QUEUE. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->pool = act_q->index;
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ret = cons_parse_l2_tn_filter(attr, pattern,
+				actions, l2_tn_filter, error);
+
+	if (hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X550EM_a) {
+		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
+
+	return ret;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -1047,6 +1258,7 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_ntuple_filter ntuple_filter;
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
 	int ret;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -1067,6 +1279,10 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	if (!ret)
 		return 0;
 
+	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
+				actions, &l2_tn_filter, error);
+
 	return ret;
 }
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index 98084ac..7142479 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -268,6 +268,20 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_vxlan.
 	 */
 	RTE_FLOW_ITEM_TYPE_VXLAN,
+
+	/**
+	 * Matches a E_TAG header.
+	 *
+	 * See struct rte_flow_item_e_tag.
+	 */
+	RTE_FLOW_ITEM_TYPE_E_TAG,
+
+	/**
+	 * Matches a NVGRE header.
+	 *
+	 * See struct rte_flow_item_nvgre.
+	 */
+	RTE_FLOW_ITEM_TYPE_NVGRE,
 };
 
 /**
@@ -454,6 +468,40 @@ struct rte_flow_item_vxlan {
 };
 
 /**
+ * RTE_FLOW_ITEM_TYPE_E_TAG.
+ *
+ * Matches a E-tag header.
+ */
+struct rte_flow_item_e_tag {
+	uint16_t tpid; /**< Tag protocol identifier (0x893F). */
+	/** E-Tag control information (E-TCI). */
+	/**< E-PCP (3b), E-DEI (1b), ingress E-CID base (12b). */
+	uint16_t epcp_edei_in_ecid_b;
+	/**< Reserved (2b), GRP (2b), E-CID base (12b). */
+	uint16_t rsvd_grp_ecid_b;
+	uint8_t in_ecid_e; /**< Ingress E-CID ext. */
+	uint8_t ecid_e; /**< E-CID ext. */
+};
+
+/**
+ * RTE_FLOW_ITEM_TYPE_NVGRE.
+ *
+ * Matches a NVGRE header.
+ */
+struct rte_flow_item_nvgre {
+	/**
+	 * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
+	 * reserved 0 (9b), version (3b).
+	 *
+	 * \c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
+	 */
+	uint16_t c_k_s_rsvd0_ver;
+	uint16_t protocol; /**< Protocol type (0x6558). */
+	uint8_t tni[3]; /**< Virtual subnet ID. */
+	uint8_t flow_id; /**< Flow ID. */
+};
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
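
For reference, below is a minimal sketch (not part of the patch) of how an application could exercise this parser through the generic rte_flow API, using the GRP 0x1 / E-CID base 0x309 values from the pattern example in the comment above. The function name, port_id and queue index are illustrative assumptions. Per the rte_flow.h comment, rsvd_grp_ecid_b carries reserved (2b), GRP (2b) and E-CID base (12b), and the parser expects it big endian, so the spec packs to 0x1309 and the mask must be exactly 0x3FFF; validation succeeds only on X550/X550EM_x/X550EM_a MACs.

/* Illustrative example only, not part of the patch. */
#include <rte_flow.h>
#include <rte_byteorder.h>

static int
l2_tn_rule_check(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 }; /* egress/priority must stay 0 */
	struct rte_flow_item_e_tag spec = {
		/* Reserved (2b) | GRP (2b) | E-CID base (12b), big endian. */
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag mask = {
		/* The parser only accepts a full GRP + E-CID base mask. */
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG, .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* cons_parse_l2_tn_filter() would yield tunnel_id 0x1309 and pool 1. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}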