From patchwork Fri Dec 30 07:53:06 2016
X-Patchwork-Submitter: "Zhao1, Wei"
X-Patchwork-Id: 18726
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Wei Zhao
To: dev@dpdk.org
Cc: Wei Zhao, Wenzhuo Lu
Date: Fri, 30 Dec 2016 15:53:06 +0800
Message-Id: <1483084390-53159-15-git-send-email-wei.zhao1@intel.com>
In-Reply-To: <1483084390-53159-1-git-send-email-wei.zhao1@intel.com>
References: <1483084390-53159-1-git-send-email-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v2 14/18] net/ixgbe: parse L2 tunnel filter
List-Id: DPDK patches and discussions

Check if the rule is an L2 tunnel rule, and if so, get the L2 tunnel info.

Signed-off-by: Wei Zhao
Signed-off-by: Wenzhuo Lu
---

v2:
--add new error set function
--change return value type of parser function

---
 drivers/net/ixgbe/ixgbe_ethdev.c | 269 +++++++++++++++++++++++++++++++++++----
 lib/librte_ether/rte_flow.h      |  32 +++++
 2 files changed, 273 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ffb1962..0a5ac4f 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -425,6 +425,19 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
 			struct rte_eth_syn_filter *filter,
 			struct rte_flow_error *error);
 static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *filter,
+			struct rte_flow_error *error);
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *rule,
+			struct rte_flow_error *error);
+static int
 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
@@ -8939,41 +8952,175 @@ ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
 }
 
 /**
- * Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * Parse the rule to see if it is an L2 tunnel rule, and if so,
+ * get the L2 tunnel filter info.
+ * Only E-tag is supported for now.
  */
 static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
-		const struct rte_flow_attr *attr,
-		const struct rte_flow_item pattern[],
-		const struct rte_flow_action actions[],
-		struct rte_flow_error *error)
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *filter,
+			struct rte_flow_error *error)
 {
-	struct rte_eth_ntuple_filter ntuple_filter;
-	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_syn_filter syn_filter;
-	int ret;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_e_tag *e_tag_spec;
+	const struct rte_flow_item_e_tag *e_tag_mask;
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t index, j;
 
-	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
-	ret = ixgbe_parse_ntuple_filter(attr, pattern,
-				actions, &ntuple_filter, error);
-	if (!ret)
-		return 0;
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+			NULL, "NULL pattern.");
+		return -rte_errno;
+	}
 
-	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
-	ret = ixgbe_parse_ethertype_filter(attr, pattern,
-				actions, &ethertype_filter, error);
-	if (!ret)
-		return 0;
+	/* parse pattern */
+	index = 0;
 
-	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
-	ret = ixgbe_parse_syn_filter(attr, pattern,
-				actions, &syn_filter, error);
-	if (!ret)
-		return 0;
+	/* The first not void item should be e-tag. */
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
 
-	return ret;
+	if (!item->spec || !item->mask) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
+
+	/* Not supported last point for range */
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -rte_errno;
+	}
+
+	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+	/* Src & dst MAC address should be masked. */
+	for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		if (e_tag_mask->src.addr_bytes[j] ||
+		    e_tag_mask->dst.addr_bytes[j]) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_l2_tunnel_conf));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by L2 tunnel filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Only care about GRP and E cid base. */
+	if (e_tag_mask->e_tag_ethertype ||
+	    e_tag_mask->e_pcp ||
+	    e_tag_mask->dei ||
+	    e_tag_mask->in_e_cid_base ||
+	    e_tag_mask->in_e_cid_ext ||
+	    e_tag_mask->e_cid_ext ||
+	    e_tag_mask->type ||
+	    e_tag_mask->tags ||
+	    e_tag_mask->grp != 0x3 ||
+	    e_tag_mask->e_cid_base != 0xFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
+
+	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	/**
+	 * grp and e_cid_base are bit fields and only use 14 bits.
+	 * e-tag id is taken as little endian by HW.
+	 */
+	filter->tunnel_id = e_tag_spec->grp << 12;
+	filter->tunnel_id |= rte_be_to_cpu_16(e_tag_spec->e_cid_base);
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by L2 tunnel filter");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+			attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+			attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+			attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* parse action */
+	index = 0;
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+			NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	/* check if the first not void action is QUEUE. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->pool = act_q->index;
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
 }
 
 /* Destroy all flow rules associated with a port on ixgbe. */
@@ -9006,6 +9153,72 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ret = cons_parse_l2_tn_filter(attr, pattern,
+				actions, l2_tn_filter, error);
+
+	if (hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X550EM_a) {
+		return -ENOTSUP;
+	}
+
+	return ret;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format. It doesn't guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	int ret;
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(attr, pattern,
+				actions, &ntuple_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = ixgbe_parse_syn_filter(attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
+				actions, &l2_tn_filter, error);
+
+	return ret;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index 98084ac..e9e6220 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -268,6 +268,13 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_vxlan.
 	 */
 	RTE_FLOW_ITEM_TYPE_VXLAN,
+
+	/**
+	 * Matches an E-tag header.
+	 *
+	 * See struct rte_flow_item_e_tag.
+	 */
+	RTE_FLOW_ITEM_TYPE_E_TAG,
 };
 
 /**
@@ -454,6 +461,31 @@ struct rte_flow_item_vxlan {
 };
 
 /**
+ * RTE_FLOW_ITEM_TYPE_E_TAG.
+ *
+ * Matches an E-tag header.
+ */
+struct rte_flow_item_e_tag {
+	struct ether_addr dst; /**< Destination MAC. */
+	struct ether_addr src; /**< Source MAC. */
+	uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
+	uint16_t e_pcp:3; /**< E-PCP */
+	uint16_t dei:1; /**< DEI */
+	uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
+	uint16_t rsv:2; /**< reserved */
+	uint16_t grp:2; /**< GRP */
+	uint16_t e_cid_base:12; /**< E-CID base */
+	uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
+	uint16_t e_cid_ext:8; /**< E-CID extend */
+	uint16_t type; /**< MAC type. */
+	unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
+	struct {
+		uint16_t tpid; /**< Tag protocol identifier. */
+		uint16_t tci; /**< Tag control information. */
+	} tag[]; /**< 802.1Q/ad tag definitions, outermost first. */
+};
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
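
The new parser only accepts one very specific rule shape: a single E-tag pattern item whose mask covers exactly GRP (0x3) and the E-CID base (0xFFF) followed by END, an ingress-only attribute with no priority, and a single QUEUE action followed by END. As an illustration only (not part of this patch), the sketch below shows how an application might build such a rule against the rte_flow API as it stands in this series; the helper name, port id, queue index and E-tag values are invented, and the byte-order handling simply mirrors the rte_be_to_cpu_16() call in cons_parse_l2_tn_filter().

#include <string.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: format-check an E-tag -> queue rule on a port. */
static int
etag_rule_example(uint8_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 }; /* ingress only, no priority */
	struct rte_flow_item_e_tag spec, mask;
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG, .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	memset(&spec, 0, sizeof(spec));
	memset(&mask, 0, sizeof(mask));

	/* Match GRP = 1 and E-CID base = 0x309 (example values). The parser
	 * converts the field with rte_be_to_cpu_16(), so store it swapped. */
	spec.grp = 0x1;
	spec.e_cid_base = rte_cpu_to_be_16(0x309);

	/* The parser requires the mask to cover GRP and E-CID base only;
	 * every other field, including src/dst MAC, must stay zero. */
	mask.grp = 0x3;
	mask.e_cid_base = 0xFFF;

	/* Format check only; actually programming the rule would go through
	 * rte_flow_create() instead. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}

Note that ixgbe_validate_l2_tn_filter() additionally returns -ENOTSUP unless the MAC is X550, X550EM_x or X550EM_a, so a rule that passes the format check is still not guaranteed to be supported on every ixgbe device.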