From patchwork Fri Dec 2 10:43:09 2016
X-Patchwork-Id: 17472
From: Wei Zhao
To: dev@dpdk.org
Cc: wenzhuo.lu@intel.com, wei zhao1
Date: Fri, 2 Dec 2016 18:43:09 +0800
Message-Id: <1480675394-59179-14-git-send-email-wei.zhao1@intel.com>
In-Reply-To: <1480675394-59179-1-git-send-email-wei.zhao1@intel.com>
References: <1480675394-59179-1-git-send-email-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 13/18] net/ixgbe: parse SYN filter

From: wei zhao1

Check if the rule is a SYN rule, and get the SYN info.

Signed-off-by: wei zhao1
Signed-off-by: Wenzhuo Lu
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 154 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 154 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index a421062..3ed749a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -415,6 +415,11 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
 			struct rte_eth_ethertype_filter *filter);
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_syn_filter *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
@@ -8459,6 +8464,148 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a SYN rule.
+ * And get the SYN filter info BTW.
+ */
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_syn_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_TCP)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* if the item is MAC, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is IPv4 or IPv6 */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+				RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* Skip IP */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/* if the item is IP, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is TCP */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+				RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* Get the TCP info. Only support SYN. */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+	    tcp_mask->hdr.src_port ||
+	    tcp_mask->hdr.dst_port ||
+	    tcp_mask->hdr.sent_seq ||
+	    tcp_mask->hdr.recv_ack ||
+	    tcp_mask->hdr.data_off ||
+	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+	    tcp_mask->hdr.rx_win ||
+	    tcp_mask->hdr.cksum ||
+	    tcp_mask->hdr.tcp_urp) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->queue = act_q->index;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* Support 2 priorities, the lowest or highest. */
+	if (!attr->priority) {
+		filter->hig_pri = 0;
+	} else if (attr->priority == (uint32_t)~0U) {
+		filter->hig_pri = 1;
+	} else {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	return 0;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -8472,17 +8619,24 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	int ret;
 	struct rte_eth_ntuple_filter ntuple_filter;
 	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern,
 				actions, &ntuple_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
+
 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
 	ret = ixgbe_parse_ethertype_filter(attr, pattern,
 				actions, &ethertype_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
+
 	return ret;
 }
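
For reference, below is a minimal sketch (not part of the patch) of an rte_flow rule that cons_parse_syn_filter() is written to accept: an ETH / IPv4 / TCP pattern in which only the TCP SYN flag appears in both spec and mask, a single QUEUE action, and an ingress attribute with one of the two supported priorities. The initializer layout follows the rte_flow structures referenced by this series, TCP_SYN_FLAG is the macro the patch itself tests against, and the queue index (3) and variable names are illustrative assumptions.

/* Illustration only, not part of this patch. */
static const struct rte_flow_item_tcp tcp_spec = {
	.hdr.tcp_flags = TCP_SYN_FLAG,	/* match packets with SYN set */
};
static const struct rte_flow_item_tcp tcp_mask = {
	.hdr.tcp_flags = TCP_SYN_FLAG,	/* only the SYN bit may be masked */
};
static const struct rte_flow_action_queue queue_conf = {
	.index = 3,			/* copied into filter->queue */
};

static const struct rte_flow_attr attr = {
	.ingress = 1,			/* egress rules are rejected */
	.priority = 0,			/* 0 -> hig_pri = 0, UINT32_MAX -> hig_pri = 1 */
};

static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask must stay NULL */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask must stay NULL */
	{ .type = RTE_FLOW_ITEM_TYPE_TCP,
	  .spec = &tcp_spec, .mask = &tcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

Passing these to ixgbe_flow_rule_validate(dev, &attr, pattern, actions) should return RTE_FLOW_ERROR_TYPE_NONE, with the parsed rte_eth_syn_filter holding queue = 3 and hig_pri = 0. A non-zero TCP mask field other than tcp_flags, a non-NULL ETH or IP spec/mask, an egress attribute, or a priority other than 0 or UINT32_MAX makes the parser return the corresponding RTE_FLOW_ERROR_TYPE_* value instead.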