From patchwork Fri Jan 5 06:13:50 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhao1, Wei" X-Patchwork-Id: 32927 X-Patchwork-Delegate: helin.zhang@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7D59C7D19; Fri, 5 Jan 2018 07:21:48 +0100 (CET) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id C660A3250 for ; Fri, 5 Jan 2018 07:21:46 +0100 (CET) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 04 Jan 2018 22:21:45 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.46,317,1511856000"; d="scan'208";a="19187114" Received: from dpdk2.bj.intel.com ([172.16.182.81]) by fmsmga001.fm.intel.com with ESMTP; 04 Jan 2018 22:21:44 -0800 From: Wei Zhao To: dev@dpdk.org Cc: wenzhuo.lu@intel.com, Wei Zhao Date: Fri, 5 Jan 2018 14:13:50 +0800 Message-Id: <20180105061350.97658-1-wei.zhao1@intel.com> X-Mailer: git-send-email 2.9.3 In-Reply-To: <20171226094905.83277-1-wei.zhao1@intel.com> References: <20171226094905.83277-1-wei.zhao1@intel.com> Subject: [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The ixgbe ntuple filter in rte_flow needs to support directing traffic using fewer than all 5 tuple parameters, so add this new support in the parser code. This patch also adds VLAN pattern parsing code to the ntuple filter in order to handle cases that include a VLAN item in the flow API. Signed-off-by: Wei Zhao Acked-by: Wenzhuo Lu --- v2: -fix coding style issue. 
v3: -add parser vlan pattern code. v4: -fix patch check issue. v5: -add more commit log message. --- drivers/net/ixgbe/ixgbe_flow.c | 135 +++++++++++++++++++++++++++++------------ 1 file changed, 95 insertions(+), 40 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 8f964cf..6af5ce1 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -214,6 +214,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_udp *udp_mask; const struct rte_flow_item_sctp *sctp_spec; const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + struct rte_flow_item_eth eth_null; + struct rte_flow_item_vlan vlan_null; if (!pattern) { rte_flow_error_set(error, @@ -235,6 +241,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + memset(ð_null, 0, sizeof(struct rte_flow_item_eth)); + memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan)); + #ifdef RTE_LIBRTE_SECURITY /** * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY @@ -284,6 +293,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* Skip Ethernet */ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_mask = (const struct rte_flow_item_eth *)item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -294,15 +305,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* if the first item is MAC, the content should be NULL */ - if (item->spec || item->mask) { + if ((item->spec || item->mask) && + (memcmp(eth_spec, ð_null, + sizeof(struct rte_flow_item_eth)) || + memcmp(eth_mask, ð_null, + sizeof(struct rte_flow_item_eth)))) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not 
supported by ntuple filter"); return -rte_errno; } - /* check if the next not void item is IPv4 */ + /* check if the next not void item is IPv4 or Vlan */ item = next_no_void_pattern(pattern, item); - if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -310,48 +326,82 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } } - /* get the IPv4 info */ - if (!item->spec || !item->mask) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Invalid ntuple mask"); - return -rte_errno; - } - /*Not supported last point for range*/ - if (item->last) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - item, "Not supported last point for range"); - return -rte_errno; + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_spec = (const struct rte_flow_item_vlan *)item->spec; + vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(vlan_spec, &vlan_null, + sizeof(struct rte_flow_item_vlan)) || + memcmp(vlan_mask, &vlan_null, + sizeof(struct rte_flow_item_vlan)))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; - /** - * Only support src & dst 
addresses, protocol, - * others should be masked. - */ - if (ipv4_mask->hdr.version_ihl || - ipv4_mask->hdr.type_of_service || - ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.fragment_offset || - ipv4_mask->hdr.time_to_live || - ipv4_mask->hdr.hdr_checksum) { + if (item->mask) { + /* get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + /** + * Only support src & dst addresses, protocol, + * others should be masked. + */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { rte_flow_error_set(error, - EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by ntuple filter"); - return -rte_errno; - } + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } - filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; - filter->src_ip_mask = ipv4_mask->hdr.src_addr; - filter->proto_mask = ipv4_mask->hdr.next_proto_id; + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; - filter->dst_ip = ipv4_spec->hdr.dst_addr; - filter->src_ip = ipv4_spec->hdr.src_addr; - filter->proto = ipv4_spec->hdr.next_proto_id; + ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = 
ipv4_spec->hdr.next_proto_id; + } /* check if the next not void item is TCP or UDP */ item = next_no_void_pattern(pattern, item); @@ -366,8 +416,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* get the TCP/UDP info */ if ((item->type != RTE_FLOW_ITEM_TYPE_END) && + (!item->spec && !item->mask)) { + goto action; + } + + /* get the TCP/UDP/SCTP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && (!item->spec || !item->mask)) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL,