From patchwork Fri Jun 5 07:40:28 2020
X-Patchwork-Submitter: "Zhao1, Wei" <wei.zhao1@intel.com>
X-Patchwork-Id: 70868
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Wei Zhao <wei.zhao1@intel.com>
Date: Fri, 5 Jun 2020 15:40:28 +0800
Message-Id: <20200605074031.16231-2-wei.zhao1@intel.com>
In-Reply-To: <20200605074031.16231-1-wei.zhao1@intel.com>
References: <20200605074031.16231-1-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 1/4] net/ice: add support for more PPPoE packet
 types for switch

This patch adds more support to the switch parser for PPPoE packets:
it enables parsing of the TCP/UDP L4 layer and the IPv4/IPv6 L3 layer
in the PPPoE payload, so that the L4 dst/src port and the L3 IP address
can be used as input set for PPPoE-related switch filter rules.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 115 ++++++++++++++++++++++++----
 1 file changed, 100 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 20e8187d3..a5dd1f7ab 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -26,6 +26,8 @@
 
 #define MAX_QGRP_NUM_TYPE 7
+#define ICE_PPP_IPV4_PROTO	0x0021
+#define ICE_PPP_IPV6_PROTO	0x0057
 
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
@@ -95,6 +97,18 @@
 	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
 	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
 	ICE_INSET_PPPOE_PROTO)
+#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
+#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
+#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
+#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
+#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
+#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
+	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
 	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
@@ -154,10 +168,6 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
 			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
-	{pattern_eth_pppoed,
-			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
-	{pattern_eth_vlan_pppoed,
-			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
 	{pattern_eth_pppoes,
 			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
 	{pattern_eth_vlan_pppoes,
@@ -166,6 +176,30 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
 	{pattern_eth_vlan_pppoes_proto,
 			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_esp,
 			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp_esp,
@@ -254,10 +288,6 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
 			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
 			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
-	{pattern_eth_pppoed,
-			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
-	{pattern_eth_vlan_pppoed,
-			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
 	{pattern_eth_pppoes,
 			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
 	{pattern_eth_vlan_pppoes,
@@ -266,6 +296,30 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
 			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
 	{pattern_eth_vlan_pppoes_proto,
 			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv4_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
+	{pattern_eth_pppoes_ipv6_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv4_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6_tcp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
+	{pattern_eth_vlan_pppoes_ipv6_udp,
+			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_esp,
 			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp_esp,
@@ -416,13 +470,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
-	uint16_t j, t = 0;
+	bool pppoe_elem_valid = 0;
+	bool pppoe_patt_valid = 0;
+	bool pppoe_prot_valid = 0;
 	bool profile_rule = 0;
 	bool tunnel_valid = 0;
-	bool pppoe_valid = 0;
 	bool ipv6_valiad = 0;
 	bool ipv4_valiad = 0;
 	bool udp_valiad = 0;
+	bool tcp_valiad = 0;
+	uint16_t j, t = 0;
 
 	for (item = pattern; item->type !=
 			RTE_FLOW_ITEM_TYPE_END; item++) {
@@ -752,6 +809,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = item->spec;
 			tcp_mask = item->mask;
+			tcp_valiad = 1;
 			if (tcp_spec && tcp_mask) {
 				/* Check TCP mask and update input set */
 				if (tcp_mask->hdr.sent_seq ||
@@ -969,6 +1027,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid pppoe item");
 				return 0;
 			}
+			pppoe_patt_valid = 1;
 			if (pppoe_spec && pppoe_mask) {
 				/* Check pppoe mask and update input set */
 				if (pppoe_mask->length ||
@@ -989,7 +1048,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					input_set |= ICE_INSET_PPPOE_SESSION;
 				}
 				t++;
-				pppoe_valid = 1;
+				pppoe_elem_valid = 1;
 			}
 			break;
@@ -1010,7 +1069,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				return 0;
 			}
 			if (pppoe_proto_spec && pppoe_proto_mask) {
-				if (pppoe_valid)
+				if (pppoe_elem_valid)
 					t--;
 				list[t].type = ICE_PPPOE;
 				if (pppoe_proto_mask->proto_id) {
@@ -1019,9 +1078,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.pppoe_hdr.ppp_prot_id =
 						pppoe_proto_mask->proto_id;
 					input_set |= ICE_INSET_PPPOE_PROTO;
+
+					pppoe_prot_valid = 1;
 				}
+				if ((pppoe_proto_mask->proto_id &
+					pppoe_proto_spec->proto_id) !=
+					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
+				    (pppoe_proto_mask->proto_id &
+					pppoe_proto_spec->proto_id) !=
+					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
+					*tun_type = ICE_SW_TUN_PPPOE_PAY;
+				else
+					*tun_type = ICE_SW_TUN_PPPOE;
 				t++;
 			}
+
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_ESP:
@@ -1232,6 +1303,23 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		}
 	}
 
+	if (pppoe_patt_valid &&
+	    !pppoe_prot_valid) {
+		if (ipv6_valiad && udp_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
+		else if (ipv6_valiad && tcp_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
+		else if (ipv4_valiad && udp_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
+		else if (ipv4_valiad && tcp_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
+		else if (ipv6_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
+		else if (ipv4_valiad)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
+		else
+			*tun_type = ICE_SW_TUN_PPPOE;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
@@ -1447,9 +1535,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			tun_type = ICE_SW_TUN_VXLAN;
 		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
 			tun_type = ICE_SW_TUN_NVGRE;
-		if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
-				item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
-			tun_type = ICE_SW_TUN_PPPOE;
 		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			const struct rte_flow_item_eth *eth_mask;
 			if (item->mask)
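For reference, the kind of rule this patch enables can be built through the
generic rte_flow API. The following is a minimal sketch, not part of the
patch: it creates an eth / pppoes / ipv4 / tcp rule with a TCP destination
port in the input set. The port id, queue index and port number are
arbitrary assumptions for illustration.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Minimal sketch, not part of the patch: match TCP-over-IPv4 inside a
 * PPPoE session and steer hits to an Rx queue. Port id 0, queue 2 and
 * port 8080 are arbitrary assumptions. */
static struct rte_flow *
create_pppoe_ipv4_tcp_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(8080),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_PPPOES },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

In testpmd syntax the same rule would read roughly:
flow create 0 ingress pattern eth / pppoes / ipv4 / tcp dst is 8080 / end actions queue index 2 / end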
From patchwork Fri Jun 5 07:40:29 2020
X-Patchwork-Submitter: "Zhao1, Wei" <wei.zhao1@intel.com>
X-Patchwork-Id: 70869
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, stable@dpdk.org, Wei Zhao <wei.zhao1@intel.com>
Date: Fri, 5 Jun 2020 15:40:29 +0800
Message-Id: <20200605074031.16231-3-wei.zhao1@intel.com>
In-Reply-To: <20200605074031.16231-1-wei.zhao1@intel.com>
References: <20200605074031.16231-1-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 2/4] net/ice: add redirect support for VSI list rule

This patch enables flow redirect for switch rules whose forward action
is of the VSI list type.

Fixes: 397b4b3c5095 ("net/ice: enable flow redirect on switch")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index a5dd1f7ab..fdb1eb755 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1662,6 +1662,9 @@ ice_switch_redirect(struct ice_adapter *ad,
 	uint16_t lkups_cnt;
 	int ret;
 
+	if (rdata->vsi_handle != rd->vsi_handle)
+		return 0;
+
 	sw = hw->switch_info;
 	if (!sw->recp_list[rdata->rid].recp_created)
 		return -EINVAL;
@@ -1673,25 +1676,30 @@ ice_switch_redirect(struct ice_adapter *ad,
 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
 			    list_entry) {
 		rinfo = list_itr->rule_info;
-		if (rinfo.fltr_rule_id == rdata->rule_id &&
+		if ((rinfo.fltr_rule_id == rdata->rule_id &&
 		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
-		    rinfo.sw_act.vsi_handle == rd->vsi_handle) {
+		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
+		    (rinfo.fltr_rule_id == rdata->rule_id &&
+		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
 			lkups_cnt = list_itr->lkups_cnt;
 			lkups_dp = (struct ice_adv_lkup_elem *)
 				ice_memdup(hw, list_itr->lkups,
 					   sizeof(*list_itr->lkups) *
 					   lkups_cnt,
 					   ICE_NONDMA_TO_NONDMA);
+
 			if (!lkups_dp) {
 				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
 				return -EINVAL;
 			}
+
+			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)
+				rinfo.sw_act.vsi_handle = rd->vsi_handle;
 			break;
 		}
 	}
 
 	if (!lkups_dp)
-		return 0;
+		return -EINVAL;
 
 	/* Remove the old rule */
 	ret = ice_rem_adv_rule(hw, list_itr->lkups,
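A rule whose action is ICE_FWD_TO_VSI_LIST forwards to a set of VSIs rather
than to a single VSI handle, so the redirect path cannot require an exact
vsi_handle match when picking candidate rules. The standalone sketch below
restates the match condition introduced by the diff; the struct and enum
definitions are abridged stand-ins for the ice PMD internals, kept only so
the snippet compiles on its own.

#include <stdbool.h>
#include <stdint.h>

/* Abridged stand-ins for ice PMD internal types, for illustration only;
 * the real definitions live in the shared ice switch code. */
enum ice_sw_fwd_act_type { ICE_FWD_TO_VSI, ICE_FWD_TO_VSI_LIST };
struct ice_sw_act_ctrl {
	enum ice_sw_fwd_act_type fltr_act;
	uint16_t vsi_handle;
};
struct ice_adv_rule_info {
	uint32_t fltr_rule_id;
	struct ice_sw_act_ctrl sw_act;
};

/* After this patch, a rule is a redirect candidate either when it
 * forwards to the exact VSI being redirected, or when it forwards to a
 * VSI list; in the latter case the caller replays the rule with the
 * new VSI handle. */
static bool
rule_needs_redirect(const struct ice_adv_rule_info *rinfo,
		    uint32_t rule_id, uint16_t vsi_handle)
{
	if (rinfo->fltr_rule_id != rule_id)
		return false;
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI &&
	    rinfo->sw_act.vsi_handle == vsi_handle)
		return true;
	return rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST;
}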
From patchwork Fri Jun 5 07:40:30 2020
X-Patchwork-Submitter: "Zhao1, Wei" <wei.zhao1@intel.com>
X-Patchwork-Id: 70870
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, stable@dpdk.org, Wei Zhao <wei.zhao1@intel.com>
Date: Fri, 5 Jun 2020 15:40:30 +0800
Message-Id: <20200605074031.16231-4-wei.zhao1@intel.com>
In-Reply-To: <20200605074031.16231-1-wei.zhao1@intel.com>
References: <20200605074031.16231-1-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 3/4] net/ice: add check for NVGRE protocol

This patch adds a check on the protocol type of IPv4 packets: the
tunnel type needs to be updated when NVGRE is carried in the payload.

Fixes: 6bc7628c5e0b ("net/ice: change default tunnel type")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index fdb1eb755..be86b6bdf 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -28,6 +28,7 @@
 #define MAX_QGRP_NUM_TYPE 7
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
+#define ICE_IPV4_PROTO_NVGRE	0x2F
 
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
@@ -632,6 +633,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.ipv4_hdr.protocol =
 						ipv4_mask->hdr.next_proto_id;
 				}
+				if ((ipv4_spec->hdr.next_proto_id &
+					ipv4_mask->hdr.next_proto_id) ==
+					ICE_IPV4_PROTO_NVGRE)
+					*tun_type = ICE_SW_TUN_AND_NON_TUN;
 				if (ipv4_mask->hdr.type_of_service) {
 					list[t].h_u.ipv4_hdr.tos =
 						ipv4_spec->hdr.type_of_service;
@@ -1526,7 +1531,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	const struct rte_flow_item *item = pattern;
 	uint16_t item_num = 0;
 	enum ice_sw_tunnel_type tun_type =
-		ICE_SW_TUN_AND_NON_TUN;
+		ICE_NON_TUN;
 	struct ice_pattern_match_item *pattern_match_item = NULL;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
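The case this patch targets is a rule that matches the IPv4 protocol field
against 0x2F (GRE, the carrier protocol of NVGRE): such a rule must hit
both tunnel and non-tunnel packets, hence ICE_SW_TUN_AND_NON_TUN. A minimal
rte_flow sketch of such a rule follows; it is not part of the patch, and
the port id and queue index are arbitrary assumptions.

#include <stdint.h>
#include <rte_flow.h>

/* Minimal sketch, not part of the patch: match IPv4 packets whose
 * next_proto_id is 0x2F (GRE, which carries NVGRE); after this patch
 * the parser maps such a rule to ICE_SW_TUN_AND_NON_TUN. Port id and
 * queue index are arbitrary assumptions. */
static struct rte_flow *
create_ipv4_gre_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.next_proto_id = 0x2F,
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.next_proto_id = 0xFF,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}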
From patchwork Fri Jun 5 07:40:31 2020
X-Patchwork-Submitter: "Zhao1, Wei" <wei.zhao1@intel.com>
X-Patchwork-Id: 70871
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, stable@dpdk.org, Wei Zhao <wei.zhao1@intel.com>
Date: Fri, 5 Jun 2020 15:40:31 +0800
Message-Id: <20200605074031.16231-5-wei.zhao1@intel.com>
In-Reply-To: <20200605074031.16231-1-wei.zhao1@intel.com>
References: <20200605074031.16231-1-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 4/4] net/ice: support switch flow for specific L4 type

This patch adds more specific tunnel types for IPv4/IPv6 packets: a
TCP/UDP item over IPv4/IPv6 can now select the L4 profile even when no
L4 dst/src port number is given in the input set of the switch filter
rule.

Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index be86b6bdf..aa99f26b0 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -471,11 +471,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t tunnel_valid = 0;
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
 	bool profile_rule = 0;
-	bool tunnel_valid = 0;
 	bool ipv6_valiad = 0;
 	bool ipv4_valiad = 0;
 	bool udp_valiad = 0;
@@ -960,7 +960,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid NVGRE item");
 				return 0;
 			}
-			tunnel_valid = 1;
+			tunnel_valid = 2;
 			if (nvgre_spec && nvgre_mask) {
 				list[t].type = ICE_NVGRE;
 				if (nvgre_mask->tni[0] ||
@@ -1325,6 +1325,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			*tun_type = ICE_SW_TUN_PPPOE;
 	}
 
+	if (!pppoe_patt_valid) {
+		if (tunnel_valid == 1)
+			*tun_type = ICE_SW_TUN_VXLAN;
+		else if (tunnel_valid == 2)
+			*tun_type = ICE_SW_TUN_NVGRE;
+		else if (ipv4_valiad && tcp_valiad)
+			*tun_type = ICE_SW_IPV4_TCP;
+		else if (ipv4_valiad && udp_valiad)
+			*tun_type = ICE_SW_IPV4_UDP;
+		else if (ipv6_valiad && tcp_valiad)
+			*tun_type = ICE_SW_IPV6_TCP;
+		else if (ipv6_valiad && udp_valiad)
+			*tun_type = ICE_SW_IPV6_UDP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
@@ -1536,10 +1551,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		item_num++;
-		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
-			tun_type = ICE_SW_TUN_VXLAN;
-		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
-			tun_type = ICE_SW_TUN_NVGRE;
 		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			const struct rte_flow_item_eth *eth_mask;
 			if (item->mask)
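With this patch a rule can key on the L4 protocol alone: a bare tcp (or
udp) item after ipv4/ipv6 selects the corresponding L4 profile even though
no port is in the input set. A minimal rte_flow sketch follows; it is not
part of the patch, and the port id, queue index and source address are
arbitrary assumptions.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Minimal sketch, not part of the patch: match eth / ipv4 / tcp where
 * only the IPv4 source address is in the input set; the empty TCP item
 * selects the TCP-over-IPv4 profile without any L4 port. Port id,
 * queue index and address are arbitrary assumptions. */
static struct rte_flow *
create_ipv4_tcp_noport_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },	/* L4 type only, no ports */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}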