From patchwork Wed Sep 2 10:01:36 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Qiming Yang X-Patchwork-Id: 76320 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1866AA04B8; Wed, 2 Sep 2020 12:23:05 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7DEAB137D; Wed, 2 Sep 2020 12:23:04 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id 8A774255 for ; Wed, 2 Sep 2020 12:23:02 +0200 (CEST) IronPort-SDR: Zz+xANMnIBJZA3xD0VO/OTGZ2Z9RxUtWYhOvbi959OauayloRoKXv1G6NAWWeHvmS7rox+kVPC RzTQhgav2wQA== X-IronPort-AV: E=McAfee;i="6000,8403,9731"; a="158355221" X-IronPort-AV: E=Sophos;i="5.76,381,1592895600"; d="scan'208";a="158355221" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga003.jf.intel.com ([10.7.209.27]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 02 Sep 2020 03:22:59 -0700 IronPort-SDR: z2ZAPYGtNDhb84w8reShTV0m6PrHgQafooqEdU50GJVfxYVs4nDOeTcFp9iucSAkuxNIg8b8yA zwTDJyQcZHEw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.76,381,1592895600"; d="scan'208";a="297633289" Received: from dpdk-qiming1.sh.intel.com ([10.67.119.9]) by orsmga003.jf.intel.com with ESMTP; 02 Sep 2020 03:22:57 -0700 From: Qiming Yang To: dev@dpdk.org Cc: qi.z.zhang@intel.com, Qiming Yang , Wei Zhao Date: Wed, 2 Sep 2020 18:01:36 +0800 Message-Id: <20200902100136.109967-1-qiming.yang@intel.com> X-Mailer: git-send-email 2.17.1 Subject: [dpdk-dev] [PATCH] net/ice: enable QINQ filter in switch X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: 
, Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch enables QINQ filter in switch filter. This code depends on base code update. Signed-off-by: Wei Zhao Signed-off-by: Qiming Yang --- drivers/net/ice/ice_generic_flow.c | 8 +++ drivers/net/ice/ice_generic_flow.h | 1 + drivers/net/ice/ice_switch_filter.c | 106 +++++++++++++++++++++++++--- 3 files changed, 104 insertions(+), 11 deletions(-) diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 54b0316b9..9356bff72 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -1448,6 +1448,14 @@ enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = { RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID, RTE_FLOW_ITEM_TYPE_END, }; +enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID, + RTE_FLOW_ITEM_TYPE_END, +}; enum rte_flow_item_type pattern_eth_qinq_pppoes[] = { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_VLAN, diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index 434d2f425..887531905 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -425,6 +425,7 @@ extern enum rte_flow_item_type pattern_eth_pppoes[]; extern enum rte_flow_item_type pattern_eth_pppoes_proto[]; extern enum rte_flow_item_type pattern_eth_vlan_pppoes[]; extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[]; extern enum rte_flow_item_type pattern_eth_qinq_pppoes[]; extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[]; extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[]; diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 24320ac7d..f9849bbf5 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -35,8 
+35,8 @@ #define ICE_SW_INSET_ETHER ( \ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) #define ICE_SW_INSET_MAC_VLAN ( \ - ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \ - ICE_INSET_VLAN_OUTER) + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \ + ICE_INSET_VLAN_INNER) #define ICE_SW_INSET_MAC_IPV4 ( \ ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS) @@ -130,6 +130,12 @@ #define ICE_SW_INSET_MAC_IPV6_PFCP ( \ ICE_SW_INSET_MAC_IPV6 | \ ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID) +#define ICE_SW_INSET_MAC_QINQ ( \ + ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER) +#define ICE_SW_INSET_MAC_IPV4_QINQ ( \ + ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4) +#define ICE_SW_INSET_MAC_IPV6_QINQ ( \ + ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6) struct sw_meta { struct ice_adv_lkup_elem *list; @@ -225,6 +231,20 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_qinq, + ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv4, + ICE_SW_INSET_MAC_IPV4_QINQ, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv6, + ICE_SW_INSET_MAC_IPV6_QINQ, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, }; static struct @@ -345,6 +365,20 @@ ice_pattern_match_item ice_switch_pattern_perm[] = { ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_qinq, + ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv4, + ICE_SW_INSET_MAC_IPV4_QINQ, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv6, + ICE_SW_INSET_MAC_IPV6_QINQ, ICE_INSET_NONE}, + 
{pattern_eth_qinq_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, }; static int @@ -477,6 +511,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], bool pppoe_elem_valid = 0; bool pppoe_patt_valid = 0; bool pppoe_prot_valid = 0; + bool inner_vlan_valid = 0; + bool outer_vlan_valid = 0; bool tunnel_valid = 0; bool profile_rule = 0; bool nvgre_valid = 0; @@ -1023,23 +1059,42 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], "Invalid VLAN item"); return 0; } + + if (!outer_vlan_valid && + (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ || + *tun_type == ICE_NON_TUN_QINQ)) + outer_vlan_valid = 1; + else if (!inner_vlan_valid && + (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ || + *tun_type == ICE_NON_TUN_QINQ)) + inner_vlan_valid = 1; + else if (!inner_vlan_valid) + inner_vlan_valid = 1; + if (vlan_spec && vlan_mask) { - list[t].type = ICE_VLAN_OFOS; + if (outer_vlan_valid && + !inner_vlan_valid) { + list[t].type = ICE_VLAN_EX; + input_set |= ICE_INSET_VLAN_OUTER; + } else if (inner_vlan_valid) { + list[t].type = ICE_VLAN_OFOS; + input_set |= ICE_INSET_VLAN_INNER; + } + if (vlan_mask->tci) { list[t].h_u.vlan_hdr.vlan = vlan_spec->tci; list[t].m_u.vlan_hdr.vlan = vlan_mask->tci; - input_set |= ICE_INSET_VLAN_OUTER; input_set_byte += 2; } + if (vlan_mask->inner_type) { - list[t].h_u.vlan_hdr.type = - vlan_spec->inner_type; - list[t].m_u.vlan_hdr.type = - vlan_mask->inner_type; - input_set |= ICE_INSET_ETHERTYPE; - input_set_byte += 2; + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VLAN input set."); + return 0; } t++; } @@ -1341,8 +1396,27 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], } } + if (*tun_type == ICE_SW_TUN_PPPOE_PAY && + inner_vlan_valid && 
outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ; + else if (*tun_type == ICE_SW_TUN_PPPOE && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_QINQ; + else if (*tun_type == ICE_NON_TUN && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_NON_TUN_QINQ; + else if (*tun_type == ICE_SW_TUN_AND_NON_TUN && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ; + if (pppoe_patt_valid && !pppoe_prot_valid) { - if (ipv6_valid && udp_valid) + if (inner_vlan_valid && outer_vlan_valid && ipv4_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ; + else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ; + else if (inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_QINQ; + else if (ipv6_valid && udp_valid) *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP; else if (ipv6_valid && tcp_valid) *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP; @@ -1615,6 +1689,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, uint16_t lkups_num = 0; const struct rte_flow_item *item = pattern; uint16_t item_num = 0; + uint16_t vlan_num = 0; enum ice_sw_tunnel_type tun_type = ICE_NON_TUN; struct ice_pattern_match_item *pattern_match_item = NULL; @@ -1630,6 +1705,10 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, if (eth_mask->type == UINT16_MAX) tun_type = ICE_SW_TUN_AND_NON_TUN; } + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) + vlan_num++; + /* reserve one more memory slot for ETH which may * consume 2 lookup items. */ @@ -1637,6 +1716,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, item_num++; } + if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN) + tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ; + else if (vlan_num == 2) + tun_type = ICE_NON_TUN_QINQ; + list = rte_zmalloc(NULL, item_num * sizeof(*list), 0); if (!list) { rte_flow_error_set(error, EINVAL,