From patchwork Wed Jun 22 03:00:32 2022
X-Patchwork-Submitter: "Yan, Zhirun" <zhirun.yan@intel.com>
X-Patchwork-Id: 113207
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Zhirun Yan <zhirun.yan@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: xiao.w.wang@intel.com
Subject: [PATCH v1 3/3] net/ice: fix flow management in FDIR
Date: Wed, 22 Jun 2022 11:00:32 +0800
Message-Id: <20220622030032.474770-4-zhirun.yan@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220622030032.474770-1-zhirun.yan@intel.com>
References: <20220622030032.474770-1-zhirun.yan@intel.com>

From: Xiao Wang <xiao.w.wang@intel.com>

All the supported protocol fields, e.g. the outer tuples, should be
included in the key definition of the rte_hash table. Besides, a
protocol field should be used in the key only when the corresponding
mask is set.
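As background for the fix, below is a minimal sketch of the mask-gated key
construction the commit message describes. It is not code from this patch:
struct tuple_key and the flow_* helpers are hypothetical names invented for
illustration; only rte_hash_create/rte_hash_add_key/rte_hash_lookup and
rte_hash_crc are the real librte_hash API. The idea is to fold both the spec
fields and their masks into the rte_hash key, and to copy a spec field into
the key only when its mask is set, so filters that differ only in which
fields are masked cannot collide on one key.

/*
 * Illustrative sketch only, not part of this patch. Assumes EAL is
 * already initialized (rte_hash allocates from DPDK memory).
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>

/* Hypothetical flow key: each supported field is stored together with
 * its mask, mirroring what the patch does for ip_outer/mask_outer and
 * vxlan_data/vxlan_mask in struct ice_fdir_fltr_pattern. */
struct tuple_key {
        uint32_t dst_ip;
        uint32_t dst_ip_mask;
        uint16_t dst_port;
        uint16_t dst_port_mask;
};

static struct rte_hash *
flow_table_create(void)
{
        struct rte_hash_parameters params = {
                .name = "example_fdir_keys",
                .entries = 1024,
                .key_len = sizeof(struct tuple_key),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = (int)rte_socket_id(),
        };

        return rte_hash_create(&params);
}

/* Build the key the way the fixed parser does: a spec field is copied
 * into the key only when its mask is set, so an unmasked field can
 * never make two logically different filters produce the same key. */
static void
flow_key_fill(struct tuple_key *key,
              uint32_t dst_ip_spec, uint32_t dst_ip_mask,
              uint16_t dst_port_spec, uint16_t dst_port_mask)
{
        memset(key, 0, sizeof(*key));   /* unused fields stay all-zero */

        if (dst_ip_mask != 0) {
                key->dst_ip = dst_ip_spec & dst_ip_mask;
                key->dst_ip_mask = dst_ip_mask;
        }
        if (dst_port_mask != 0) {
                key->dst_port = dst_port_spec & dst_port_mask;
                key->dst_port_mask = dst_port_mask;
        }
}

/* Insert a key, refusing duplicates, as the driver does before
 * programming a new FDIR filter. */
static int
flow_insert_unique(struct rte_hash *h, const struct tuple_key *key)
{
        if (rte_hash_lookup(h, key) >= 0)
                return -EEXIST;

        return rte_hash_add_key(h, key);
}

With the masks folded into the key this way, the ice_fdir_fltr_pattern
additions and the mask-gated field assignments in the diff below achieve the
same effect for the outer tuples and the VXLAN/GTPU fields.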
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
 drivers/net/ice/ice_ethdev.h      |   8 ++
 drivers/net/ice/ice_fdir_filter.c | 128 ++++++++++++++++++++----------
 2 files changed, 92 insertions(+), 44 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 791e8566ea..15db79d56f 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -360,6 +360,14 @@ struct ice_fdir_fltr_pattern {
         struct ice_fdir_v6 v6;
     } ip, mask;
 
+    union {
+        struct ice_fdir_v4 v4;
+        struct ice_fdir_v6 v6;
+    } ip_outer, mask_outer;
+
+    struct ice_fdir_udp_vxlan vxlan_data;
+    struct ice_fdir_udp_vxlan vxlan_mask;
+
     struct ice_fdir_udp_gtp gtpu_data;
     struct ice_fdir_udp_gtp gtpu_mask;
 
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 4b0b6b5b23..3503e95979 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1245,11 +1245,19 @@ ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
     key->flow_type = input->flow_type;
     rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
     rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
+
+    rte_memcpy(&key->ip_outer, &input->ip_outer, sizeof(key->ip_outer));
+    rte_memcpy(&key->mask_outer, &input->mask_outer, sizeof(key->mask_outer));
+
     rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
     rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
     rte_memcpy(&key->ext_data_outer, &input->ext_data_outer, sizeof(key->ext_data_outer));
     rte_memcpy(&key->ext_mask_outer, &input->ext_mask_outer, sizeof(key->ext_mask_outer));
+
+    rte_memcpy(&key->vxlan_data, &input->vxlan_data, sizeof(key->vxlan_data));
+    rte_memcpy(&key->vxlan_mask, &input->vxlan_mask, sizeof(key->vxlan_mask));
+
     rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
     rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
 
@@ -2052,23 +2060,31 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             if (ipv4_mask->hdr.dst_addr &&
                 ipv4_mask->hdr.dst_addr != UINT32_MAX)
                 return -rte_errno;
+            /* Mask for IPv4 tos not supported */
+            if (ipv4_mask->hdr.type_of_service &&
+                ipv4_mask->hdr.type_of_service != UINT8_MAX)
+                return -rte_errno;
 
-            if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+            if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
                 *input_set |= ICE_INSET_IPV4_DST;
-            if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+                p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
+            }
+            if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
                 *input_set |= ICE_INSET_IPV4_SRC;
-            if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+                p_v4->src_ip = ipv4_spec->hdr.src_addr;
+            }
+            if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
                 *input_set |= ICE_INSET_IPV4_TTL;
-            if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+                p_v4->ttl = ipv4_spec->hdr.time_to_live;
+            }
+            if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
                 *input_set |= ICE_INSET_IPV4_PROTO;
-            if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+                p_v4->proto = ipv4_spec->hdr.next_proto_id;
+            }
+            if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
                 *input_set |= ICE_INSET_IPV4_TOS;
-
-            p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
-            p_v4->src_ip = ipv4_spec->hdr.src_addr;
-            p_v4->ttl = ipv4_spec->hdr.time_to_live;
-            p_v4->proto = ipv4_spec->hdr.next_proto_id;
-            p_v4->tos = ipv4_spec->hdr.type_of_service;
+                p_v4->tos = ipv4_spec->hdr.type_of_service;
+            }
 
             /* fragment Ipv4:
              * spec is 0x2000, mask is 0x2000
@@ -2114,27 +2130,35 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             }
 
             if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-                    RTE_DIM(ipv6_mask->hdr.src_addr)))
+                    RTE_DIM(ipv6_mask->hdr.src_addr))) {
                 *input_set |= ICE_INSET_IPV6_SRC;
+                rte_memcpy(&p_v6->src_ip,
+                       ipv6_spec->hdr.src_addr, 16);
+            }
             if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-                    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+                    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
                 *input_set |= ICE_INSET_IPV6_DST;
+                rte_memcpy(&p_v6->dst_ip,
+                       ipv6_spec->hdr.dst_addr, 16);
+            }
 
             if ((ipv6_mask->hdr.vtc_flow &
                  rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
-                == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+                == rte_cpu_to_be_32(ICE_IPV6_TC_MASK)) {
                 *input_set |= ICE_INSET_IPV6_TC;
-            if (ipv6_mask->hdr.proto == UINT8_MAX)
+                vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+                p_v6->tc = (uint8_t)(vtc_flow_cpu >>
+                             ICE_FDIR_IPV6_TC_OFFSET);
+            }
+            if (ipv6_mask->hdr.proto == UINT8_MAX) {
                 *input_set |= ICE_INSET_IPV6_NEXT_HDR;
-            if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+                p_v6->proto = ipv6_spec->hdr.proto;
+            }
+            if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
                 *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+                p_v6->hlim = ipv6_spec->hdr.hop_limits;
+            }
 
-            rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
-            rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
-            vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
-            p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
-            p_v6->proto = ipv6_spec->hdr.proto;
-            p_v6->hlim = ipv6_spec->hdr.hop_limits;
             break;
         case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
             l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
@@ -2210,12 +2234,16 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             /* Get filter info */
             if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                 assert(p_v4);
-                p_v4->dst_port = tcp_spec->hdr.dst_port;
-                p_v4->src_port = tcp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_TCP_SRC_PORT)
+                    p_v4->src_port = tcp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_TCP_DST_PORT)
+                    p_v4->dst_port = tcp_spec->hdr.dst_port;
             } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                 assert(p_v6);
-                p_v6->dst_port = tcp_spec->hdr.dst_port;
-                p_v6->src_port = tcp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_TCP_SRC_PORT)
+                    p_v6->src_port = tcp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_TCP_DST_PORT)
+                    p_v6->dst_port = tcp_spec->hdr.dst_port;
             }
             break;
         case RTE_FLOW_ITEM_TYPE_UDP:
@@ -2257,12 +2285,16 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             /* Get filter info */
             if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                 assert(p_v4);
-                p_v4->dst_port = udp_spec->hdr.dst_port;
-                p_v4->src_port = udp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_UDP_SRC_PORT)
+                    p_v4->src_port = udp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_UDP_DST_PORT)
+                    p_v4->dst_port = udp_spec->hdr.dst_port;
             } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                 assert(p_v6);
-                p_v6->src_port = udp_spec->hdr.src_port;
-                p_v6->dst_port = udp_spec->hdr.dst_port;
+                if (*input_set & ICE_INSET_UDP_SRC_PORT)
+                    p_v6->src_port = udp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_UDP_DST_PORT)
+                    p_v6->dst_port = udp_spec->hdr.dst_port;
             }
             break;
         case RTE_FLOW_ITEM_TYPE_SCTP:
@@ -2302,12 +2334,20 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             /* Get filter info */
             if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                 assert(p_v4);
-                p_v4->dst_port = sctp_spec->hdr.dst_port;
-                p_v4->src_port = sctp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_SCTP_SRC_PORT)
+                    p_v4->src_port =
+                        sctp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_SCTP_DST_PORT)
+                    p_v4->dst_port =
+                        sctp_spec->hdr.dst_port;
             } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                 assert(p_v6);
-                p_v6->dst_port = sctp_spec->hdr.dst_port;
-                p_v6->src_port = sctp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_SCTP_SRC_PORT)
+                    p_v6->src_port =
+                        sctp_spec->hdr.src_port;
+                if (*input_set & ICE_INSET_SCTP_DST_PORT)
+                    p_v6->dst_port =
+                        sctp_spec->hdr.dst_port;
             }
             break;
         case RTE_FLOW_ITEM_TYPE_VOID:
@@ -2329,10 +2369,10 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                 return -rte_errno;
             }
 
-            if (vxlan_mask->hdr.vx_vni)
+            if (vxlan_mask->hdr.vx_vni) {
                 *input_set |= ICE_INSET_VXLAN_VNI;
-
-            filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
+                filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
+            }
 
             break;
         case RTE_FLOW_ITEM_TYPE_GTPU:
@@ -2354,10 +2394,10 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                 return -rte_errno;
             }
 
-            if (gtp_mask->teid == UINT32_MAX)
+            if (gtp_mask->teid == UINT32_MAX) {
                 input_set_o |= ICE_INSET_GTPU_TEID;
-
-            filter->input.gtpu_data.teid = gtp_spec->teid;
+                filter->input.gtpu_data.teid = gtp_spec->teid;
+            }
 
             break;
         case RTE_FLOW_ITEM_TYPE_GTP_PSC:
             tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
@@ -2367,11 +2407,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
             if (!(gtp_psc_spec && gtp_psc_mask))
                 break;
 
-            if (gtp_psc_mask->hdr.qfi == 0x3F)
+            if (gtp_psc_mask->hdr.qfi == 0x3F) {
                 input_set_o |= ICE_INSET_GTPU_QFI;
-
-            filter->input.gtpu_data.qfi =
-                gtp_psc_spec->hdr.qfi;
+                filter->input.gtpu_data.qfi =
+                    gtp_psc_spec->hdr.qfi;
+            }
 
             break;
         case RTE_FLOW_ITEM_TYPE_ESP:
             if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&