From patchwork Tue Dec 12 02:34:06 2023
X-Patchwork-Submitter: Zhichao Zeng
X-Patchwork-Id: 135047
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Zhichao Zeng
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Zhichao Zeng, Jingjing Wu, Beilei Xing
Subject: [PATCH] net/iavf: support rte flow with mask for FDIR
Date: Tue, 12 Dec 2023 10:34:06 +0800
Message-Id: <20231212023406.820293-1-zhichaox.zeng@intel.com>
X-Mailer: git-send-email 2.34.1
List-Id: DPDK patches and discussions

This patch supports rte flow with mask for FDIR, including eth/ipv4/ipv6/tcp/udp flow items.
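For illustration only (not part of the patch): with this change, an application can pass a partial mask in an rte_flow pattern and the iavf FDIR parser programs it through the new proto_hdr_w_msk path instead of rejecting it. Below is a minimal sketch of such a rule using the standard rte_flow API; the port id, queue index and addresses are hypothetical placeholders.

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Match IPv4 destination 192.168.1.0/24: the /24 mask is neither zero nor
 * all-ones, which is exactly the case this patch starts accepting.
 */
static struct rte_flow *
create_masked_fdir_rule(uint16_t port_id, uint16_t rx_queue,
			struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 0)),
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffff00), /* /24, not UINT32_MAX */
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

All-ones and empty masks keep using the existing field-bit path (the else branches below), so previously accepted rules are unaffected.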
Signed-off-by: Zhichao Zeng
---
 drivers/net/iavf/iavf_fdir.c | 419 ++++++++++++++++++++---------------
 1 file changed, 243 insertions(+), 176 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 811a10287b..df5359892c 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -742,6 +742,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 	const struct rte_flow_item_ppp *ppp_spec, *ppp_mask;
 	const struct rte_flow_item *item = pattern;
 	struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
+	struct virtchnl_proto_hdr_w_msk *hdr_w_msk, *hdr1_w_msk = NULL;
 	struct rte_ecpri_common_hdr ecpri_common;
 	uint64_t input_set = IAVF_INSET_NONE;
 	enum rte_flow_item_type item_type;
@@ -749,6 +750,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 	uint8_t tun_inner = 0;
 	uint16_t ether_type, flags_version;
 	uint8_t item_num = 0;
+	int with_mask = 0;
 	int layer = 0;
 
 	uint8_t ipv6_addr_mask[16] = {
@@ -838,8 +840,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			next_type = (item + 1)->type;
 
 			hdr1 = &hdrs->proto_hdr[layer];
+			hdr1_w_msk = &hdrs->proto_hdr_w_msk[layer];
 
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1_w_msk, ETH);
 
 			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
 			    (!eth_spec || !eth_mask)) {
@@ -850,43 +854,60 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			}
 
 			if (eth_spec && eth_mask) {
-				if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
-					input_set |= IAVF_INSET_DMAC;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
-									 ETH,
-									 DST);
-				} else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
-					input_set |= IAVF_INSET_SMAC;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
-									 ETH,
-									 SRC);
-				}
-
-				if (eth_mask->hdr.ether_type) {
-					if (eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
-						rte_flow_error_set(error, EINVAL,
-							RTE_FLOW_ERROR_TYPE_ITEM,
-							item, "Invalid type mask.");
-						return -rte_errno;
+				if ((!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+				     !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) ||
+				    (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) &&
+				     !rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr))) {
+					if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr))
+						input_set |= IAVF_INSET_DMAC;
+					if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr))
+						input_set |= IAVF_INSET_SMAC;
+					if (eth_mask->hdr.ether_type)
+						input_set |= IAVF_INSET_ETHERTYPE;
+					rte_memcpy(hdr1_w_msk->buffer_spec, eth_spec,
+						   sizeof(struct rte_ether_hdr));
+					rte_memcpy(hdr1_w_msk->buffer_mask, eth_mask,
+						   sizeof(struct rte_ether_hdr));
+					with_mask = 1;
+				} else {
+					if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
+						input_set |= IAVF_INSET_DMAC;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+										 ETH,
+										 DST);
+					} else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
+						input_set |= IAVF_INSET_SMAC;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+										 ETH,
+										 SRC);
 					}
-					ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
-					if (ether_type == RTE_ETHER_TYPE_IPV4 ||
-					    ether_type == RTE_ETHER_TYPE_IPV6) {
-						rte_flow_error_set(error, EINVAL,
-							RTE_FLOW_ERROR_TYPE_ITEM,
-							item,
-							"Unsupported ether_type.");
-						return -rte_errno;
+					if (eth_mask->hdr.ether_type) {
+						if (eth_mask->hdr.ether_type != RTE_BE16(0xffff)) {
+							rte_flow_error_set(error, EINVAL,
+								RTE_FLOW_ERROR_TYPE_ITEM,
+								item, "Invalid type mask.");
+							return -rte_errno;
+						}
+
+						ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type);
+						if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+						    ether_type == RTE_ETHER_TYPE_IPV6) {
+							rte_flow_error_set(error, EINVAL,
+								RTE_FLOW_ERROR_TYPE_ITEM,
+								item,
+								"Unsupported ether_type.");
+							return -rte_errno;
+						}
+
+						input_set |= IAVF_INSET_ETHERTYPE;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+										 ETHERTYPE);
 					}
-					input_set |= IAVF_INSET_ETHERTYPE;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
-									 ETHERTYPE);
+					rte_memcpy(hdr1->buffer, eth_spec,
+						   sizeof(struct rte_ether_hdr));
 				}
-
-				rte_memcpy(hdr1->buffer, eth_spec,
-					   sizeof(struct rte_ether_hdr));
 			}
 
 			hdrs->count = ++layer;
@@ -900,8 +921,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			next_type = (item + 1)->type;
 
 			hdr = &hdrs->proto_hdr[layer];
+			hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
 
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV4);
 
 			if (!(ipv4_spec && ipv4_mask)) {
 				hdrs->count = ++layer;
@@ -932,79 +955,82 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 				return -rte_errno;
 			}
 
-			/* Mask for IPv4 src/dst addrs not supported */
-			if (ipv4_mask->hdr.src_addr &&
-			    ipv4_mask->hdr.src_addr != UINT32_MAX)
-				return -rte_errno;
-			if (ipv4_mask->hdr.dst_addr &&
-			    ipv4_mask->hdr.dst_addr != UINT32_MAX)
-				return -rte_errno;
+			if ((ipv4_mask->hdr.src_addr &&
+			     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+			    (ipv4_mask->hdr.dst_addr &&
+			     ipv4_mask->hdr.dst_addr != UINT32_MAX)) {
+				if (ipv4_mask->hdr.src_addr)
+					input_set |= IAVF_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr)
+					input_set |= IAVF_INSET_IPV4_DST;
+				rte_memcpy(hdr_w_msk->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr_w_msk->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_mask->hdr));
+				with_mask = 1;
+			} else {
+				if (ipv4_mask->hdr.type_of_service ==
+				    UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+									 DSCP);
+				}
 
-			if (ipv4_mask->hdr.type_of_service ==
-			    UINT8_MAX) {
-				input_set |= IAVF_INSET_IPV4_TOS;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-								 DSCP);
-			}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+									 PROT);
+				}
 
-			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
-				input_set |= IAVF_INSET_IPV4_PROTO;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-								 PROT);
-			}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+									 TTL);
+				}
 
-			if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
-				input_set |= IAVF_INSET_IPV4_TTL;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-								 TTL);
-			}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+									 SRC);
+				}
 
-			if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
-				input_set |= IAVF_INSET_IPV4_SRC;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-								 SRC);
-			}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+									 DST);
+				}
+				rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				/* fragment Ipv4:
+				 * spec is 0x2000, mask is 0x2000
+				 */
+				if (ipv4_spec->hdr.fragment_offset ==
+				    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
+				    ipv4_mask->hdr.fragment_offset ==
+				    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
+					/* all IPv4 fragment packet has the same
+					 * ethertype, if the spec and mask is valid,
+					 * set ethertype into input set.
+					 */
+					input_set |= IAVF_INSET_ETHERTYPE;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+									 ETHERTYPE);
 
-			if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
-				input_set |= IAVF_INSET_IPV4_DST;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-								 DST);
+					/* add dummy header for IPv4 Fragment */
+					iavf_fdir_add_fragment_hdr(hdrs, layer);
+				} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
 			}
 
 			if (tun_inner) {
 				input_set &= ~IAVF_PROT_IPV4_OUTER;
 				input_set |= IAVF_PROT_IPV4_INNER;
 			}
-
-			rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
-				   sizeof(ipv4_spec->hdr));
-
 			hdrs->count = ++layer;
-
-			/* fragment Ipv4:
-			 * spec is 0x2000, mask is 0x2000
-			 */
-			if (ipv4_spec->hdr.fragment_offset ==
-			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
-			    ipv4_mask->hdr.fragment_offset ==
-			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
-				/* all IPv4 fragment packet has the same
-				 * ethertype, if the spec and mask is valid,
-				 * set ethertype into input set.
-				 */
-				input_set |= IAVF_INSET_ETHERTYPE;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
-								 ETHERTYPE);
-
-				/* add dummy header for IPv4 Fragment */
-				iavf_fdir_add_fragment_hdr(hdrs, layer);
-			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
-				rte_flow_error_set(error, EINVAL,
-						   RTE_FLOW_ERROR_TYPE_ITEM,
-						   item, "Invalid IPv4 mask.");
-				return -rte_errno;
-			}
-
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -1013,8 +1039,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			ipv6_mask = item->mask;
 
 			hdr = &hdrs->proto_hdr[layer];
-
+			hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV6);
 
 			if (!(ipv6_spec && ipv6_mask)) {
 				hdrs->count = ++layer;
@@ -1028,47 +1055,70 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 				return -rte_errno;
 			}
 
-			if ((ipv6_mask->hdr.vtc_flow &
-			    rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
-			    == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
-				input_set |= IAVF_INSET_IPV6_TC;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-								 TC);
-			}
+			if (memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+				   RTE_DIM(ipv6_mask->hdr.src_addr)) ||
+			    memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+				   RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+				if (memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+					   RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= IAVF_INSET_IPV6_SRC;
+				if (memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+					   RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= IAVF_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto)
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits)
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+				}
+				rte_memcpy(hdr_w_msk->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr_w_msk->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_mask->hdr));
+				with_mask = 1;
+			} else {
+				if ((ipv6_mask->hdr.vtc_flow &
+				     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+				     == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+									 TC);
+				}
 
-			if (ipv6_mask->hdr.proto == UINT8_MAX) {
-				input_set |= IAVF_INSET_IPV6_NEXT_HDR;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-								 PROT);
-			}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+									 PROT);
+				}
 
-			if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
-				input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-								 HOP_LIMIT);
-			}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+									 HOP_LIMIT);
+				}
 
-			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
-				input_set |= IAVF_INSET_IPV6_SRC;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-								 SRC);
-			}
-			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
-				input_set |= IAVF_INSET_IPV6_DST;
-				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-								 DST);
+				if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+									 SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+									 DST);
+				}
+				rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
 			}
 
 			if (tun_inner) {
 				input_set &= ~IAVF_PROT_IPV6_OUTER;
 				input_set |= IAVF_PROT_IPV6_INNER;
 			}
-
-			rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
-				   sizeof(ipv6_spec->hdr));
-
 			hdrs->count = ++layer;
 
 			break;
@@ -1118,8 +1168,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			udp_mask = item->mask;
 
 			hdr = &hdrs->proto_hdr[layer];
-
+			hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, UDP);
 
 			if (udp_spec && udp_mask) {
 				if (udp_mask->hdr.dgram_len ||
@@ -1131,35 +1182,42 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 				}
 
 				/* Mask for UDP src/dst ports not supported */
-				if (udp_mask->hdr.src_port &&
-				    udp_mask->hdr.src_port != UINT16_MAX)
-					return -rte_errno;
-				if (udp_mask->hdr.dst_port &&
-				    udp_mask->hdr.dst_port != UINT16_MAX)
-					return -rte_errno;
+				if ((udp_mask->hdr.src_port &&
+				     udp_mask->hdr.src_port != UINT16_MAX) ||
+				    (udp_mask->hdr.dst_port &&
+				     udp_mask->hdr.dst_port != UINT16_MAX)) {
+					if (udp_mask->hdr.src_port)
+						input_set |= IAVF_INSET_UDP_SRC_PORT;
+					if (udp_mask->hdr.dst_port)
+						input_set |= IAVF_INSET_UDP_DST_PORT;
+					rte_memcpy(hdr_w_msk->buffer_spec, &udp_spec->hdr,
+						   sizeof(udp_spec->hdr));
+					rte_memcpy(hdr_w_msk->buffer_mask, &udp_mask->hdr,
+						   sizeof(udp_mask->hdr));
+					with_mask = 1;
+				} else {
+					if (udp_mask->hdr.src_port == UINT16_MAX) {
+						input_set |= IAVF_INSET_UDP_SRC_PORT;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+					}
+					if (udp_mask->hdr.dst_port == UINT16_MAX) {
+						input_set |= IAVF_INSET_UDP_DST_PORT;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+					}
 
-				if (udp_mask->hdr.src_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_UDP_SRC_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+						rte_memcpy(hdr->buffer,
+							   &udp_spec->hdr,
+							   sizeof(udp_spec->hdr));
+					else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+						rte_memcpy(hdr->buffer,
+							   &udp_spec->hdr,
+							   sizeof(udp_spec->hdr));
 				}
-				if (udp_mask->hdr.dst_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_UDP_DST_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
-				}
-
 				if (tun_inner) {
 					input_set &= ~IAVF_PROT_UDP_OUTER;
 					input_set |= IAVF_PROT_UDP_INNER;
 				}
-
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					rte_memcpy(hdr->buffer,
-						   &udp_spec->hdr,
-						   sizeof(udp_spec->hdr));
-				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					rte_memcpy(hdr->buffer,
-						   &udp_spec->hdr,
-						   sizeof(udp_spec->hdr));
 			}
 
 			hdrs->count = ++layer;
@@ -1170,8 +1228,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			tcp_mask = item->mask;
 
 			hdr = &hdrs->proto_hdr[layer];
-
+			hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, TCP);
 
 			if (tcp_spec && tcp_mask) {
 				if (tcp_mask->hdr.sent_seq ||
@@ -1187,36 +1246,41 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					return -rte_errno;
 				}
 
-				/* Mask for TCP src/dst ports not supported */
-				if (tcp_mask->hdr.src_port &&
-				    tcp_mask->hdr.src_port != UINT16_MAX)
-					return -rte_errno;
-				if (tcp_mask->hdr.dst_port &&
-				    tcp_mask->hdr.dst_port != UINT16_MAX)
-					return -rte_errno;
-
-				if (tcp_mask->hdr.src_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_TCP_SRC_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
-				}
-				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_TCP_DST_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+				if ((tcp_mask->hdr.src_port &&
+				     tcp_mask->hdr.src_port != UINT16_MAX) ||
+				    (tcp_mask->hdr.dst_port &&
+				     tcp_mask->hdr.dst_port != UINT16_MAX)) {
+					if (tcp_mask->hdr.src_port)
+						input_set |= IAVF_INSET_TCP_SRC_PORT;
+					if (tcp_mask->hdr.dst_port)
+						input_set |= IAVF_INSET_TCP_DST_PORT;
+					rte_memcpy(hdr_w_msk->buffer_spec, &tcp_spec->hdr,
+						   sizeof(tcp_spec->hdr));
+					rte_memcpy(hdr_w_msk->buffer_mask, &tcp_mask->hdr,
+						   sizeof(tcp_mask->hdr));
+					with_mask = 1;
+				} else {
+					if (tcp_mask->hdr.src_port == UINT16_MAX) {
+						input_set |= IAVF_INSET_TCP_SRC_PORT;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+					}
+					if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+						input_set |= IAVF_INSET_TCP_DST_PORT;
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+					}
+					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+						rte_memcpy(hdr->buffer,
+							   &tcp_spec->hdr,
+							   sizeof(tcp_spec->hdr));
+					else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+						rte_memcpy(hdr->buffer,
+							   &tcp_spec->hdr,
+							   sizeof(tcp_spec->hdr));
 				}
-
 				if (tun_inner) {
 					input_set &= ~IAVF_PROT_TCP_OUTER;
 					input_set |= IAVF_PROT_TCP_INNER;
 				}
-
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					rte_memcpy(hdr->buffer,
-						   &tcp_spec->hdr,
-						   sizeof(tcp_spec->hdr));
-				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					rte_memcpy(hdr->buffer,
-						   &tcp_spec->hdr,
-						   sizeof(tcp_spec->hdr));
 			}
 
 			hdrs->count = ++layer;
@@ -1556,6 +1620,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 		}
 	}
 
+	if (with_mask)
+		hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
 	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ITEM, item,