From patchwork Tue Dec 17 10:15:52 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Iremonger, Bernard"
X-Patchwork-Id: 63961
X-Patchwork-Delegate: ferruh.yigit@amd.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id C148EA04FC;
	Tue, 17 Dec 2019 11:17:00 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 509D51BF8E;
	Tue, 17 Dec 2019 11:16:23 +0100 (CET)
Received: from mga04.intel.com (mga04.intel.com [192.55.52.120])
	by dpdk.org (Postfix) with ESMTP id E4B001BF8C
	for ; Tue, 17 Dec 2019 11:16:21 +0100 (CET)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga006.jf.intel.com ([10.7.209.51])
	by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
	17 Dec 2019 02:16:21 -0800
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.69,325,1571727600"; d="scan'208";a="217467458"
Received: from sivswdev08.ir.intel.com (HELO localhost.localdomain) ([10.237.217.47])
	by orsmga006.jf.intel.com with ESMTP; 17 Dec 2019 02:16:19 -0800
From: Bernard Iremonger
To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com,
	declan.doherty@intel.com
Cc: konstantin.ananyev@intel.com, bernard.iremonger@intel.com
Date: Tue, 17 Dec 2019 10:15:52 +0000
Message-Id: <1576577756-648-7-git-send-email-bernard.iremonger@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <1575982632-23059-1-git-send-email-bernard.iremonger@intel.com>
References: <1575982632-23059-1-git-send-email-bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH v2 06/10] net/i40e: process ESP flows
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Process ESP flows on Flow Director and RSS.
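For example, once a DDP profile carrying the ESP protocol (the ESP-AH profile) has been loaded so that esp_support is set, the new patterns can be exercised from testpmd roughly as follows; the port id, SPI value and queue index here are illustrative only:

  flow create 0 ingress pattern eth / ipv4 / esp spi is 8 / end actions queue index 2 / end
  flow create 0 ingress pattern eth / ipv6 / udp / esp spi is 8 / end actions queue index 3 / end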
add eth/ipv4/esp and eth/ipv6/esp patterns
add eth/ipv4/udp/esp and eth/ipv6/udp/esp patterns
update i40e_flow_parse_fdir_filter()
add fill_ip6_head()
add oip_type in filter
add is_udp in filter
use tenant_id in filter for spi
handle ESP and AH pctypes in ESP-AH profile
update customized code for ESP

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |  44 ++++++++++++-
 drivers/net/i40e/i40e_ethdev.h |  25 ++++++++
 drivers/net/i40e/i40e_fdir.c   | 142 ++++++++++++++++++++++++++++++++++++++---
 drivers/net/i40e/i40e_flow.c   | 119 +++++++++++++++++++++++++++++++++-
 4 files changed, 317 insertions(+), 13 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5f1cf8a..a462eba 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1106,6 +1106,7 @@ i40e_init_customized_info(struct i40e_pf *pf)
 	}
 
 	pf->gtp_support = false;
+	pf->esp_support = false;
 }
 
 void
@@ -12337,6 +12338,7 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
 			}
 		}
 		name[strlen(name) - 1] = '\0';
+		PMD_DRV_LOG(INFO, "name = %s\n", name);
 		if (!strcmp(name, "GTPC"))
 			new_pctype =
 				i40e_find_customized_pctype(pf,
@@ -12353,6 +12355,30 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
 			new_pctype =
 				i40e_find_customized_pctype(pf,
 					I40E_CUSTOMIZED_GTPU);
+		else if (!strcmp(name, "IPV4_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_ESP_IPV4);
+		else if (!strcmp(name, "IPV6_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_ESP_IPV6);
+		else if (!strcmp(name, "IPV4_UDP_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_ESP_IPV4_UDP);
+		else if (!strcmp(name, "IPV6_UDP_ESP"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_ESP_IPV6_UDP);
+		else if (!strcmp(name, "IPV4_AH"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_AH_IPV4);
+		else if (!strcmp(name, "IPV6_AH"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+					I40E_CUSTOMIZED_AH_IPV6);
 		if (new_pctype) {
 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
 				new_pctype->pctype = pctype_value;
@@ -12448,6 +12474,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
 				continue;
 			memset(name, 0, sizeof(name));
 			strcpy(name, proto[n].name);
+			PMD_DRV_LOG(INFO, "name = %s\n", name);
 			if (!strncasecmp(name, "PPPOE", 5))
 				ptype_mapping[i].sw_ptype |=
 					RTE_PTYPE_L2_ETHER_PPPOE;
@@ -12541,6 +12568,10 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
 				ptype_mapping[i].sw_ptype |=
 					RTE_PTYPE_TUNNEL_GTPU;
 				in_tunnel = true;
+			} else if (!strncasecmp(name, "ESP", 3)) {
+				ptype_mapping[i].sw_ptype |=
+					RTE_PTYPE_TUNNEL_ESP;
+				in_tunnel = true;
 			} else if (!strncasecmp(name, "GRENAT", 6)) {
 				ptype_mapping[i].sw_ptype |=
 					RTE_PTYPE_TUNNEL_GRENAT;
@@ -12560,7 +12591,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
 						ptype_num, 0);
 	if (ret)
-		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+		PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
 
 	rte_free(ptype_mapping);
 	rte_free(ptype);
@@ -12625,6 +12656,17 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
 		}
 	}
 
+	/* Check if ESP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "ESP", 3)) {
+			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+				pf->esp_support = true;
+			else
+				pf->esp_support = false;
+			break;
+		}
+	}
+
 	/* Update customized pctype info */
 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
 					    proto_num, proto, op);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 295ad59..3566056 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -501,6 +501,18 @@ struct i40e_gtp_ipv6_flow {
 	struct rte_eth_ipv6_flow ip6;
 };
 
+/* A structure used to define the input for ESP IPV4 flow */
+struct i40e_esp_ipv4_flow {
+	struct rte_eth_udpv4_flow udp;
+	uint32_t spi;	/* SPI in big endian. */
+};
+
+/* A structure used to define the input for ESP IPV6 flow */
+struct i40e_esp_ipv6_flow {
+	struct rte_eth_udpv6_flow udp;
+	uint32_t spi;	/* SPI in big endian. */
+};
+
 /* A structure used to define the input for raw type flow */
 struct i40e_raw_flow {
 	uint16_t pctype;
@@ -526,6 +538,8 @@ union i40e_fdir_flow {
 	struct i40e_gtp_ipv4_flow gtp_ipv4_flow;
 	struct i40e_gtp_ipv6_flow gtp_ipv6_flow;
 	struct i40e_raw_flow raw_flow;
+	struct i40e_esp_ipv4_flow esp_ipv4_flow;
+	struct i40e_esp_ipv6_flow esp_ipv6_flow;
 };
 
 enum i40e_fdir_ip_type {
@@ -542,8 +556,10 @@ struct i40e_fdir_flow_ext {
 	uint16_t dst_id;	/* VF ID, available when is_vf is 1*/
 	bool inner_ip;   /* If there is inner ip */
 	enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+	enum i40e_fdir_ip_type oip_type; /* ip type for outer ip */
 	bool customized_pctype; /* If customized pctype is used */
 	bool pkt_template; /* If raw packet template is used */
+	bool is_udp; /* ipv4|ipv6 udp flow */
 };
 
 /* A structure used to define the input for a flow director filter entry */
@@ -769,6 +785,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_QINQ,
 	I40E_TUNNEL_TYPE_GTPC,
 	I40E_TUNNEL_TYPE_GTPU,
+	I40E_TUNNEL_TYPE_ESPoUDP,
+	I40E_TUNNEL_TYPE_ESPoIP,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
@@ -897,6 +915,12 @@ enum i40e_new_pctype {
 	I40E_CUSTOMIZED_GTPU_IPV4,
 	I40E_CUSTOMIZED_GTPU_IPV6,
 	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_ESP_IPV4,
+	I40E_CUSTOMIZED_ESP_IPV6,
+	I40E_CUSTOMIZED_ESP_IPV4_UDP,
+	I40E_CUSTOMIZED_ESP_IPV6_UDP,
+	I40E_CUSTOMIZED_AH_IPV4,
+	I40E_CUSTOMIZED_AH_IPV6,
 	I40E_CUSTOMIZED_MAX,
 };
 
@@ -1001,6 +1025,7 @@ struct i40e_pf {
 
 	/* Dynamic Device Personalization */
 	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	bool esp_support; /* 1 - support ESP SPI */
 	/* customer customized pctype */
 	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 	/* Switch Domain Id */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index dee007d..410e5e7 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -971,6 +971,37 @@ i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
 }
 
 static inline int
+fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
+		uint8_t next_proto, uint8_t len, uint16_t *ether_type)
+{
+	struct rte_ipv6_hdr *ip6;
+
+	ip6 = (struct rte_ipv6_hdr *)raw_pkt;
+
+	*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+		(fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
+	ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+	ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+		fdir_input->flow.ipv6_flow.proto : next_proto;
+	ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+		fdir_input->flow.ipv6_flow.hop_limits :
+		I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+	/**
+	 * The source and destination fields in the transmitted packet
+	 * need to be presented in a reversed order with respect
+	 * to the expected received packets.
+	 */
+	rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
+		   IPV6_ADDR_LEN);
+	rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
+		   IPV6_ADDR_LEN);
+	len += sizeof(struct rte_ipv6_hdr);
+
+	return len;
+}
+
+static inline int
 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
 				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
@@ -1045,16 +1076,29 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
 
-		if (!is_customized_pctype)
+		if (!is_customized_pctype) {
 			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
 				fdir_input->flow.ip4_flow.proto :
 				next_proto[fdir_input->pctype];
-		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			len += sizeof(struct rte_ipv4_hdr);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
-			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
 			ip->next_proto_id = IPPROTO_UDP;
-		len += sizeof(struct rte_ipv4_hdr);
+			len += sizeof(struct rte_ipv4_hdr);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
+			ip->next_proto_id = IPPROTO_ESP;
+			len += sizeof(struct rte_ipv4_hdr);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
+			ip->next_proto_id = IPPROTO_UDP;
+			len += sizeof(struct rte_ipv4_hdr);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
+			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
+					len, ether_type);
+		else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
+			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
+					len, ether_type);
 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
 		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
 		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
@@ -1088,8 +1132,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct rte_ipv6_hdr);
 	} else {
-		PMD_DRV_LOG(ERR, "unknown pctype %u.",
-			    fdir_input->pctype);
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
 		return -1;
 	}
 
@@ -1115,6 +1158,10 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 	struct rte_flow_item_gtp *gtp;
 	struct rte_ipv4_hdr *gtp_ipv4;
 	struct rte_ipv6_hdr *gtp_ipv6;
+	struct rte_flow_item_esp *esp;
+	struct rte_ipv4_hdr *esp_ipv4;
+	struct rte_ipv6_hdr *esp_ipv6;
+
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
@@ -1285,10 +1332,87 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			} else
 				payload = (unsigned char *)gtp +
 					sizeof(struct rte_flow_item_gtp);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
+			   cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
+			   cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
+			   cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
+			if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
+				esp_ipv4 = (struct rte_ipv4_hdr *)
+					(raw_pkt + len);
+				esp = (struct rte_flow_item_esp *)esp_ipv4;
+				esp->hdr.spi =
+					fdir_input->flow.esp_ipv4_flow.spi;
+				payload = (unsigned char *)esp +
+					sizeof(struct rte_esp_hdr);
+				len += sizeof(struct rte_esp_hdr);
+			} else if (cus_pctype->index ==
+					I40E_CUSTOMIZED_ESP_IPV4_UDP) {
+				esp_ipv4 = (struct rte_ipv4_hdr *)
+					(raw_pkt + len);
+				udp = (struct rte_udp_hdr *)esp_ipv4;
+				/**
+				 * The source and destination fields in
+				 * the transmitted packet need to be
+				 * presented in a reversed order with
+				 * respect to the expected received
+				 * packets.
+				 */
+				udp->src_port =
+					fdir_input->flow.udp4_flow.dst_port;
+				udp->dst_port =
+					fdir_input->flow.udp4_flow.src_port;
+				udp->dgram_len = rte_cpu_to_be_16
+						(I40E_FDIR_UDP_DEFAULT_LEN);
+				esp = (struct rte_flow_item_esp *)
+					((unsigned char *)esp_ipv4 +
+					sizeof(struct rte_udp_hdr));
+				esp->hdr.spi =
+					fdir_input->flow.esp_ipv4_flow.spi;
+				payload = (unsigned char *)esp +
+					sizeof(struct rte_esp_hdr);
+				len += sizeof(struct rte_udp_hdr) +
+					sizeof(struct rte_esp_hdr);
+			} else if (cus_pctype->index ==
+					I40E_CUSTOMIZED_ESP_IPV6) {
+				esp_ipv6 = (struct rte_ipv6_hdr *)
+					(raw_pkt + len);
+				esp = (struct rte_flow_item_esp *)esp_ipv6;
+				esp->hdr.spi =
+					fdir_input->flow.esp_ipv6_flow.spi;
+				payload = (unsigned char *)esp +
+					sizeof(struct rte_esp_hdr);
+				len += sizeof(struct rte_esp_hdr);
+			} else if (cus_pctype->index ==
+					I40E_CUSTOMIZED_ESP_IPV6_UDP) {
+				esp_ipv6 = (struct rte_ipv6_hdr *)
+					(raw_pkt + len);
+				udp = (struct rte_udp_hdr *)esp_ipv6;
+				/**
+				 * The source and destination fields in
+				 * the transmitted packet need to be
+				 * presented in a reversed order with
+				 * respect to the expected received
+				 * packets.
+				 */
+				udp->src_port =
+					fdir_input->flow.udp6_flow.dst_port;
+				udp->dst_port =
+					fdir_input->flow.udp6_flow.src_port;
+				udp->dgram_len = rte_cpu_to_be_16
+						(I40E_FDIR_UDP_DEFAULT_LEN);
+				esp = (struct rte_flow_item_esp *)
+					((unsigned char *)esp_ipv6 +
+					sizeof(struct rte_udp_hdr));
+				esp->hdr.spi =
+					fdir_input->flow.esp_ipv6_flow.spi;
+				payload = (unsigned char *)esp +
+					sizeof(struct rte_esp_hdr);
+				len += sizeof(struct rte_udp_hdr) +
+					sizeof(struct rte_esp_hdr);
+			}
 		}
 	} else {
-		PMD_DRV_LOG(ERR, "unknown pctype %u.",
-			    fdir_input->pctype);
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
 		return -1;
 	}
 
@@ -1305,7 +1429,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			   &fdir_input->flow_ext.flexbytes[dst],
 			   size * sizeof(uint16_t));
 	}
-
+	rte_hexdump(stdout, NULL, raw_pkt, len);
 	return 0;
 }
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6102103..d182080 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -110,8 +110,7 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
-static int
-i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
 static int
 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
 			    const struct rte_flow_attr *attr,
@@ -1615,6 +1614,36 @@ static enum rte_flow_item_type pattern_qinq_1[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	/* Ethertype */
 	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -1628,6 +1657,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
@@ -1636,6 +1667,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2420,6 +2453,28 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
 			cus_pctype = i40e_find_customized_pctype(pf,
 						I40E_CUSTOMIZED_GTPU_IPV6);
 		break;
+	case RTE_FLOW_ITEM_TYPE_ESP:
+		if (!filter->input.flow_ext.is_udp) {
+			if (filter->input.flow_ext.oip_type ==
+				I40E_FDIR_IPTYPE_IPV4)
+				cus_pctype = i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV4);
+			else if (filter->input.flow_ext.oip_type ==
+				I40E_FDIR_IPTYPE_IPV6)
+				cus_pctype = i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV6);
+		} else {
+			if (filter->input.flow_ext.oip_type ==
+				I40E_FDIR_IPTYPE_IPV4)
+				cus_pctype = i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV4_UDP);
+			else if (filter->input.flow_ext.oip_type ==
+				I40E_FDIR_IPTYPE_IPV6)
+				cus_pctype = i40e_find_customized_pctype(pf,
+						I40E_CUSTOMIZED_ESP_IPV6_UDP);
+			filter->input.flow_ext.is_udp = false;
+		}
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unsupported item type");
 		break;
@@ -2459,6 +2514,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
@@ -2654,10 +2710,18 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				filter->input.flow_ext.inner_ip = false;
+				filter->input.flow_ext.oip_type =
+					I40E_FDIR_IPTYPE_IPV4;
 			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
 				filter->input.flow_ext.inner_ip = true;
 				filter->input.flow_ext.iip_type =
 					I40E_FDIR_IPTYPE_IPV4;
+			} else if (!ipv4_spec && !ipv4_mask && outer_ip) {
+				filter->input.flow_ext.inner_ip = false;
+				filter->input.flow_ext.oip_type =
+					I40E_FDIR_IPTYPE_IPV4;
 			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2716,6 +2780,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			filter->input.flow.ipv6_flow.hop_limits =
 				ipv6_spec->hdr.hop_limits;
 
+			filter->input.flow_ext.inner_ip = false;
+			filter->input.flow_ext.oip_type =
+				I40E_FDIR_IPTYPE_IPV6;
+
 			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
 				   ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
@@ -2729,6 +2797,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				filter->input.flow_ext.inner_ip = true;
 				filter->input.flow_ext.iip_type =
 					I40E_FDIR_IPTYPE_IPV6;
+			} else if (!ipv6_spec && !ipv6_mask && outer_ip) {
+				filter->input.flow_ext.inner_ip = false;
+				filter->input.flow_ext.oip_type =
+					I40E_FDIR_IPTYPE_IPV6;
 			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2828,7 +2900,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 						udp_spec->hdr.dst_port;
 				}
 			}
-
+			filter->input.flow_ext.is_udp = true;
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
@@ -2863,6 +2935,47 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				cus_proto = item_type;
 			}
 			break;
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			if (!pf->esp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported ESP protocol");
+				return -rte_errno;
+			}
+
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			if (!esp_spec || !esp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ESP item");
+				return -rte_errno;
+			}
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ESP mask");
+					return -rte_errno;
+				}
+				if (filter->input.flow_ext.oip_type ==
+					I40E_FDIR_IPTYPE_IPV4)
+					filter->input.flow.esp_ipv4_flow.spi =
+						esp_spec->hdr.spi;
+				if (filter->input.flow_ext.oip_type ==
+					I40E_FDIR_IPTYPE_IPV6)
+					filter->input.flow.esp_ipv6_flow.spi =
+						esp_spec->hdr.spi;
+
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec = item->spec;
 			sctp_mask = item->mask;