[2/5] net/iavf: add support for FDIR GTPU

Message ID 1584510121-377747-3-git-send-email-simei.su@intel.com (mailing list archive)
State Superseded, archived
Delegated to: xiaolong ye
Series: net/iavf: support FDIR capability

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     apply issues

Commit Message

Simei Su March 18, 2020, 5:41 a.m. UTC
  This patch enables GTPU pattern for RTE_FLOW.
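
  For example, an application could install a GTP-U TEID filter through
  the rte_flow API. A minimal sketch (the TEID value and queue index are
  placeholders, the queue action is shown only for illustration, and it
  assumes <rte_flow.h> is included and port_id refers to an initialized
  iavf port):

	/* eth/ipv4/udp/gtpu: steer by TEID. The parser below only picks
	 * up teid when its mask is all-ones, and rejects masks on
	 * v_pt_rsv_flags/msg_type/msg_len.
	 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(0x12345678),
	};
	struct rte_flow_item_gtp gtp_mask = {
		.teid = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);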

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 67 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
  

Comments

Qi Zhang March 19, 2020, 1:46 a.m. UTC | #1
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, March 18, 2020 1:42 PM
> To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> Subject: [PATCH 2/5] net/iavf: add support for FDIR GTPU
> 
> This patch enables GTPU pattern for RTE_FLOW.

The commit message is misleading: the GTPU pattern for rte_flow is already enabled
in another patch; what this patch actually adds is GTPU flow filter support in FDIR.
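
For the extension-header variant, the filter can additionally match the QFI
carried in the PDU session container. A minimal sketch (placeholder values;
field names are from rte_flow.h, and per the code below qfi only enters the
input set when fully masked):

	/* eth/ipv4/udp/gtpu/gtp_psc: match QFI in the extension header */
	struct rte_flow_item_gtp_psc psc_spec = { .qfi = 0x34 };
	struct rte_flow_item_gtp_psc psc_mask = { .qfi = 0xff };
	struct rte_flow_item eh_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU },
		{ .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
		  .spec = &psc_spec, .mask = &psc_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};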

Patch

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index dd321ba..ad100c8 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@ 
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@ 
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -360,6 +370,8 @@ 
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -686,6 +698,61 @@ 
 			filter->input.rule_cfg.proto_stack.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
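
Usage note (a hedged sketch, reusing attr/pattern/actions and port_id from
the sketch in the commit message, and assuming <stdio.h> and <rte_errno.h>):
because the GTPU branch above rejects masks on v_pt_rsv_flags, msg_type and
msg_len, an application can probe whether a mask combination is accepted
with rte_flow_validate() before creating the rule:

	struct rte_flow_error err = { 0 };
	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
	if (ret != 0) {
		/* e.g. -EINVAL with err.message set to "Invalid GTP mask"
		 * when a gtpu field other than teid carries a mask.
		 */
		printf("rule rejected: %s\n",
		       err.message ? err.message : rte_strerror(-ret));
	}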