[v2,18/20] net/mlx5: add flow GRE item

Message ID: 6190b2d787d3a9d2095e2eed17db1c20b2e3c4e1.1530111623.git.nelio.laranjeiro@6wind.com (mailing list archive)
State: Superseded, archived
Delegated to: Shahaf Shuler
Series: net/mlx5: flow rework

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      apply issues

Commit Message

Nélio Laranjeiro June 27, 2018, 3:07 p.m. UTC
  Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 191 ++++++++++++++++++++++++++++++++++-
 1 file changed, 186 insertions(+), 5 deletions(-)
  

Comments

Yongseok Koh July 6, 2018, 11:46 p.m. UTC | #1
On Wed, Jun 27, 2018 at 05:07:50PM +0200, Nelio Laranjeiro wrote:
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
>  drivers/net/mlx5/mlx5_flow.c | 191 ++++++++++++++++++++++++++++++++++-
>  1 file changed, 186 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 47c55b426..636aaabe8 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -54,6 +54,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
>  /* Pattern tunnel Layer bits. */
>  #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
>  #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
> +#define MLX5_FLOW_LAYER_GRE (1u << 14)
>  
>  /* Outer Masks. */
>  #define MLX5_FLOW_LAYER_OUTER_L3 \
> @@ -66,7 +67,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
>  
>  /* Tunnel masks. */
>  #define MLX5_FLOW_LAYER_TUNNEL \
> -	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
> +	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
> +	 MLX5_FLOW_LAYER_GRE)
>  
>  /* Inner Masks. */
>  #define MLX5_FLOW_LAYER_INNER_L3 \
> @@ -215,6 +217,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
>  	},
>  };
>  
> +/* Tunnel protocol values. */
> +#define MLX5_PROTOCOL_GRE 47

How about using IPPROTO_GRE instead?
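
For example (a sketch only; IPPROTO_GRE comes from <netinet/in.h>, which may
need an explicit include if it is not already pulled in indirectly):

#include <netinet/in.h>	/* provides IPPROTO_GRE (47) */

and then pass IPPROTO_GRE to the *_protocol_update() helpers below, making the
private MLX5_PROTOCOL_GRE define unnecessary.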

> +
>  /** Handles information leading to a drop fate. */
>  struct mlx5_flow_verbs {
>  	LIST_ENTRY(mlx5_flow_verbs) next;
> @@ -1005,12 +1010,23 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
>  						  item,
>  						  "L3 cannot follow an L4"
>  						  " layer");
> +		/*
> +		 * IPv6 is not recognised by the NIC inside a GRE tunnel.
> +		 * Such support has to be disabled as the rule would otherwise
> +		 * be accepted without being honoured.  Tested with Mellanox OFED 4.3-3.0.2.1
> +		 */

This comment doesn't look appropriate. Do you think it is a bug of OFED/FW,
which can be fixed? Or, is it a HW erratum? Let's talk offline.

> +		if (tunnel && layers & MLX5_FLOW_LAYER_GRE)
> +			return rte_flow_error_set(error, ENOTSUP,
> +						  RTE_FLOW_ERROR_TYPE_ITEM,
> +						  item,
> +						  "IPv6 inside a GRE tunnel is"
> +						  " not recognised.");
>  		if (!mask)
>  			mask = &rte_flow_item_ipv6_mask;
> -		ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
> -					      (const uint8_t *)&nic_mask,
> -					      sizeof(struct rte_flow_item_ipv6),
> -					      error);
> +		ret = mlx5_flow_item_validate
> +			(item, (const uint8_t *)mask,
> +			 (const uint8_t *)&nic_mask,
> +			 sizeof(struct rte_flow_item_ipv6), error);
>  		if (ret < 0)
>  			return ret;
>  	}
> @@ -1411,6 +1427,168 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
>  	return size;
>  }
>  
> +/**
> + * Update the protocol in Verbs IPv4 spec.
> + *
> + * @param attr[in, out]
> + *   Pointer to Verbs attributes structure.
> + * @param protocol[in]
> + *   Protocol value to set if none is present in the specification.
> + */
> +static void
> +mlx5_flow_item_gre_ipv4_protocol_update(struct ibv_flow_attr *attr,
> +					uint8_t protocol)
> +{
> +	unsigned int i;
> +	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV4_EXT;
> +	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
> +		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
> +
> +	if (!attr)
> +		return;
> +	for (i = 0; i != attr->num_of_specs; ++i) {
> +		if (hdr->type == search) {
> +			struct ibv_flow_spec_ipv4_ext *ip =
> +				(struct ibv_flow_spec_ipv4_ext *)hdr;
> +
> +			if (!ip->val.proto) {
> +				ip->val.proto = protocol;
> +				ip->mask.proto = 0xff;
> +			}
> +			break;
> +		}
> +		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
> +	}
> +}
> +
> +/**
> + * Update the protocol in Verbs IPv6 spec.
> + *
> + * @param attr[in, out]
> + *   Pointer to Verbs attributes structure.
> + * @param protocol[in]
> + *   Protocol value to set if none is present in the specification.
> + */
> +static void
> +mlx5_flow_item_gre_ipv6_protocol_update(struct ibv_flow_attr *attr,
> +					uint8_t protocol)

How about consolidating the two functions,
mlx5_flow_item_gre_ipv6_protocol_update() and
mlx5_flow_item_gre_ipv4_protocol_update()? They have a lot in common.
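
For example, something along these lines could replace both (a sketch only,
reusing the bodies of the two functions; the merged name is illustrative):

static void
mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				      enum ibv_flow_spec_type search,
				      uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			if (search == IBV_FLOW_SPEC_IPV4_EXT) {
				struct ibv_flow_spec_ipv4_ext *ip =
					(struct ibv_flow_spec_ipv4_ext *)hdr;

				if (!ip->val.proto) {
					ip->val.proto = protocol;
					ip->mask.proto = 0xff;
				}
			} else {
				struct ibv_flow_spec_ipv6 *ip =
					(struct ibv_flow_spec_ipv6 *)hdr;

				if (!ip->val.next_hdr) {
					ip->val.next_hdr = protocol;
					ip->mask.next_hdr = 0xff;
				}
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

Callers would then pass IBV_FLOW_SPEC_IPV4_EXT or IBV_FLOW_SPEC_IPV6 together
with the protocol value.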

> +{
> +	unsigned int i;
> +	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV6;
> +	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
> +		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
> +
> +	if (!attr)
> +		return;
> +	for (i = 0; i != attr->num_of_specs; ++i) {
> +		if (hdr->type == search) {
> +			struct ibv_flow_spec_ipv6 *ip =
> +				(struct ibv_flow_spec_ipv6 *)hdr;
> +
> +			if (!ip->val.next_hdr) {

What if the protocol in the IP header has a wrong value, i.e. something other
than 47 (IPPROTO_GRE)?
Shouldn't we have a validation check for it in mlx5_flow_item_gre()?
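
Something like the following kind of check (a sketch only; the l3_protocol
argument is hypothetical and stands for the protocol value requested by the
preceding IPv4/IPv6 item, or 0 when it was left unspecified):

static int
mlx5_flow_item_gre_l3_proto_check(const struct rte_flow_item *item,
				  uint8_t l3_protocol,
				  struct rte_flow_error *error)
{
	/* An outer L3 protocol other than GRE contradicts the GRE item. */
	if (l3_protocol && l3_protocol != MLX5_PROTOCOL_GRE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "outer protocol filtering is not"
					  " compatible with the GRE layer");
	return 0;
}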

> +				ip->val.next_hdr = protocol;
> +				ip->mask.next_hdr = 0xff;
> +			}
> +			break;
> +		}
> +		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
> +	}
> +}
> +
> +/**
> + * Validate GRE layer and possibly create the Verbs specification.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + * @param item[in]
> + *   Item specification.
> + * @param flow[in, out]
> + *   Pointer to flow structure.
> + * @param flow_size[in]
> + *   Size in bytes of the available space to store the flow information.
> + * @param error
> + *   Pointer to error structure.
> + *
> + * @return
> + *   size in bytes necessary for the conversion, a negative errno value
> + *   otherwise and rte_errno is set.
> + */
> +static int
> +mlx5_flow_item_gre(const struct rte_flow_item *item,
> +		   struct rte_flow *flow, const size_t flow_size,
> +		   struct rte_flow_error *error)
> +{
> +	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
> +	const struct rte_flow_item_gre *spec = item->spec;
> +	const struct rte_flow_item_gre *mask = item->mask;
> +	const uint32_t layers = mlx5_flow_layers(flow);
> +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> +	unsigned int size = sizeof(struct ibv_flow_spec_gre);
> +	struct ibv_flow_spec_gre tunnel = {
> +		.type = IBV_FLOW_SPEC_GRE,
> +		.size = size,
> +	};
> +#else
> +	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
> +	struct ibv_flow_spec_tunnel tunnel = {
> +		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
> +		.size = size,
> +	};
> +#endif
> +	int ret;
> +
> +	if (layers & MLX5_FLOW_LAYER_TUNNEL)
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "a tunnel is already present");
> +	if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "L3 Layer is missing");
> +	if (!mask)
> +		mask = &rte_flow_item_gre_mask;
> +	ret = mlx5_flow_item_validate
> +		(item, (const uint8_t *)mask,
> +		 (const uint8_t *)&rte_flow_item_gre_mask,
> +		 sizeof(struct rte_flow_item_gre), error);
> +	if (ret < 0)
> +		return ret;
> +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> +	if (spec) {
> +		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
> +		tunnel.val.protocol = spec->protocol;
> +		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
> +		tunnel.mask.protocol = mask->protocol;
> +		/* Remove unwanted bits from values. */
> +		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
> +		tunnel.val.protocol &= tunnel.mask.protocol;
> +		tunnel.val.key &= tunnel.mask.key;
> +	}
> +#else
> +	if (spec && (spec->protocol & mask->protocol))
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "without MPLS support the"
> +					  " specification cannot be used for"
> +					  " filtering");
> +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
> +	if (size <= flow_size) {
> +		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
> +			mlx5_flow_item_gre_ipv4_protocol_update
> +				(verbs->attr, MLX5_PROTOCOL_GRE);
> +		else
> +			mlx5_flow_item_gre_ipv6_protocol_update
> +				(verbs->attr, MLX5_PROTOCOL_GRE);
> +		mlx5_flow_spec_verbs_add(flow, &tunnel, size);
> +	}
> +	mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_GRE);
> +	flow->ptype = RTE_PTYPE_TUNNEL_GRE;
> +	return size;
> +}
> +
>  /**
>   * Validate items provided by the user.
>   *
> @@ -1469,6 +1647,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
>  			ret = mlx5_flow_item_vxlan_gpe(dev, items, flow,
>  						       remain, error);
>  			break;
> +		case RTE_FLOW_ITEM_TYPE_GRE:
> +			ret = mlx5_flow_item_gre(items, flow, remain, error);
> +			break;
>  		default:
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ITEM,
> -- 
> 2.18.0
>
  
Nélio Laranjeiro July 9, 2018, 1:58 p.m. UTC | #2
Hi Yongseok,

I am only discussing the open questions here; the other comments are
addressed, and as I don't have any objection to them I'll make the
requested modifications.

On Fri, Jul 06, 2018 at 04:46:11PM -0700, Yongseok Koh wrote:
>[...] 
> > +
> >  /** Handles information leading to a drop fate. */
> >  struct mlx5_flow_verbs {
> >  	LIST_ENTRY(mlx5_flow_verbs) next;
> > @@ -1005,12 +1010,23 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
> >  						  item,
> >  						  "L3 cannot follow an L4"
> >  						  " layer");
> > +		/*
> > +		 * IPv6 is not recognised by the NIC inside a GRE tunnel.
> > +		 * Such support has to be disabled as the rule would otherwise
> > +		 * be accepted without being honoured.  Tested with Mellanox OFED 4.3-3.0.2.1
> > +		 */
> 
> This comment doesn't look appropriate. Do you think it is a bug of OFED/FW,
> which can be fixed? Or, is it a HW erratum? Let's talk offline.

At the time, this Mellanox OFED (4.3-3.0.2.1) was the latest GA release;
this is no longer the case today as it cannot be downloaded anymore. A
verification is still necessary: if the issue is no longer present, I'll
remove the comment along with the test.

>[...] 
> > +{
> > +	unsigned int i;
> > +	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV6;
> > +	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
> > +		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
> > +
> > +	if (!attr)
> > +		return;
> > +	for (i = 0; i != attr->num_of_specs; ++i) {
> > +		if (hdr->type == search) {
> > +			struct ibv_flow_spec_ipv6 *ip =
> > +				(struct ibv_flow_spec_ipv6 *)hdr;
> > +
> > +			if (!ip->val.next_hdr) {
> 
> What if the protocol in the IP header has a wrong value, i.e. something other
> than 47 (IPPROTO_GRE)?
> Shouldn't we have a validation check for it in mlx5_flow_item_gre()?
>[...]

Already added; the same issue also occurs with UDP/TCP.  If the user
specifies a protocol, it must match the following layer, otherwise the
request won't be respected, which is a bug.
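
For the record, the kind of contradictory pattern in question looks like this
in rte_flow terms (illustrative only, not part of the patch):

	struct rte_flow_item_ipv4 ipv4_spec = {
		/* UDP (17) contradicts the GRE item below. */
		.hdr.next_proto_id = 17,
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.next_proto_id = 0xff,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

Validation has to reject such a pattern instead of silently overriding one of
the two constraints.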

Thanks,
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 47c55b426..636aaabe8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -54,6 +54,7 @@  extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 /* Pattern tunnel Layer bits. */
 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -66,7 +67,8 @@  extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 /* Tunnel masks. */
 #define MLX5_FLOW_LAYER_TUNNEL \
-	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
+	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+	 MLX5_FLOW_LAYER_GRE)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -215,6 +217,9 @@  static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 	},
 };
 
+/* Tunnel protocol values. */
+#define MLX5_PROTOCOL_GRE 47
+
 /** Handles information leading to a drop fate. */
 struct mlx5_flow_verbs {
 	LIST_ENTRY(mlx5_flow_verbs) next;
@@ -1005,12 +1010,23 @@  mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 						  item,
 						  "L3 cannot follow an L4"
 						  " layer");
+		/*
+		 * IPv6 is not recognised by the NIC inside a GRE tunnel.
+		 * Such support has to be disabled as the rule would otherwise
+		 * be accepted without being honoured.  Tested with Mellanox OFED 4.3-3.0.2.1
+		 */
+		if (tunnel && layers & MLX5_FLOW_LAYER_GRE)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "IPv6 inside a GRE tunnel is"
+						  " not recognised.");
 		if (!mask)
 			mask = &rte_flow_item_ipv6_mask;
-		ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
-					      (const uint8_t *)&nic_mask,
-					      sizeof(struct rte_flow_item_ipv6),
-					      error);
+		ret = mlx5_flow_item_validate
+			(item, (const uint8_t *)mask,
+			 (const uint8_t *)&nic_mask,
+			 sizeof(struct rte_flow_item_ipv6), error);
 		if (ret < 0)
 			return ret;
 	}
@@ -1411,6 +1427,168 @@  mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
 	return size;
 }
 
+/**
+ * Update the protocol in Verbs IPv4 spec.
+ *
+ * @param attr[in, out]
+ *   Pointer to Verbs attributes structure.
+ * @param protocol[in]
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv4_protocol_update(struct ibv_flow_attr *attr,
+					uint8_t protocol)
+{
+	unsigned int i;
+	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV4_EXT;
+	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+	if (!attr)
+		return;
+	for (i = 0; i != attr->num_of_specs; ++i) {
+		if (hdr->type == search) {
+			struct ibv_flow_spec_ipv4_ext *ip =
+				(struct ibv_flow_spec_ipv4_ext *)hdr;
+
+			if (!ip->val.proto) {
+				ip->val.proto = protocol;
+				ip->mask.proto = 0xff;
+			}
+			break;
+		}
+		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+	}
+}
+
+/**
+ * Update the protocol in Verbs IPv6 spec.
+ *
+ * @param attr[in, out]
+ *   Pointer to Verbs attributes structure.
+ * @param protocol[in]
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv6_protocol_update(struct ibv_flow_attr *attr,
+					uint8_t protocol)
+{
+	unsigned int i;
+	const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV6;
+	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+	if (!attr)
+		return;
+	for (i = 0; i != attr->num_of_specs; ++i) {
+		if (hdr->type == search) {
+			struct ibv_flow_spec_ipv6 *ip =
+				(struct ibv_flow_spec_ipv6 *)hdr;
+
+			if (!ip->val.next_hdr) {
+				ip->val.next_hdr = protocol;
+				ip->mask.next_hdr = 0xff;
+			}
+			break;
+		}
+		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+	}
+}
+
+/**
+ * Validate GRE layer and possibly create the Verbs specification.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param item[in]
+ *   Item specification.
+ * @param flow[in, out]
+ *   Pointer to flow structure.
+ * @param flow_size[in]
+ *   Size in bytes of the available space to store the flow information.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   size in bytes necessary for the conversion, a negative errno value
+ *   otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+		   struct rte_flow *flow, const size_t flow_size,
+		   struct rte_flow_error *error)
+{
+	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+	const struct rte_flow_item_gre *spec = item->spec;
+	const struct rte_flow_item_gre *mask = item->mask;
+	const uint32_t layers = mlx5_flow_layers(flow);
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+	struct ibv_flow_spec_gre tunnel = {
+		.type = IBV_FLOW_SPEC_GRE,
+		.size = size,
+	};
+#else
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel tunnel = {
+		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+#endif
+	int ret;
+
+	if (layers & MLX5_FLOW_LAYER_TUNNEL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "a tunnel is already present");
+	if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L3 Layer is missing");
+	if (!mask)
+		mask = &rte_flow_item_gre_mask;
+	ret = mlx5_flow_item_validate
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_gre_mask,
+		 sizeof(struct rte_flow_item_gre), error);
+	if (ret < 0)
+		return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	if (spec) {
+		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+		tunnel.val.protocol = spec->protocol;
+		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+		tunnel.mask.protocol = mask->protocol;
+		/* Remove unwanted bits from values. */
+		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+		tunnel.val.protocol &= tunnel.mask.protocol;
+		tunnel.val.key &= tunnel.mask.key;
+	}
+#else
+	if (spec && (spec->protocol & mask->protocol))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "without MPLS support the"
+					  " specification cannot be used for"
+					  " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+	if (size <= flow_size) {
+		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+			mlx5_flow_item_gre_ipv4_protocol_update
+				(verbs->attr, MLX5_PROTOCOL_GRE);
+		else
+			mlx5_flow_item_gre_ipv6_protocol_update
+				(verbs->attr, MLX5_PROTOCOL_GRE);
+		mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+	}
+	mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_GRE);
+	flow->ptype = RTE_PTYPE_TUNNEL_GRE;
+	return size;
+}
+
 /**
  * Validate items provided by the user.
  *
@@ -1469,6 +1647,9 @@  mlx5_flow_items(struct rte_eth_dev *dev,
 			ret = mlx5_flow_item_vxlan_gpe(dev, items, flow,
 						       remain, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			ret = mlx5_flow_item_gre(items, flow, remain, error);
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,