[2/2] app/testpmd: add NVGRE encap/decap support

Message ID a5f0df2146a2ab4052688b887232401fba77342f.1528988347.git.nelio.laranjeiro@6wind.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: implement VXLAN/NVGRE Encap/Decap in testpmd

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

Nélio Laranjeiro June 14, 2018, 3:09 p.m. UTC
  Due to the complexity of the NVGRE_ENCAP flow action, and because
testpmd does not allocate memory, this patch adds a new command in
testpmd to initialise a global structure containing the necessary
information to build the outer layer of the packet.  This same global
structure is then used by the flow command line in testpmd when the
nvgre_encap action is parsed; at that point, the conversion into such
an action becomes trivial.

This global structure is only used for the encap action.
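
As an illustration (the values below are arbitrary examples and are not
part of this patch), the new command and the flow action are meant to be
combined along these lines:

 testpmd> set nvgre ipv4 4 127.0.0.1 128.0.0.10 11:11:11:11:11:11 22:22:22:22:22:22
 testpmd> flow create 0 ingress pattern eth / end actions nvgre_encap / queue index 0 / end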

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 app/test-pmd/cmdline.c                      |  79 +++++++++++++
 app/test-pmd/cmdline_flow.c                 | 119 ++++++++++++++++++++
 app/test-pmd/testpmd.c                      |   9 ++
 app/test-pmd/testpmd.h                      |  13 +++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  13 +++
 5 files changed, 233 insertions(+)
  

Comments

Iremonger, Bernard June 15, 2018, 9:32 a.m. UTC | #1
Hi Nelio,

> -----Original Message-----
> From: Nelio Laranjeiro [mailto:nelio.laranjeiro@6wind.com]
> Sent: Thursday, June 14, 2018 4:09 PM
> To: dev@dpdk.org; Adrien Mazarguil <adrien.mazarguil@6wind.com>; Lu,
> Wenzhuo <wenzhuo.lu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Iremonger, Bernard <bernard.iremonger@intel.com>
> Cc: Awal, Mohammad Abdul <mohammad.abdul.awal@intel.com>
> Subject: [PATCH 2/2] app/testpmd: add NVGRE encap/decap support
> 
>[...]
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -14927,6 +14930,81 @@ cmdline_parse_inst_t cmd_set_vxlan = {
>  	},
>  };
> 
> +/** Set VXLAN encapsulation details */

VXLAN should be NVGRE.

>[...]
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -408,6 +408,15 @@ struct vxlan_encap_conf vxlan_encap_conf = {
>  	.eth_dst = "\xff\xff\xff\xff\xff\xff",
>  };
> 
> +struct nvgre_encap_conf nvgre_encap_conf = {
> +	.select_ipv4 = 1,
> +	.tni = "\x00\x00\x00",
> +	.ipv4_src = IPv4(127, 0, 0, 1),
> +	.ipv4_dst = IPv4(255, 255, 255, 255),

Should there be .ipv6_src and .ipv6_dst here?

>[...]
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
>[...]
> +- ``nvgre_decap``: Performs a decapsulation action by stripping all headers of
> +  the VXLAN tunnel network overlay from the matched flow.

VXLAN should be NVGRE.

>[...]

Regards,

Bernard.
  
Nélio Laranjeiro June 15, 2018, 11:25 a.m. UTC | #2
Hi Bernard,

On Fri, Jun 15, 2018 at 09:32:02AM +0000, Iremonger, Bernard wrote:
> Hi Nelio,
> 
>[...]
> > @@ -14927,6 +14930,81 @@ cmdline_parse_inst_t cmd_set_vxlan = {
> >  	},
> >  };
> > 
> > +/** Set VXLAN encapsulation details */
> 
> VXLAN should be NVGRE.
>[...]

Right,

> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> > index 1c68c9d30..f54205949 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -408,6 +408,15 @@ struct vxlan_encap_conf vxlan_encap_conf = {
> >  	.eth_dst = "\xff\xff\xff\xff\xff\xff",
> >  };
> > 
> > +struct nvgre_encap_conf nvgre_encap_conf = {
> > +	.select_ipv4 = 1,
> > +	.tni = "\x00\x00\x00",
> > +	.ipv4_src = IPv4(127, 0, 0, 1),
> > +	.ipv4_dst = IPv4(255, 255, 255, 255),
> 
> Should there be .ipv6_src and .ipv6_dst here?
>[...]

Yes indeed initialisation of IPv6 is missing.
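
A minimal sketch of what the added defaults could look like (illustrative
placeholder addresses only, the real values will be picked for the v2):

	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x00\x01", /* placeholder ::1 */
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x11\x11", /* placeholder ::1111 */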

> > +- ``nvgre_decap``: Performs a decapsulation action by stripping all headers of
> > +  the VXLAN tunnel network overlay from the matched flow.
> 
> VXLAN should be NVGRE.
> 
>[...]

Here also,

I will update it in a V2.

Thanks for your review,
  

Patch

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a3b98b2f2..588696d5c 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -785,6 +785,9 @@  static void cmd_help_long_parsed(void *parsed_result,
 			" eth-src eth-dst\n"
 			"       Configure the VXLAN encapsulation for flows.\n\n"
 
+			"nvgre ipv4|ipv6 tni ip-src ip-dst eth-src eth-dst\n"
+			"       Configure the NVGRE encapsulation for flows.\n\n"
+
 			, list_pkt_forwarding_modes()
 		);
 	}
@@ -14927,6 +14930,81 @@  cmdline_parse_inst_t cmd_set_vxlan = {
 	},
 };
 
+/** Set VXLAN encapsulation details */
+struct cmd_set_nvgre_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t nvgre;
+	cmdline_fixed_string_t ip_version;
+	uint32_t tni;
+	cmdline_ipaddr_t ip_src;
+	cmdline_ipaddr_t ip_dst;
+	struct ether_addr eth_src;
+	struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_nvgre_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, set, "set");
+cmdline_parse_token_string_t cmd_set_nvgre_nvgre =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, "nvgre");
+cmdline_parse_token_string_t cmd_set_nvgre_ip_version =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, ip_version,
+				 "ipv4#ipv6");
+cmdline_parse_token_num_t cmd_set_nvgre_tni =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tni, UINT32);
+cmdline_parse_token_num_t cmd_set_nvgre_ip_src =
+	TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_src);
+cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_dst =
+	TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_dst);
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_src =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_src);
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_dst =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_dst);
+
+static void cmd_set_nvgre_parsed(void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_set_nvgre_result *res = parsed_result;
+	uint32_t tni = rte_cpu_to_be_32(res->tni) >> 8;
+
+	if (strcmp(res->ip_version, "ipv4") == 0)
+		nvgre_encap_conf.select_ipv4 = 1;
+	else if (strcmp(res->ip_version, "ipv6") == 0)
+		nvgre_encap_conf.select_ipv4 = 0;
+	else
+		return;
+	memcpy(nvgre_encap_conf.tni, &tni, 3);
+	if (nvgre_encap_conf.select_ipv4) {
+		IPV4_ADDR_TO_UINT(res->ip_src, nvgre_encap_conf.ipv4_src);
+		IPV4_ADDR_TO_UINT(res->ip_dst, nvgre_encap_conf.ipv4_dst);
+	} else {
+		IPV6_ADDR_TO_ARRAY(res->ip_src, nvgre_encap_conf.ipv6_src);
+		IPV6_ADDR_TO_ARRAY(res->ip_dst, nvgre_encap_conf.ipv6_dst);
+	}
+	memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
+	       ETHER_ADDR_LEN);
+	memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+	       ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_nvgre = {
+	.f = cmd_set_nvgre_parsed,
+	.data = NULL,
+	.help_str = "set nvgre ipv4|ipv6 <vni> <ip-src> <ip-dst> <eth-src>"
+		" <eth-dst>",
+	.tokens = {
+		(void *)&cmd_set_nvgre_set,
+		(void *)&cmd_set_nvgre_nvgre,
+		(void *)&cmd_set_nvgre_ip_version,
+		(void *)&cmd_set_nvgre_tni,
+		(void *)&cmd_set_nvgre_ip_src,
+		(void *)&cmd_set_nvgre_ip_dst,
+		(void *)&cmd_set_nvgre_eth_src,
+		(void *)&cmd_set_nvgre_eth_dst,
+		NULL,
+	},
+};
+
 /* Strict link priority scheduling mode setting */
 static void
 cmd_strict_link_prio_parsed(
@@ -17552,6 +17630,7 @@  cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default,
 #endif
 	(cmdline_parse_inst_t *)&cmd_set_vxlan,
+	(cmdline_parse_inst_t *)&cmd_set_nvgre,
 	(cmdline_parse_inst_t *)&cmd_ddp_add,
 	(cmdline_parse_inst_t *)&cmd_ddp_del,
 	(cmdline_parse_inst_t *)&cmd_ddp_get_list,
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 9f609b7db..dd55056fd 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -239,6 +239,8 @@  enum index {
 	ACTION_OF_PUSH_MPLS_ETHERTYPE,
 	ACTION_VXLAN_ENCAP,
 	ACTION_VXLAN_DECAP,
+	ACTION_NVGRE_ENCAP,
+	ACTION_NVGRE_DECAP,
 };
 
 /** Maximum size for pattern in struct rte_flow_item_raw. */
@@ -274,6 +276,21 @@  struct action_vxlan_encap_data {
 	struct rte_flow_item_vxlan item_vxlan;
 };
 
+/** Maximum number of items in struct rte_flow_action_nvgre_encap. */
+#define ACTION_NVGRE_ENCAP_ITEMS_NUM 4
+
+/** Storage for struct rte_flow_action_nvgre_encap including external data. */
+struct action_nvgre_encap_data {
+	struct rte_flow_action_nvgre_encap conf;
+	struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
+	struct rte_flow_item_eth item_eth;
+	union {
+		struct rte_flow_item_ipv4 item_ipv4;
+		struct rte_flow_item_ipv6 item_ipv6;
+	};
+	struct rte_flow_item_nvgre item_nvgre;
+};
+
 /** Maximum number of subsequent tokens and arguments on the stack. */
 #define CTX_STACK_SIZE 16
 
@@ -793,6 +810,8 @@  static const enum index next_action[] = {
 	ACTION_OF_PUSH_MPLS,
 	ACTION_VXLAN_ENCAP,
 	ACTION_VXLAN_DECAP,
+	ACTION_NVGRE_ENCAP,
+	ACTION_NVGRE_DECAP,
 	ZERO,
 };
 
@@ -919,6 +938,9 @@  static int parse_vc_action_rss_queue(struct context *, const struct token *,
 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
 				       const char *, unsigned int, void *,
 				       unsigned int);
+static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
+				       const char *, unsigned int, void *,
+				       unsigned int);
 static int parse_destroy(struct context *, const struct token *,
 			 const char *, unsigned int,
 			 void *, unsigned int);
@@ -2403,6 +2425,24 @@  static const struct token token_list[] = {
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc,
 	},
+	[ACTION_NVGRE_ENCAP] = {
+		.name = "nvgre_encap",
+		.help = "NVGRE encapsulation, uses configuration set by \"set"
+			" nvgre\"",
+		.priv = PRIV_ACTION(NVGRE_ENCAP,
+				    sizeof(struct action_nvgre_encap_data)),
+		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+		.call = parse_vc_action_nvgre_encap,
+	},
+	[ACTION_NVGRE_DECAP] = {
+		.name = "nvgre_decap",
+		.help = "Performs a decapsulation action by stripping all"
+			" headers of the NVGRE tunnel network overlay from the"
+			" matched flow.",
+		.priv = PRIV_ACTION(NVGRE_DECAP, 0),
+		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+		.call = parse_vc,
+	},
 };
 
 /** Remove and return last entry from argument stack. */
@@ -3055,6 +3095,85 @@  parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
 	return ret;
 }
 
+/** Parse NVGRE encap action. */
+static int
+parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
+			    const char *str, unsigned int len,
+			    void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+	struct rte_flow_action *action;
+	struct action_nvgre_encap_data *action_nvgre_encap_data;
+	int ret;
+
+	ret = parse_vc(ctx, token, str, len, buf, size);
+	if (ret < 0)
+		return ret;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return ret;
+	if (!out->args.vc.actions_n)
+		return -1;
+	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+	/* Point to selected object. */
+	ctx->object = out->args.vc.data;
+	ctx->objmask = NULL;
+	/* Set up default configuration. */
+	action_nvgre_encap_data = ctx->object;
+	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
+		.conf = (struct rte_flow_action_nvgre_encap){
+			.definition = action_nvgre_encap_data->items,
+		},
+		.items = {
+			{
+				.type = RTE_FLOW_ITEM_TYPE_ETH,
+				.spec = &action_nvgre_encap_data->item_eth,
+				.mask = &action_nvgre_encap_data->item_eth,
+			},
+			{
+				.type = RTE_FLOW_ITEM_TYPE_IPV4,
+				.spec = &action_nvgre_encap_data->item_ipv4,
+				.mask = &action_nvgre_encap_data->item_ipv4,
+			},
+			{
+				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
+				.spec = &action_nvgre_encap_data->item_nvgre,
+				.mask = &action_nvgre_encap_data->item_nvgre,
+			},
+			{
+				.type = RTE_FLOW_ITEM_TYPE_END,
+			},
+		},
+		.item_eth = { .type = 0, },
+		.item_ipv4.hdr = {
+		       .src_addr = nvgre_encap_conf.ipv4_src,
+		       .dst_addr = nvgre_encap_conf.ipv4_dst,
+		},
+		.item_nvgre.flow_id = 0,
+	};
+	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+	       nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+	       nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
+	if (!nvgre_encap_conf.select_ipv4) {
+		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
+		       &nvgre_encap_conf.ipv6_src,
+		       sizeof(nvgre_encap_conf.ipv6_src));
+		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
+		       &nvgre_encap_conf.ipv6_dst,
+		       sizeof(nvgre_encap_conf.ipv6_dst));
+		action_nvgre_encap_data->items[1] = (struct rte_flow_item){
+			.type = RTE_FLOW_ITEM_TYPE_IPV6,
+			.spec = &action_nvgre_encap_data->item_ipv6,
+			.mask = &action_nvgre_encap_data->item_ipv6,
+		};
+	}
+	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
+	       RTE_DIM(nvgre_encap_conf.tni));
+	action->conf = &action_nvgre_encap_data->conf;
+	return ret;
+}
+
 /** Parse tokens for destroy command. */
 static int
 parse_destroy(struct context *ctx, const struct token *token,
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 1c68c9d30..f54205949 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -408,6 +408,15 @@  struct vxlan_encap_conf vxlan_encap_conf = {
 	.eth_dst = "\xff\xff\xff\xff\xff\xff",
 };
 
+struct nvgre_encap_conf nvgre_encap_conf = {
+	.select_ipv4 = 1,
+	.tni = "\x00\x00\x00",
+	.ipv4_src = IPv4(127, 0, 0, 1),
+	.ipv4_dst = IPv4(255, 255, 255, 255),
+	.eth_src = "\x00\x00\x00\x00\x00\x00",
+	.eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
 /* Forward function declarations */
 static void map_port_queue_stats_mapping_registers(portid_t pi,
 						   struct rte_port *port);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 72c4e8d54..7871b93e1 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -494,6 +494,19 @@  struct vxlan_encap_conf {
 };
 struct vxlan_encap_conf vxlan_encap_conf;
 
+/* NVGRE encap/decap parameters. */
+struct nvgre_encap_conf {
+	uint32_t select_ipv4:1;
+	uint8_t tni[3];
+	rte_be32_t ipv4_src;
+	rte_be32_t ipv4_dst;
+	uint8_t ipv6_src[16];
+	uint8_t ipv6_dst[16];
+	uint8_t eth_src[ETHER_ADDR_LEN];
+	uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct nvgre_encap_conf nvgre_encap_conf;
+
 static inline unsigned int
 lcore_num(void)
 {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 162d1c535..0ee497f11 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -1541,6 +1541,13 @@  Configure the outer layer to encapsulate a packet inside a VXLAN tunnel::
 
  testpmd> set vxlan ipv4|ipv6 (udp-src) (udp-dst) (ip-src) (ip-dst) (mac-src) (mac-dst)
 
+Config NVGRE Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a NVGRE tunnel::
+
+ testpmd> set nvgre ipv4|ipv6 (ip-src) (ip-dst) (mac-src) (mac-dst)
+
 Port Functions
 --------------
 
@@ -3662,6 +3669,12 @@  This section lists supported actions and their attributes, if any.
 - ``vxlan_decap``: Performs a decapsulation action by stripping all headers of
   the VXLAN tunnel network overlay from the matched flow.
 
+- ``nvgre_encap``: Performs a NVGRE encapsulation, outer layer configuration
+  is done through `Config NVGRE Encap outer layers`_.
+
+- ``nvgre_decap``: Performs a decapsulation action by stripping all headers of
+  the VXLAN tunnel network overlay from the matched flow.
+
 Destroying flow rules
 ~~~~~~~~~~~~~~~~~~~~~