[2/3] ethdev: convert testpmd encap commands to new API
diff mbox series

Message ID 1537116824-191205-3-git-send-email-orika@mellanox.com
State Superseded, archived
Delegated to: Ferruh Yigit
Headers show
Series
  • add generic L2/L3 tunnel encapsulation actions
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Ori Kam Sept. 16, 2018, 4:53 p.m. UTC
Currently there are two encapsulation commands in testpmd, one for VXLAN
and one for NVGRE; both of those commands use the old rte encap
command.

This commit updates the commands to work with the new tunnel encap
actions.

The reason we have two different encapsulation commands, one for VXLAN
and one for NVGRE, is ease of use in testpmd; both commands use the
same rte flow action for tunnel encap.

Signed-off-by: Ori Kam <orika@mellanox.com>
---
 app/test-pmd/cmdline_flow.c | 294 +++++++++++++++++++++-----------------------
 1 file changed, 137 insertions(+), 157 deletions(-)

Patch
diff mbox series

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index f926060..349e822 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -262,37 +262,13 @@  struct action_rss_data {
 	uint16_t queue[ACTION_RSS_QUEUE_NUM];
 };
 
-/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
-#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
-
-/** Storage for struct rte_flow_action_vxlan_encap including external data. */
-struct action_vxlan_encap_data {
-	struct rte_flow_action_vxlan_encap conf;
-	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
-	struct rte_flow_item_eth item_eth;
-	struct rte_flow_item_vlan item_vlan;
-	union {
-		struct rte_flow_item_ipv4 item_ipv4;
-		struct rte_flow_item_ipv6 item_ipv6;
-	};
-	struct rte_flow_item_udp item_udp;
-	struct rte_flow_item_vxlan item_vxlan;
-};
+/** Maximum buffer size for the encap data. */
+#define ACTION_TUNNEL_ENCAP_MAX_BUFFER_SIZE 64
 
-/** Maximum number of items in struct rte_flow_action_nvgre_encap. */
-#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
-
-/** Storage for struct rte_flow_action_nvgre_encap including external data. */
-struct action_nvgre_encap_data {
-	struct rte_flow_action_nvgre_encap conf;
-	struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
-	struct rte_flow_item_eth item_eth;
-	struct rte_flow_item_vlan item_vlan;
-	union {
-		struct rte_flow_item_ipv4 item_ipv4;
-		struct rte_flow_item_ipv6 item_ipv6;
-	};
-	struct rte_flow_item_nvgre item_nvgre;
+/** Storage for struct rte_flow_action_tunnel_encap including external data. */
+struct action_tunnel_encap_data {
+	struct rte_flow_action_tunnel_encap conf;
+	uint8_t buf[ACTION_TUNNEL_ENCAP_MAX_BUFFER_SIZE];
 };
 
 /** Maximum number of subsequent tokens and arguments on the stack. */
@@ -2438,8 +2414,8 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.name = "vxlan_encap",
 		.help = "VXLAN encapsulation, uses configuration set by \"set"
 			" vxlan\"",
-		.priv = PRIV_ACTION(VXLAN_ENCAP,
-				    sizeof(struct action_vxlan_encap_data)),
+		.priv = PRIV_ACTION(TUNNEL_ENCAP,
+				    sizeof(struct action_tunnel_encap_data)),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc_action_vxlan_encap,
 	},
@@ -2448,7 +2424,7 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.help = "Performs a decapsulation action by stripping all"
 			" headers of the VXLAN tunnel network overlay from the"
 			" matched flow.",
-		.priv = PRIV_ACTION(VXLAN_DECAP, 0),
+		.priv = PRIV_ACTION(TUNNEL_DECAP, 0),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc,
 	},
@@ -2456,8 +2432,8 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.name = "nvgre_encap",
 		.help = "NVGRE encapsulation, uses configuration set by \"set"
 			" nvgre\"",
-		.priv = PRIV_ACTION(NVGRE_ENCAP,
-				    sizeof(struct action_nvgre_encap_data)),
+		.priv = PRIV_ACTION(TUNNEL_ENCAP,
+				    sizeof(struct action_tunnel_encap_data)),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc_action_nvgre_encap,
 	},
@@ -2466,7 +2442,7 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.help = "Performs a decapsulation action by stripping all"
 			" headers of the NVGRE tunnel network overlay from the"
 			" matched flow.",
-		.priv = PRIV_ACTION(NVGRE_DECAP, 0),
+		.priv = PRIV_ACTION(TUNNEL_DECAP, 0),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc,
 	},
@@ -3034,6 +3010,9 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	return len;
 }
 
+/** IP next protocol UDP. */
+#define IP_PROTO_UDP 0x11
+
 /** Parse VXLAN encap action. */
 static int
 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
@@ -3042,7 +3021,32 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 {
 	struct buffer *out = buf;
 	struct rte_flow_action *action;
-	struct action_vxlan_encap_data *action_vxlan_encap_data;
+	struct action_tunnel_encap_data *action_vxlan_encap_data;
+	struct rte_flow_item_eth eth = { .type = 0, };
+	struct rte_flow_item_vlan vlan = {
+		.tci = vxlan_encap_conf.vlan_tci,
+		.inner_type = 0,
+	};
+	struct rte_flow_item_ipv4 ipv4 = {
+		.hdr =  {
+			.src_addr = vxlan_encap_conf.ipv4_src,
+			.dst_addr = vxlan_encap_conf.ipv4_dst,
+			.next_proto_id = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_ipv6 ipv6 = {
+		.hdr =  {
+			.proto = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_udp udp = {
+		.hdr = {
+			.src_port = vxlan_encap_conf.udp_src,
+			.dst_port = vxlan_encap_conf.udp_dst,
+		},
+	};
+	struct rte_flow_item_vxlan vxlan = { .flags = 0, };
+	uint8_t *header;
 	int ret;
 
 	ret = parse_vc(ctx, token, str, len, buf, size);
@@ -3057,83 +3061,58 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	/* Point to selected object. */
 	ctx->object = out->args.vc.data;
 	ctx->objmask = NULL;
-	/* Set up default configuration. */
+	/* Copy the headers to the buffer. */
 	action_vxlan_encap_data = ctx->object;
-	*action_vxlan_encap_data = (struct action_vxlan_encap_data){
-		.conf = (struct rte_flow_action_vxlan_encap){
-			.definition = action_vxlan_encap_data->items,
-		},
-		.items = {
-			{
-				.type = RTE_FLOW_ITEM_TYPE_ETH,
-				.spec = &action_vxlan_encap_data->item_eth,
-				.mask = &rte_flow_item_eth_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VLAN,
-				.spec = &action_vxlan_encap_data->item_vlan,
-				.mask = &rte_flow_item_vlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_IPV4,
-				.spec = &action_vxlan_encap_data->item_ipv4,
-				.mask = &rte_flow_item_ipv4_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_UDP,
-				.spec = &action_vxlan_encap_data->item_udp,
-				.mask = &rte_flow_item_udp_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VXLAN,
-				.spec = &action_vxlan_encap_data->item_vxlan,
-				.mask = &rte_flow_item_vxlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_END,
-			},
-		},
-		.item_eth.type = 0,
-		.item_vlan = {
-			.tci = vxlan_encap_conf.vlan_tci,
-			.inner_type = 0,
-		},
-		.item_ipv4.hdr = {
-			.src_addr = vxlan_encap_conf.ipv4_src,
-			.dst_addr = vxlan_encap_conf.ipv4_dst,
+	*action_vxlan_encap_data = (struct action_tunnel_encap_data) {
+		.conf = (struct rte_flow_action_tunnel_encap){
+			.buf = action_vxlan_encap_data->buf,
 		},
-		.item_udp.hdr = {
-			.src_port = vxlan_encap_conf.udp_src,
-			.dst_port = vxlan_encap_conf.udp_dst,
-		},
-		.item_vxlan.flags = 0,
+		.buf = {},
 	};
-	memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
+	header = action_vxlan_encap_data->buf;
+	if (vxlan_encap_conf.select_vlan)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+	else if (vxlan_encap_conf.select_ipv4)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	else
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+	memcpy(eth.dst.addr_bytes,
 	       vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
-	memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
+	memcpy(eth.src.addr_bytes,
 	       vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
-	if (!vxlan_encap_conf.select_ipv4) {
-		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
+	memcpy(header, &eth, sizeof(eth));
+	header += sizeof(eth);
+	if (vxlan_encap_conf.select_vlan) {
+		if (vxlan_encap_conf.select_ipv4)
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		else
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		memcpy(header, &vlan, sizeof(vlan));
+		header += sizeof(vlan);
+	}
+	if (vxlan_encap_conf.select_ipv4) {
+		memcpy(header, &ipv4, sizeof(ipv4));
+		header += sizeof(ipv4);
+	} else {
+		memcpy(&ipv6.hdr.src_addr,
 		       &vxlan_encap_conf.ipv6_src,
 		       sizeof(vxlan_encap_conf.ipv6_src));
-		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
+		memcpy(&ipv6.hdr.dst_addr,
 		       &vxlan_encap_conf.ipv6_dst,
 		       sizeof(vxlan_encap_conf.ipv6_dst));
-		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &action_vxlan_encap_data->item_ipv6,
-			.mask = &rte_flow_item_ipv6_mask,
-		};
+		memcpy(header, &ipv6, sizeof(ipv6));
+		header += sizeof(ipv6);
 	}
-	if (!vxlan_encap_conf.select_vlan)
-		action_vxlan_encap_data->items[1].type =
-			RTE_FLOW_ITEM_TYPE_VOID;
-	memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
-	       RTE_DIM(vxlan_encap_conf.vni));
+	memcpy(header, &udp, sizeof(udp));
+	header += sizeof(udp);
+	memcpy(vxlan.vni, vxlan_encap_conf.vni, RTE_DIM(vxlan_encap_conf.vni));
+	memcpy(header, &vxlan, sizeof(vxlan));
+	header += sizeof(vxlan);
+	action_vxlan_encap_data->conf.size = header -
+		action_vxlan_encap_data->buf;
 	action->conf = &action_vxlan_encap_data->conf;
 	return ret;
 }
-
 /** Parse NVGRE encap action. */
 static int
 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
@@ -3142,7 +3121,26 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 {
 	struct buffer *out = buf;
 	struct rte_flow_action *action;
-	struct action_nvgre_encap_data *action_nvgre_encap_data;
+	struct action_tunnel_encap_data *action_nvgre_encap_data;
+	struct rte_flow_item_eth eth = { .type = 0, };
+	struct rte_flow_item_vlan vlan = {
+		.tci = nvgre_encap_conf.vlan_tci,
+		.inner_type = 0,
+	};
+	struct rte_flow_item_ipv4 ipv4 = {
+		.hdr =  {
+			.src_addr = nvgre_encap_conf.ipv4_src,
+			.dst_addr = nvgre_encap_conf.ipv4_dst,
+			.next_proto_id = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_ipv6 ipv6 = {
+		.hdr =  {
+			.proto = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_nvgre nvgre = { .flow_id = 0, };
+	uint8_t *header;
 	int ret;
 
 	ret = parse_vc(ctx, token, str, len, buf, size);
@@ -3157,74 +3155,56 @@  static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	/* Point to selected object. */
 	ctx->object = out->args.vc.data;
 	ctx->objmask = NULL;
-	/* Set up default configuration. */
+	/* Copy the headers to the buffer. */
 	action_nvgre_encap_data = ctx->object;
-	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
-		.conf = (struct rte_flow_action_nvgre_encap){
-			.definition = action_nvgre_encap_data->items,
-		},
-		.items = {
-			{
-				.type = RTE_FLOW_ITEM_TYPE_ETH,
-				.spec = &action_nvgre_encap_data->item_eth,
-				.mask = &rte_flow_item_eth_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VLAN,
-				.spec = &action_nvgre_encap_data->item_vlan,
-				.mask = &rte_flow_item_vlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_IPV4,
-				.spec = &action_nvgre_encap_data->item_ipv4,
-				.mask = &rte_flow_item_ipv4_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
-				.spec = &action_nvgre_encap_data->item_nvgre,
-				.mask = &rte_flow_item_nvgre_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_END,
-			},
-		},
-		.item_eth.type = 0,
-		.item_vlan = {
-			.tci = nvgre_encap_conf.vlan_tci,
-			.inner_type = 0,
-		},
-		.item_ipv4.hdr = {
-		       .src_addr = nvgre_encap_conf.ipv4_src,
-		       .dst_addr = nvgre_encap_conf.ipv4_dst,
+	*action_nvgre_encap_data = (struct action_tunnel_encap_data) {
+		.conf = (struct rte_flow_action_tunnel_encap){
+			.buf = action_nvgre_encap_data->buf,
 		},
-		.item_nvgre.flow_id = 0,
+		.buf = {},
 	};
-	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+	header = action_nvgre_encap_data->buf;
+	if (nvgre_encap_conf.select_vlan)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+	else if (nvgre_encap_conf.select_ipv4)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	else
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+	memcpy(eth.dst.addr_bytes,
 	       nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
-	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+	memcpy(eth.src.addr_bytes,
 	       nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
-	if (!nvgre_encap_conf.select_ipv4) {
-		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
+	memcpy(header, &eth, sizeof(eth));
+	header += sizeof(eth);
+	if (nvgre_encap_conf.select_vlan) {
+		if (nvgre_encap_conf.select_ipv4)
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		else
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		memcpy(header, &vlan, sizeof(vlan));
+		header += sizeof(vlan);
+	}
+	if (nvgre_encap_conf.select_ipv4) {
+		memcpy(header, &ipv4, sizeof(ipv4));
+		header += sizeof(ipv4);
+	} else {
+		memcpy(&ipv6.hdr.src_addr,
 		       &nvgre_encap_conf.ipv6_src,
 		       sizeof(nvgre_encap_conf.ipv6_src));
-		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
+		memcpy(&ipv6.hdr.dst_addr,
 		       &nvgre_encap_conf.ipv6_dst,
 		       sizeof(nvgre_encap_conf.ipv6_dst));
-		action_nvgre_encap_data->items[2] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &action_nvgre_encap_data->item_ipv6,
-			.mask = &rte_flow_item_ipv6_mask,
-		};
+		memcpy(header, &ipv6, sizeof(ipv6));
+		header += sizeof(ipv6);
 	}
-	if (!nvgre_encap_conf.select_vlan)
-		action_nvgre_encap_data->items[1].type =
-			RTE_FLOW_ITEM_TYPE_VOID;
-	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
-	       RTE_DIM(nvgre_encap_conf.tni));
+	memcpy(nvgre.tni, nvgre_encap_conf.tni, RTE_DIM(nvgre_encap_conf.tni));
+	memcpy(header, &nvgre, sizeof(nvgre));
+	header += sizeof(nvgre);
+	action_nvgre_encap_data->conf.size = header -
+		action_nvgre_encap_data->buf;
 	action->conf = &action_nvgre_encap_data->conf;
 	return ret;
 }
-
 /** Parse tokens for destroy command. */
 static int
 parse_destroy(struct context *ctx, const struct token *token,