[v5,25/80] net/ntnic: add items gtp and actions raw encap/decap

Message ID 20241030213940.3470062-26-sil-plv@napatech.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: Provide flow filter and statistics support

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Serhii Iliushyk Oct. 30, 2024, 9:38 p.m. UTC
From: Danylo Vodopianov <dvo-plv@napatech.com>

Add support for the following rte_flow items and actions
(a minimal usage sketch follows the list):
* RTE_FLOW_ITEM_TYPE_GTP
* RTE_FLOW_ITEM_TYPE_GTP_PSC
* RTE_FLOW_ACTION_TYPE_RAW_ENCAP
* RTE_FLOW_ACTION_TYPE_RAW_DECAP
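
For orientation (not part of the patch), a minimal sketch of how an
application could use the new GTP item through the standard rte_flow
API. The TEID value, queue index, and function name are illustrative,
and the .hdr view of struct rte_flow_item_gtp is assumed from recent
DPDK releases:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative only: match GTPv1-U TEID 0x1234 and steer to queue 0. */
static struct rte_flow *
make_gtp_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = { .hdr.teid = RTE_BE32(0x1234) };
	struct rte_flow_item_gtp gtp_mask = { .hdr.teid = RTE_BE32(UINT32_MAX) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTP,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}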

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
 doc/guides/nics/features/ntnic.ini            |   4 +
 doc/guides/nics/ntnic.rst                     |   4 +
 drivers/net/ntnic/include/create_elements.h   |   4 +
 drivers/net/ntnic/include/flow_api_engine.h   |  40 ++
 drivers/net/ntnic/include/hw_mod_backend.h    |   4 +
 .../ntnic/include/stream_binary_flow_api.h    |  22 ++
 .../profile_inline/flow_api_profile_inline.c  | 366 +++++++++++++++++-
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c | 291 +++++++++++++-
 8 files changed, 730 insertions(+), 5 deletions(-)
  

Patch

diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini
index 4201c8e8b9..4cb9509742 100644
--- a/doc/guides/nics/features/ntnic.ini
+++ b/doc/guides/nics/features/ntnic.ini
@@ -16,6 +16,8 @@  x86-64               = Y
 [rte_flow items]
 any                  = Y
 eth                  = Y
+gtp                  = Y
+gtp_psc              = Y
 icmp                 = Y
 icmp6                = Y
 ipv4                 = Y
@@ -33,3 +35,5 @@  mark                 = Y
 modify_field         = Y
 port_id              = Y
 queue                = Y
+raw_decap            = Y
+raw_encap            = Y
diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst
index 63ad4d95f5..cd7d315456 100644
--- a/doc/guides/nics/ntnic.rst
+++ b/doc/guides/nics/ntnic.rst
@@ -48,6 +48,10 @@  Features
 - Traffic mirroring.
 - VLAN filtering.
 - Packet modification: NAT, TTL decrement, DSCP tagging
+- Tunnel types: GTP.
+- Encapsulation and decapsulation of GTP data.
+- RX VLAN stripping via raw decap.
+- TX VLAN insertion via raw encap.
 
 Limitations
 ~~~~~~~~~~~
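
As a sketch of the RX VLAN stripping case listed above (illustrative,
not part of the patch): a RAW_DECAP template only needs valid
EtherType fields, because the driver parses the template to learn
which headers to strip. Note that the inline profile requires
RAW_ENCAP, when present, to follow RAW_DECAP.

#include <rte_ether.h>
#include <rte_flow.h>

/* Template describing the outer ETH + single VLAN tag to remove. */
static uint8_t decap_data[sizeof(struct rte_ether_hdr) +
	sizeof(struct rte_vlan_hdr)];

static void build_vlan_decap(struct rte_flow_action_raw_decap *raw_decap)
{
	struct rte_ether_hdr *eth = (struct rte_ether_hdr *)decap_data;
	struct rte_vlan_hdr *vlan = (struct rte_vlan_hdr *)(eth + 1);

	/* Header types are what matters; addresses may stay zero. */
	eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	vlan->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

	raw_decap->data = decap_data;
	raw_decap->size = sizeof(decap_data);
}
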
diff --git a/drivers/net/ntnic/include/create_elements.h b/drivers/net/ntnic/include/create_elements.h
index 179542d2b2..70e6cad195 100644
--- a/drivers/net/ntnic/include/create_elements.h
+++ b/drivers/net/ntnic/include/create_elements.h
@@ -27,6 +27,8 @@  struct cnv_attr_s {
 
 struct cnv_action_s {
 	struct rte_flow_action flow_actions[MAX_ACTIONS];
+	struct flow_action_raw_encap encap;
+	struct flow_action_raw_decap decap;
 	struct rte_flow_action_queue queue;
 };
 
@@ -52,6 +54,8 @@  enum nt_rte_flow_item_type {
 };
 
 extern rte_spinlock_t flow_lock;
+
+int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size, struct rte_flow_item *out);
 int convert_error(struct rte_flow_error *error, struct rte_flow_error *rte_flow_error);
 int create_attr(struct cnv_attr_s *attribute, const struct rte_flow_attr *attr);
 int create_match_elements(struct cnv_match_s *match, const struct rte_flow_item items[],
diff --git a/drivers/net/ntnic/include/flow_api_engine.h b/drivers/net/ntnic/include/flow_api_engine.h
index f6557d0d20..b1d39b919b 100644
--- a/drivers/net/ntnic/include/flow_api_engine.h
+++ b/drivers/net/ntnic/include/flow_api_engine.h
@@ -56,6 +56,29 @@  enum res_type_e {
 
 #define MAX_MATCH_FIELDS 16
 
+/*
+ * Tunnel encapsulation header definition
+ */
+#define MAX_TUN_HDR_SIZE 128
+struct tunnel_header_s {
+	union {
+		uint8_t hdr8[MAX_TUN_HDR_SIZE];
+		uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+	} d;
+	uint32_t user_port_id;
+	uint8_t len;
+
+	uint8_t nb_vlans;
+
+	uint8_t ip_version;	/* 4: v4, 6: v6 */
+	uint16_t ip_csum_precalc;
+
+	uint8_t new_outer;
+	uint8_t l2_len;
+	uint8_t l3_len;
+	uint8_t l4_len;
+};
+
 struct match_elem_s {
 	int masked_for_tcam;	/* if potentially selected for TCAM */
 	uint32_t e_word[4];
@@ -124,6 +147,23 @@  struct nic_flow_def {
 
 	int full_offload;
 
+	/*
+	 * Action push tunnel
+	 */
+	struct tunnel_header_s tun_hdr;
+
+	/*
+	 * If the DPDK rte_flow tunnel helper API is used,
+	 * this holds the tunnel used in the flow
+	 */
+	struct tunnel_s *tnl;
+
+	/*
+	 * Header Stripper
+	 */
+	int header_strip_end_dyn;
+	int header_strip_end_ofs;
+
 	/*
 	 * Modify field
 	 */
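
For orientation (our illustration, not from the patch): after the
RAW_ENCAP parser added later in this patch walks a 50-byte
ETH/IPv4/UDP/GTP template, tun_hdr ends up roughly as:

/* tun_hdr.len        = 50
 * tun_hdr.nb_vlans   = 0
 * tun_hdr.ip_version = 4
 * tun_hdr.l2_len     = 14   (Ethernet)
 * tun_hdr.l3_len     = 20   (IPv4)
 * tun_hdr.l4_len     = 8    (UDP)
 * tun_hdr.new_outer  = 1
 */
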
diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h
index 6a8a38636f..1b45ea4296 100644
--- a/drivers/net/ntnic/include/hw_mod_backend.h
+++ b/drivers/net/ntnic/include/hw_mod_backend.h
@@ -175,6 +175,10 @@  enum {
 	PROT_L4_ICMP = 4
 };
 
+enum {
+	PROT_TUN_GTPV1U = 6,
+};
+
 enum {
 	PROT_TUN_L3_OTHER = 0,
 	PROT_TUN_L3_IPV4 = 1,
diff --git a/drivers/net/ntnic/include/stream_binary_flow_api.h b/drivers/net/ntnic/include/stream_binary_flow_api.h
index d878b848c2..8097518d61 100644
--- a/drivers/net/ntnic/include/stream_binary_flow_api.h
+++ b/drivers/net/ntnic/include/stream_binary_flow_api.h
@@ -18,6 +18,7 @@ 
 
 #define FLOW_MAX_QUEUES 128
 
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
 /*
  * Flow eth dev profile determines how the FPGA module resources are
  * managed and what features are available
@@ -31,6 +32,27 @@  struct flow_queue_id_s {
 	int hw_id;
 };
 
+/*
+ * RTE_FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+	uint8_t *data;
+	uint8_t *preserve;
+	size_t size;
+	struct rte_flow_item items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
+/*
+ * RTE_FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+	uint8_t *data;
+	size_t size;
+	struct rte_flow_item items[RAW_ENCAP_DECAP_ELEMS_MAX];
+	int item_count;
+};
+
 struct flow_eth_dev;             /* port device */
 struct flow_handle;
 
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index 4cadd3169b..7b932c7cc5 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -463,6 +463,202 @@  static int interpret_flow_actions(const struct flow_eth_dev *dev,
 
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_RAW_ENCAP", dev);
+
+			if (action[aidx].conf) {
+				const struct flow_action_raw_encap *encap =
+					(const struct flow_action_raw_encap *)action[aidx].conf;
+				const struct flow_action_raw_encap *encap_mask = action_mask
+					? (const struct flow_action_raw_encap *)
+					action_mask[aidx].conf
+					: NULL;
+				const struct rte_flow_item *items = encap->items;
+
+				if (encap_decap_order != 1) {
+					NT_LOG(ERR, FILTER,
+						"ERROR: - RAW_ENCAP must follow RAW_DECAP.");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+					return -1;
+				}
+
+				if (encap->size == 0 || encap->size > 255 ||
+					encap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+						"ERROR: - RAW_ENCAP data/size invalid.");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+					return -1;
+				}
+
+				encap_decap_order = 2;
+
+				fd->tun_hdr.len = (uint8_t)encap->size;
+
+				if (encap_mask) {
+					memcpy_mask_if(fd->tun_hdr.d.hdr8, encap->data,
+						encap_mask->data, fd->tun_hdr.len);
+
+				} else {
+					memcpy(fd->tun_hdr.d.hdr8, encap->data, fd->tun_hdr.len);
+				}
+
+				while (items->type != RTE_FLOW_ITEM_TYPE_END) {
+					switch (items->type) {
+					case RTE_FLOW_ITEM_TYPE_ETH:
+						fd->tun_hdr.l2_len = 14;
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_VLAN:
+						fd->tun_hdr.nb_vlans += 1;
+						fd->tun_hdr.l2_len += 4;
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_IPV4:
+						fd->tun_hdr.ip_version = 4;
+						fd->tun_hdr.l3_len = sizeof(struct rte_ipv4_hdr);
+						fd->tun_hdr.new_outer = 1;
+
+						/* Patch length */
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 2] = 0x07;
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 3] = 0xfd;
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_IPV6:
+						fd->tun_hdr.ip_version = 6;
+						fd->tun_hdr.l3_len = sizeof(struct rte_ipv6_hdr);
+						fd->tun_hdr.new_outer = 1;
+
+						/* Patch length */
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 4] = 0x07;
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 5] = 0xfd;
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_SCTP:
+						fd->tun_hdr.l4_len = sizeof(struct rte_sctp_hdr);
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_TCP:
+						fd->tun_hdr.l4_len = sizeof(struct rte_tcp_hdr);
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_UDP:
+						fd->tun_hdr.l4_len = sizeof(struct rte_udp_hdr);
+
+						/* Patch length */
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len +
+							fd->tun_hdr.l3_len + 4] = 0x07;
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len +
+							fd->tun_hdr.l3_len + 5] = 0xfd;
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_ICMP:
+						fd->tun_hdr.l4_len = sizeof(struct rte_icmp_hdr);
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_ICMP6:
+						fd->tun_hdr.l4_len =
+							sizeof(struct rte_flow_item_icmp6);
+						break;
+
+					case RTE_FLOW_ITEM_TYPE_GTP:
+						/* Patch length */
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len +
+							fd->tun_hdr.l3_len +
+							fd->tun_hdr.l4_len + 2] = 0x07;
+						fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len +
+							fd->tun_hdr.l3_len +
+							fd->tun_hdr.l4_len + 3] = 0xfd;
+						break;
+
+					default:
+						break;
+					}
+
+					items++;
+				}
+
+				if (fd->tun_hdr.nb_vlans > 3) {
+					NT_LOG(ERR, FILTER,
+						"ERROR: - Encapsulation with %d vlans not supported.",
+						(int)fd->tun_hdr.nb_vlans);
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+					return -1;
+				}
+
+				/* Convert encap data to 128-bit little endian */
+				for (size_t i = 0; i < (encap->size + 15) / 16; ++i) {
+					uint8_t *data = fd->tun_hdr.d.hdr8 + i * 16;
+
+					for (unsigned int j = 0; j < 8; ++j) {
+						uint8_t t = data[j];
+						data[j] = data[15 - j];
+						data[15 - j] = t;
+					}
+				}
+			}
+
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+			NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_RAW_DECAP", dev);
+
+			if (action[aidx].conf) {
+				/* Mask is N/A for RAW_DECAP */
+				const struct flow_action_raw_decap *decap =
+					(const struct flow_action_raw_decap *)action[aidx].conf;
+
+				if (encap_decap_order != 0) {
+					NT_LOG(ERR, FILTER,
+						"ERROR: - RAW_ENCAP must follow RAW_DECAP.");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+					return -1;
+				}
+
+				if (decap->item_count < 2) {
+					NT_LOG(ERR, FILTER,
+						"ERROR: - RAW_DECAP must decap something.");
+					flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+					return -1;
+				}
+
+				encap_decap_order = 1;
+
+				switch (decap->items[decap->item_count - 2].type) {
+				case RTE_FLOW_ITEM_TYPE_ETH:
+				case RTE_FLOW_ITEM_TYPE_VLAN:
+					fd->header_strip_end_dyn = DYN_L3;
+					fd->header_strip_end_ofs = 0;
+					break;
+
+				case RTE_FLOW_ITEM_TYPE_IPV4:
+				case RTE_FLOW_ITEM_TYPE_IPV6:
+					fd->header_strip_end_dyn = DYN_L4;
+					fd->header_strip_end_ofs = 0;
+					break;
+
+				case RTE_FLOW_ITEM_TYPE_SCTP:
+				case RTE_FLOW_ITEM_TYPE_TCP:
+				case RTE_FLOW_ITEM_TYPE_UDP:
+				case RTE_FLOW_ITEM_TYPE_ICMP:
+				case RTE_FLOW_ITEM_TYPE_ICMP6:
+					fd->header_strip_end_dyn = DYN_L4_PAYLOAD;
+					fd->header_strip_end_ofs = 0;
+					break;
+
+				case RTE_FLOW_ITEM_TYPE_GTP:
+					fd->header_strip_end_dyn = DYN_TUN_L3;
+					fd->header_strip_end_ofs = 0;
+					break;
+
+				default:
+					fd->header_strip_end_dyn = DYN_L2;
+					fd->header_strip_end_ofs = 0;
+					break;
+				}
+			}
+
+			break;
+
 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
 			NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_MODIFY_FIELD", dev);
 			{
@@ -1765,6 +1961,174 @@  static int interpret_flow_elements(const struct flow_eth_dev *dev,
 
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTP:
+			NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_GTP",
+				dev->ndev->adapter_no, dev->port);
+			{
+				const struct rte_gtp_hdr *gtp_spec =
+					(const struct rte_gtp_hdr *)elem[eidx].spec;
+				const struct rte_gtp_hdr *gtp_mask =
+					(const struct rte_gtp_hdr *)elem[eidx].mask;
+
+				if (gtp_spec == NULL || gtp_mask == NULL) {
+					fd->tunnel_prot = PROT_TUN_GTPV1U;
+					break;
+				}
+
+				if (gtp_mask->gtp_hdr_info != 0 ||
+					gtp_mask->msg_type != 0 || gtp_mask->plen != 0) {
+					NT_LOG(ERR, FILTER,
+						"Requested GTP field is not supported by running SW version");
+					flow_nic_set_error(ERR_FAILED, error);
+					return -1;
+				}
+
+				if (gtp_mask->teid) {
+					if (sw_counter < 2) {
+						uint32_t *sw_data =
+							&packet_data[1 - sw_counter];
+						uint32_t *sw_mask =
+							&packet_mask[1 - sw_counter];
+
+						sw_mask[0] = ntohl(gtp_mask->teid);
+						sw_data[0] =
+							ntohl(gtp_spec->teid) & sw_mask[0];
+
+						km_add_match_elem(&fd->km, &sw_data[0],
+							&sw_mask[0], 1,
+							DYN_L4_PAYLOAD, 4);
+						set_key_def_sw(key_def, sw_counter,
+							DYN_L4_PAYLOAD, 4);
+						sw_counter += 1;
+
+					} else if (qw_counter < 2 && qw_free > 0) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+							qw_counter * 4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+							qw_counter * 4];
+
+						qw_data[0] = ntohl(gtp_spec->teid);
+						qw_data[1] = 0;
+						qw_data[2] = 0;
+						qw_data[3] = 0;
+
+						qw_mask[0] = ntohl(gtp_mask->teid);
+						qw_mask[1] = 0;
+						qw_mask[2] = 0;
+						qw_mask[3] = 0;
+
+						qw_data[0] &= qw_mask[0];
+						qw_data[1] &= qw_mask[1];
+						qw_data[2] &= qw_mask[2];
+						qw_data[3] &= qw_mask[3];
+
+						km_add_match_elem(&fd->km, &qw_data[0],
+							&qw_mask[0], 4,
+							DYN_L4_PAYLOAD, 4);
+						set_key_def_qw(key_def, qw_counter,
+							DYN_L4_PAYLOAD, 4);
+						qw_counter += 1;
+						qw_free -= 1;
+
+					} else {
+						NT_LOG(ERR, FILTER,
+							"Key size too big. Out of SW-QW resources.");
+						flow_nic_set_error(ERR_FAILED, error);
+						return -1;
+					}
+				}
+
+				fd->tunnel_prot = PROT_TUN_GTPV1U;
+			}
+
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_GTP_PSC",
+				dev->ndev->adapter_no, dev->port);
+			{
+				const struct rte_gtp_psc_generic_hdr *gtp_psc_spec =
+					(const struct rte_gtp_psc_generic_hdr *)elem[eidx].spec;
+				const struct rte_gtp_psc_generic_hdr *gtp_psc_mask =
+					(const struct rte_gtp_psc_generic_hdr *)elem[eidx].mask;
+
+				if (gtp_psc_spec == NULL || gtp_psc_mask == NULL) {
+					fd->tunnel_prot = PROT_TUN_GTPV1U;
+					break;
+				}
+
+				if (gtp_psc_mask->type != 0 ||
+					gtp_psc_mask->ext_hdr_len != 0) {
+					NT_LOG(ERR, FILTER,
+						"Requested GTP PSC field is not supported by running SW version");
+					flow_nic_set_error(ERR_FAILED, error);
+					return -1;
+				}
+
+				if (gtp_psc_mask->qfi) {
+					if (sw_counter < 2) {
+						uint32_t *sw_data =
+							&packet_data[1 - sw_counter];
+						uint32_t *sw_mask =
+							&packet_mask[1 - sw_counter];
+
+						sw_mask[0] = ntohl(gtp_psc_mask->qfi);
+						sw_data[0] = ntohl(gtp_psc_spec->qfi) &
+							sw_mask[0];
+
+						km_add_match_elem(&fd->km, &sw_data[0],
+							&sw_mask[0], 1,
+							DYN_L4_PAYLOAD, 14);
+						set_key_def_sw(key_def, sw_counter,
+							DYN_L4_PAYLOAD, 14);
+						sw_counter += 1;
+
+					} else if (qw_counter < 2 && qw_free > 0) {
+						uint32_t *qw_data =
+							&packet_data[2 + 4 -
+							qw_counter * 4];
+						uint32_t *qw_mask =
+							&packet_mask[2 + 4 -
+							qw_counter * 4];
+
+						qw_data[0] = ntohl(gtp_psc_spec->qfi);
+						qw_data[1] = 0;
+						qw_data[2] = 0;
+						qw_data[3] = 0;
+
+						qw_mask[0] = ntohl(gtp_psc_mask->qfi);
+						qw_mask[1] = 0;
+						qw_mask[2] = 0;
+						qw_mask[3] = 0;
+
+						qw_data[0] &= qw_mask[0];
+						qw_data[1] &= qw_mask[1];
+						qw_data[2] &= qw_mask[2];
+						qw_data[3] &= qw_mask[3];
+
+						km_add_match_elem(&fd->km, &qw_data[0],
+							&qw_mask[0], 4,
+							DYN_L4_PAYLOAD, 14);
+						set_key_def_qw(key_def, qw_counter,
+							DYN_L4_PAYLOAD, 14);
+						qw_counter += 1;
+						qw_free -= 1;
+
+					} else {
+						NT_LOG(ERR, FILTER,
+							"Key size too big. Out of SW-QW resources.");
+						flow_nic_set_error(ERR_FAILED, error);
+						return -1;
+					}
+				}
+
+				fd->tunnel_prot = PROT_TUN_GTPV1U;
+			}
+
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
 			NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_PORT_ID",
 				dev->ndev->adapter_no, dev->port);
@@ -1928,7 +2292,7 @@  static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
 	uint16_t forced_vlan_vid __rte_unused, uint16_t caller_id,
 	struct rte_flow_error *error, uint32_t port_id,
 	uint32_t num_dest_port __rte_unused, uint32_t num_queues __rte_unused,
-	uint32_t *packet_data __rte_unused, uint32_t *packet_mask __rte_unused,
+	uint32_t *packet_data, uint32_t *packet_mask __rte_unused,
 	struct flm_flow_key_def_s *key_def __rte_unused)
 {
 	struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
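
The trailing conversion loop in the RAW_ENCAP handler above stores the
template in the byte order the hardware expects by reversing each
16-byte chunk in place. A standalone equivalent of that transform
(function name ours, not the driver's):

#include <stddef.h>
#include <stdint.h>

static void swap_to_128bit_le(uint8_t *hdr, size_t size)
{
	for (size_t i = 0; i < (size + 15) / 16; ++i) {
		uint8_t *chunk = hdr + i * 16;

		/* Swap byte j with byte 15 - j within the chunk. */
		for (unsigned int j = 0; j < 8; ++j) {
			uint8_t t = chunk[j];
			chunk[j] = chunk[15 - j];
			chunk[15 - j] = t;
		}
	}
}
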
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
index b9d723c9dd..20b5cb2835 100644
--- a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -16,6 +16,224 @@ 
 rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;
 static struct rte_flow nt_flows[MAX_RTE_FLOWS];
 
+int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size, struct rte_flow_item *out)
+{
+	int hdri = 0;
+	int pkti = 0;
+
+	/* Ethernet */
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	if (size - pkti < (int)sizeof(struct rte_ether_hdr))
+		return -1;
+
+	out[hdri].type = RTE_FLOW_ITEM_TYPE_ETH;
+	out[hdri].spec = &data[pkti];
+	out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+	rte_be16_t ether_type = ((struct rte_ether_hdr *)&data[pkti])->ether_type;
+
+	hdri += 1;
+	pkti += sizeof(struct rte_ether_hdr);
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* VLAN */
+	while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+		ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||
+		ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {
+		if (size - pkti == 0)
+			goto interpret_end;
+
+		if (size - pkti < (int)sizeof(struct rte_vlan_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_VLAN;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_vlan_hdr);
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 3 */
+	uint8_t next_header = 0;
+
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) && (data[pkti] & 0xF0) == 0x40) {
+		if (size - pkti < (int)sizeof(struct rte_ipv4_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_IPV4;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 9];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv4_hdr);
+
+	} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&
+			(data[pkti] & 0xF0) == 0x60) {
+		if (size - pkti < (int)sizeof(struct rte_ipv6_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_IPV6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		next_header = data[pkti + 6];
+
+		hdri += 1;
+		pkti += sizeof(struct rte_ipv6_hdr);
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* Layer 4 */
+	int gtpu_encap = 0;
+
+	if (next_header == 1) {	/* ICMP */
+		if (size - pkti < (int)sizeof(struct rte_icmp_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_ICMP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_icmp_hdr);
+
+	} else if (next_header == 58) {	/* ICMP6 */
+		if (size - pkti < (int)sizeof(struct rte_flow_item_icmp6))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_ICMP6;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_flow_item_icmp6);
+
+	} else if (next_header == 6) {	/* TCP */
+		if (size - pkti < (int)sizeof(struct rte_tcp_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_TCP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_tcp_hdr);
+
+	} else if (next_header == 17) {	/* UDP */
+		if (size - pkti < (int)sizeof(struct rte_udp_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_UDP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==
+			rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+
+		hdri += 1;
+		pkti += sizeof(struct rte_udp_hdr);
+
+	} else if (next_header == 132) {/* SCTP */
+		if (size - pkti < (int)sizeof(struct rte_sctp_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_SCTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_sctp_hdr);
+
+	} else {
+		return -1;
+	}
+
+	if (size - pkti == 0)
+		goto interpret_end;
+
+	/* GTPv1-U */
+	if (gtpu_encap) {
+		if (size - pkti < (int)sizeof(struct rte_gtp_hdr))
+			return -1;
+
+		out[hdri].type = RTE_FLOW_ITEM_TYPE_GTP;
+		out[hdri].spec = &data[pkti];
+		out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+		int extension_present_bit = ((struct rte_gtp_hdr *)&data[pkti])->e;
+
+		hdri += 1;
+		pkti += sizeof(struct rte_gtp_hdr);
+
+		if (extension_present_bit) {
+			if (size - pkti < (int)sizeof(struct rte_gtp_hdr_ext_word))
+				return -1;
+
+			out[hdri].type = RTE_FLOW_ITEM_TYPE_GTP;
+			out[hdri].spec = &data[pkti];
+			out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+			uint8_t next_ext = ((struct rte_gtp_hdr_ext_word *)&data[pkti])->next_ext;
+
+			hdri += 1;
+			pkti += sizeof(struct rte_gtp_hdr_ext_word);
+
+			while (next_ext) {
+				size_t ext_len = data[pkti] * 4;
+
+				if (size - pkti < (int)ext_len)
+					return -1;
+
+				out[hdri].type = RTE_FLOW_ITEM_TYPE_GTP;
+				out[hdri].spec = &data[pkti];
+				out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;
+
+				next_ext = data[pkti + ext_len - 1];
+
+				hdri += 1;
+				pkti += ext_len;
+			}
+		}
+	}
+
+	if (size - pkti != 0)
+		return -1;
+
+interpret_end:
+	out[hdri].type = RTE_FLOW_ITEM_TYPE_END;
+	out[hdri].spec = NULL;
+	out[hdri].mask = NULL;
+
+	return hdri + 1;
+}
+
 int convert_error(struct rte_flow_error *error, struct rte_flow_error *rte_flow_error)
 {
 	if (error) {
@@ -95,13 +313,78 @@  int create_match_elements(struct cnv_match_s *match, const struct rte_flow_item
 	return (type >= 0) ? 0 : -1;
 }
 
-int create_action_elements_inline(struct cnv_action_s *action __rte_unused,
-	const struct rte_flow_action actions[] __rte_unused,
-	int max_elem __rte_unused,
-	uint32_t queue_offset __rte_unused)
+int create_action_elements_inline(struct cnv_action_s *action,
+	const struct rte_flow_action actions[],
+	int max_elem,
+	uint32_t queue_offset)
 {
+	int aidx = 0;
 	int type = -1;
 
+	do {
+		type = actions[aidx].type;
+		if (type >= 0) {
+			action->flow_actions[aidx].type = type;
+
+			/*
+			 * Non-compatible actions handled here
+			 */
+			switch (type) {
+			case RTE_FLOW_ACTION_TYPE_RAW_DECAP: {
+				const struct rte_flow_action_raw_decap *decap =
+					(const struct rte_flow_action_raw_decap *)actions[aidx].conf;
+				int item_count = interpret_raw_data(decap->data, NULL, decap->size,
+					action->decap.items);
+
+				if (item_count < 0)
+					return item_count;
+				action->decap.data = decap->data;
+				action->decap.size = decap->size;
+				action->decap.item_count = item_count;
+				action->flow_actions[aidx].conf = &action->decap;
+			}
+			break;
+
+			case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: {
+				const struct rte_flow_action_raw_encap *encap =
+					(const struct rte_flow_action_raw_encap *)actions[aidx].conf;
+				int item_count = interpret_raw_data(encap->data, encap->preserve,
+					encap->size, action->encap.items);
+
+				if (item_count < 0)
+					return item_count;
+				action->encap.data = encap->data;
+				action->encap.preserve = encap->preserve;
+				action->encap.size = encap->size;
+				action->encap.item_count = item_count;
+				action->flow_actions[aidx].conf = &action->encap;
+			}
+			break;
+
+			case RTE_FLOW_ACTION_TYPE_QUEUE: {
+				const struct rte_flow_action_queue *queue =
+					(const struct rte_flow_action_queue *)actions[aidx].conf;
+				action->queue.index = queue->index + queue_offset;
+				action->flow_actions[aidx].conf = &action->queue;
+			}
+			break;
+
+			default: {
+				action->flow_actions[aidx].conf = actions[aidx].conf;
+			}
+			break;
+			}
+
+			aidx++;
+
+			if (aidx == max_elem)
+				return -1;
+		}
+
+	} while (type >= 0 && type != RTE_FLOW_ACTION_TYPE_END);
+
 	return (type >= 0) ? 0 : -1;
 }