[07/25] net/nfp: prepare for the decap action of IPv4 UDP tunnel

Message ID 1666063359-34283-8-git-send-email-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: add the extend rte_flow offload support of nfp PMD

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He Oct. 18, 2022, 3:22 a.m. UTC
  Add the related data structures and functions, preparing for
the decap action of IPv4 UDP tunnel.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_cmsg.c | 118 ++++++++
 drivers/net/nfp/flower/nfp_flower_cmsg.h |  94 +++++++
 drivers/net/nfp/nfp_flow.c               | 461 ++++++++++++++++++++++++++++++-
 drivers/net/nfp/nfp_flow.h               |  17 ++
 4 files changed, 675 insertions(+), 15 deletions(-)
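
For context, below is a minimal, hypothetical application-side sketch of the
generic rte_flow tunnel offload API that this patch prepares the nfp PMD to
serve. It is not part of the patch; the VXLAN tunnel type, the port_id
handling and the helper name are assumptions for illustration only, and both
rte_flow_tunnel_* functions are experimental API.

    #include <rte_flow.h>

    static int
    get_vxlan_decap_actions(uint16_t port_id)
    {
        int ret;
        uint32_t num_pmd_actions = 0;
        struct rte_flow_error error;
        struct rte_flow_action *pmd_actions = NULL;
        struct rte_flow_tunnel tunnel = {
            .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        };

        /* Ask the PMD for its private decap actions for this tunnel */
        ret = rte_flow_tunnel_decap_set(port_id, &tunnel,
                &pmd_actions, &num_pmd_actions, &error);
        if (ret != 0)
            return ret;

        /* ... prepend pmd_actions when creating the rte_flow rule ... */

        /* Hand the PMD-owned actions back once the rule is created */
        return rte_flow_tunnel_action_decap_release(port_id,
                pmd_actions, num_pmd_actions, &error);
    }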
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index 8983178..f18f3de 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -304,3 +304,121 @@ 
 
 	return 0;
 }
+
+int
+nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower)
+{
+	uint16_t cnt;
+	uint32_t count = 0;
+	struct rte_mbuf *mbuf;
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv4_addr_entry *entry;
+	struct nfp_flower_cmsg_tun_ipv4_addr *msg;
+
+	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+	if (mbuf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr");
+		return -ENOMEM;
+	}
+
+	msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_IPS, sizeof(*msg));
+
+	priv = app_fw_flower->flow_priv;
+	rte_spinlock_lock(&priv->ipv4_off_lock);
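+	/* Snapshot the complete offloaded address list into this one cmsg */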
+	LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+			rte_spinlock_unlock(&priv->ipv4_off_lock);
+			PMD_DRV_LOG(ERR, "IPv4 offload exceeds limit.");
+			rte_pktmbuf_free(mbuf);
+			return -ERANGE;
+		}
+		msg->ipv4_addr[count] = entry->ipv4_addr;
+		count++;
+	}
+	msg->count = rte_cpu_to_be_32(count);
+	rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+	cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+	if (cnt == 0) {
+		PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+		rte_pktmbuf_free(mbuf);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_fl_rule_metadata *nfp_flow_meta,
+		uint16_t mac_idx,
+		bool is_del)
+{
+	uint16_t cnt;
+	struct rte_mbuf *mbuf;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_cmsg_pre_tun_rule *msg;
+
+	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+	if (mbuf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule");
+		return -ENOMEM;
+	}
+
+	msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, sizeof(*msg));
+
+	meta_tci = (struct nfp_flower_meta_tci *)((char *)nfp_flow_meta +
+			sizeof(struct nfp_fl_rule_metadata));
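+	/* A TCI of 0xffff signals to the firmware that no VLAN match is required */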
+	if (meta_tci->tci)
+		msg->vlan_tci = meta_tci->tci;
+	else
+		msg->vlan_tci = 0xffff;
+
+	if (is_del)
+		msg->flags = rte_cpu_to_be_32(NFP_TUN_PRE_TUN_RULE_DEL);
+
+	msg->port_idx = rte_cpu_to_be_16(mac_idx);
+	msg->host_ctx_id = nfp_flow_meta->host_ctx_id;
+
+	cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+	if (cnt == 0) {
+		PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+		rte_pktmbuf_free(mbuf);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_ether_addr *mac,
+		uint16_t mac_idx,
+		bool is_del)
+{
+	uint16_t cnt;
+	struct rte_mbuf *mbuf;
+	struct nfp_flower_cmsg_tun_mac *msg;
+
+	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+	if (mbuf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac");
+		return -ENOMEM;
+	}
+
+	msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_MAC, sizeof(*msg));
+
+	msg->count = rte_cpu_to_be_16(1);
+	msg->index = rte_cpu_to_be_16(mac_idx);
+	rte_ether_addr_copy(mac, &msg->addr);
+	if (is_del)
+		msg->flags = rte_cpu_to_be_16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
+
+	cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+	if (cnt == 0) {
+		PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+		rte_pktmbuf_free(mbuf);
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index d1e0562..0933dac 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -195,6 +195,91 @@  struct nfp_flower_cmsg_tun_neigh_v6 {
 	struct nfp_flower_tun_neigh common;
 };
 
+#define NFP_TUN_PRE_TUN_RULE_DEL    (1 << 0)
+#define NFP_TUN_PRE_TUN_IDX_BIT     (1 << 3)
+#define NFP_TUN_PRE_TUN_IPV6_BIT    (1 << 7)
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE
+ * Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *       +---------------------------------------------------------------+
+ *     0 |                             FLAGS                             |
+ *       +---------------------------------------------------------------+
+ *     1 |         MAC_IDX               |            VLAN_ID            |
+ *       +---------------------------------------------------------------+
+ *     2 |                           HOST_CTX                            |
+ *       +---------------------------------------------------------------+
+ */
+struct nfp_flower_cmsg_pre_tun_rule {
+	rte_be32_t flags;
+	rte_be16_t port_idx;
+	rte_be16_t vlan_tci;
+	rte_be32_t host_ctx_id;
+};
+
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG  0x2
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_MAC
+ *     Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ *    -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *    Word  +-----------------------+---+-+-+---------------+---------------+
+ *       0  |              spare    |NBI|D|F| Amount of MAC’s in this msg   |
+ *          +---------------+-------+---+-+-+---------------+---------------+
+ *       1  |            Index 0            |     MAC[0]    |     MAC[1]    |
+ *          +---------------+---------------+---------------+---------------+
+ *       2  |     MAC[2]    |     MAC[3]    |     MAC[4]    |     MAC[5]    |
+ *          +---------------+---------------+---------------+---------------+
+ *       3  |            Index 1            |     MAC[0]    |     MAC[1]    |
+ *          +---------------+---------------+---------------+---------------+
+ *       4  |     MAC[2]    |     MAC[3]    |     MAC[4]    |     MAC[5]    |
+ *          +---------------+---------------+---------------+---------------+
+ *                                        ...
+ *          +---------------+---------------+---------------+---------------+
+ *     2N-1 |            Index N            |     MAC[0]    |     MAC[1]    |
+ *          +---------------+---------------+---------------+---------------+
+ *     2N   |     MAC[2]    |     MAC[3]    |     MAC[4]    |     MAC[5]    |
+ *          +---------------+---------------+---------------+---------------+
+ *
+ *    F:   Flush bit. Set if entire table must be flushed. Rest of info in cmsg
+ *        will be ignored. Not implemented.
+ *    D:   Delete bit. Set if entry must be deleted instead of added
+ *    NBI: Network Block Interface. Set to 0
+ *    The number of MACs per control message is limited only by the packet
+ *    buffer size. A 2048B buffer can fit 253 MAC addresses and a 10240B
+ *    buffer 1277 MAC addresses.
+ */
+struct nfp_flower_cmsg_tun_mac {
+	rte_be16_t flags;
+	rte_be16_t count;           /**< Should always be 1 */
+	rte_be16_t index;
+	struct rte_ether_addr addr;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX        32
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_IPS
+ *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ *    -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *          +---------------------------------------------------------------+
+ *        0 |                    Number of IP Addresses                     |
+ *          +---------------------------------------------------------------+
+ *        1 |                        IP Address #1                          |
+ *          +---------------------------------------------------------------+
+ *        2 |                        IP Address #2                          |
+ *          +---------------------------------------------------------------+
+ *          |                             ...                               |
+ *          +---------------------------------------------------------------+
+ *       32 |                        IP Address #32                         |
+ *          +---------------------------------------------------------------+
+ */
+struct nfp_flower_cmsg_tun_ipv4_addr {
+	rte_be32_t count;
+	rte_be32_t ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
 /*
  * NFP_FLOWER_CMSG_TYPE_FLOW_STATS
  *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
@@ -716,5 +801,14 @@  int nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_flower_cmsg_tun_neigh_v4 *payload);
 int nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_flower_cmsg_tun_neigh_v6 *payload);
+int nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_fl_rule_metadata *nfp_flow_meta,
+		uint16_t mac_idx,
+		bool is_del);
+int nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_ether_addr *mac,
+		uint16_t mac_idx,
+		bool is_del);
 
 #endif /* _NFP_CMSG_H_ */
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index b9c37b6..816c733 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -47,7 +47,8 @@  struct nfp_flow_item_proc {
 	/* Size in bytes for @p mask_support and @p mask_default. */
 	const unsigned int mask_sz;
 	/* Merge a pattern item into a flow rule handle. */
-	int (*merge)(struct rte_flow *nfp_flow,
+	int (*merge)(struct nfp_app_fw_flower *app_fw_flower,
+			struct rte_flow *nfp_flow,
 			char **mbuf_off,
 			const struct rte_flow_item *item,
 			const struct nfp_flow_item_proc *proc,
@@ -63,6 +64,12 @@  struct nfp_mask_id_entry {
 	uint8_t mask_id;
 };
 
+struct nfp_pre_tun_entry {
+	uint16_t mac_index;
+	uint16_t ref_cnt;
+	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+} __rte_aligned(32);
+
 static inline struct nfp_flow_priv *
 nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
 {
@@ -417,6 +424,83 @@  struct nfp_mask_id_entry {
 	return 0;
 }
 
+__rte_unused static int
+nfp_tun_add_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
+		rte_be32_t ipv4)
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv4_addr_entry *entry;
+	struct nfp_ipv4_addr_entry *tmp_entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	rte_spinlock_lock(&priv->ipv4_off_lock);
+	LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+		if (entry->ipv4_addr == ipv4) {
+			entry->ref_count++;
+			rte_spinlock_unlock(&priv->ipv4_off_lock);
+			return 0;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+	tmp_entry = rte_zmalloc("nfp_ipv4_off", sizeof(struct nfp_ipv4_addr_entry), 0);
+	if (tmp_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Mem error when offloading IP address.");
+		return -ENOMEM;
+	}
+
+	tmp_entry->ipv4_addr = ipv4;
+	tmp_entry->ref_count = 1;
+
+	rte_spinlock_lock(&priv->ipv4_off_lock);
+	LIST_INSERT_HEAD(&priv->ipv4_off_list, tmp_entry, next);
+	rte_spinlock_unlock(&priv->ipv4_off_lock);
+
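+	/* Sync the updated address list to the firmware */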
+	return nfp_flower_cmsg_tun_off_v4(app_fw_flower);
+}
+
+static int
+nfp_tun_del_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
+		rte_be32_t ipv4)
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv4_addr_entry *entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	rte_spinlock_lock(&priv->ipv4_off_lock);
+	LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+		if (entry->ipv4_addr == ipv4) {
+			entry->ref_count--;
+			if (entry->ref_count == 0) {
+				LIST_REMOVE(entry, next);
+				rte_free(entry);
+				rte_spinlock_unlock(&priv->ipv4_off_lock);
+				return nfp_flower_cmsg_tun_off_v4(app_fw_flower);
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+	return 0;
+}
+
+static int
+nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr,
+		struct rte_flow *nfp_flow)
+{
+	int ret;
+	struct nfp_flower_ipv4_udp_tun *udp4;
+
+	udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
+			sizeof(struct nfp_flower_ipv4_udp_tun));
+	ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+
+	return ret;
+}
+
 static void
 nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
 {
@@ -653,6 +737,9 @@  struct nfp_mask_id_entry {
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_RSS detected");
 			break;
+		case RTE_FLOW_ACTION_TYPE_JUMP:
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_JUMP detected");
+			break;
 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_PORT_ID detected");
 			key_ls->act_size += sizeof(struct nfp_fl_act_output);
@@ -804,7 +891,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_eth(__rte_unused struct rte_flow *nfp_flow,
+nfp_flow_merge_eth(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		__rte_unused struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -841,7 +929,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_vlan(struct rte_flow *nfp_flow,
+nfp_flow_merge_vlan(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		__rte_unused char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -871,7 +960,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_ipv4(struct rte_flow *nfp_flow,
+nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -932,7 +1022,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_ipv6(struct rte_flow *nfp_flow,
+nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -997,7 +1088,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_tcp(struct rte_flow *nfp_flow,
+nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -1070,7 +1162,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_udp(struct rte_flow *nfp_flow,
+nfp_flow_merge_udp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -1118,7 +1211,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_sctp(struct rte_flow *nfp_flow,
+nfp_flow_merge_sctp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -1160,7 +1254,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_vxlan(struct rte_flow *nfp_flow,
+nfp_flow_merge_vxlan(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -1409,7 +1504,8 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_compile_item_proc(const struct rte_flow_item items[],
+nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
+		const struct rte_flow_item items[],
 		struct rte_flow *nfp_flow,
 		char **mbuf_off_exact,
 		char **mbuf_off_mask,
@@ -1420,6 +1516,7 @@  struct nfp_mask_id_entry {
 	bool continue_flag = true;
 	const struct rte_flow_item *item;
 	const struct nfp_flow_item_proc *proc_list;
+	struct nfp_app_fw_flower *app_fw_flower = repr->app_fw_flower;
 
 	proc_list = nfp_flow_item_proc_list;
 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END && continue_flag; ++item) {
@@ -1455,14 +1552,14 @@  struct nfp_mask_id_entry {
 			break;
 		}
 
-		ret = proc->merge(nfp_flow, mbuf_off_exact, item,
+		ret = proc->merge(app_fw_flower, nfp_flow, mbuf_off_exact, item,
 				proc, false, is_outer_layer);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR, "nfp flow item %d exact merge failed", item->type);
 			break;
 		}
 
-		ret = proc->merge(nfp_flow, mbuf_off_mask, item,
+		ret = proc->merge(app_fw_flower, nfp_flow, mbuf_off_mask, item,
 				proc, true, is_outer_layer);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR, "nfp flow item %d mask merge failed", item->type);
@@ -1476,7 +1573,7 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_compile_items(__rte_unused struct nfp_flower_representor *representor,
+nfp_flow_compile_items(struct nfp_flower_representor *representor,
 		const struct rte_flow_item items[],
 		struct rte_flow *nfp_flow)
 {
@@ -1507,7 +1604,7 @@  struct nfp_mask_id_entry {
 		is_outer_layer = false;
 
 	/* Go over items */
-	ret = nfp_flow_compile_item_proc(loop_item, nfp_flow,
+	ret = nfp_flow_compile_item_proc(representor, loop_item, nfp_flow,
 			&mbuf_off_exact, &mbuf_off_mask, is_outer_layer);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "nfp flow item compile failed.");
@@ -1516,7 +1613,7 @@  struct nfp_mask_id_entry {
 
 	/* Go over inner items */
 	if (is_tun_flow) {
-		ret = nfp_flow_compile_item_proc(items, nfp_flow,
+		ret = nfp_flow_compile_item_proc(representor, items, nfp_flow,
 				&mbuf_off_exact, &mbuf_off_mask, true);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR, "nfp flow outer item compile failed.");
@@ -1891,6 +1988,59 @@  struct nfp_mask_id_entry {
 	return nfp_flower_cmsg_tun_neigh_v4_rule(app_fw_flower, &payload);
 }
 
+__rte_unused static int
+nfp_flower_add_tun_neigh_v4_decap(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow)
+{
+	struct nfp_fl_tun *tmp;
+	struct nfp_fl_tun *tun;
+	struct nfp_flow_priv *priv;
+	struct nfp_flower_ipv4 *ipv4;
+	struct nfp_flower_mac_mpls *eth;
+	struct nfp_flower_in_port *port;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_cmsg_tun_neigh_v4 payload;
+
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	port = (struct nfp_flower_in_port *)(meta_tci + 1);
+	eth = (struct nfp_flower_mac_mpls *)(port + 1);
+
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+		ipv4 = (struct nfp_flower_ipv4 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls) +
+				sizeof(struct nfp_flower_tp_ports));
+	else
+		ipv4 = (struct nfp_flower_ipv4 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls));
+
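+	/*
+	 * The flow matches packets arriving from the tunnel, so the
+	 * neighbour entry is built with source and destination swapped.
+	 */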
+	tun = &nfp_flow->tun;
+	tun->payload.v6_flag = 0;
+	tun->payload.dst.dst_ipv4 = ipv4->ipv4_src;
+	tun->payload.src.src_ipv4 = ipv4->ipv4_dst;
+	memcpy(tun->payload.dst_addr, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(tun->payload.src_addr, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+
+	tun->ref_cnt = 1;
+	priv = app_fw_flower->flow_priv;
+	LIST_FOREACH(tmp, &priv->nn_list, next) {
+		if (memcmp(&tmp->payload, &tun->payload, sizeof(struct nfp_fl_tun_entry)) == 0) {
+			tmp->ref_cnt++;
+			return 0;
+		}
+	}
+
+	LIST_INSERT_HEAD(&priv->nn_list, tun, next);
+
+	memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v4));
+	payload.dst_ipv4 = ipv4->ipv4_src;
+	payload.src_ipv4 = ipv4->ipv4_dst;
+	memcpy(payload.common.dst_mac, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(payload.common.src_mac, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+	payload.common.port_id = port->in_port;
+
+	return nfp_flower_cmsg_tun_neigh_v4_rule(app_fw_flower, &payload);
+}
+
 static int
 nfp_flower_del_tun_neigh_v4(struct nfp_app_fw_flower *app_fw_flower,
 		rte_be32_t ipv4)
@@ -2108,6 +2258,200 @@  struct nfp_mask_id_entry {
 				actions, vxlan_data, nfp_flow_meta, tun);
 }
 
+static struct nfp_pre_tun_entry *
+nfp_pre_tun_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_pre_tun_entry *mac_index;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	index = rte_hash_lookup_data(priv->pre_tun_table, &hash_key, (void **)&mac_index);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table");
+		return NULL;
+	}
+
+	return mac_index;
+}
+
+static bool
+nfp_pre_tun_table_add(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	ret = rte_hash_add_key_data(priv->pre_tun_table, &hash_key, hash_data);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to pre tunnel table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_pre_tun_table_delete(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	ret = rte_hash_del_key(priv->pre_tun_table, &hash_key);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed");
+		return false;
+	}
+
+	return true;
+}
+
+__rte_unused static int
+nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr,
+		uint16_t *index)
+{
+	uint16_t i;
+	uint32_t entry_size;
+	uint16_t mac_index = 1;
+	struct nfp_flow_priv *priv;
+	struct nfp_pre_tun_entry *entry;
+	struct nfp_pre_tun_entry *find_entry;
+
+	priv = repr->app_fw_flower->flow_priv;
+	if (priv->pre_tun_cnt >= NFP_TUN_PRE_TUN_RULE_LIMIT) {
+		PMD_DRV_LOG(ERR, "Pre tunnel table has full");
+		return -EINVAL;
+	}
+
+	entry_size = sizeof(struct nfp_pre_tun_entry);
+	entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
+	if (entry == NULL) {
+		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+		return -ENOMEM;
+	}
+
+	entry->ref_cnt = 1U;
+	memcpy(entry->mac_addr, repr->mac_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
+
+	/* 0 is considered a failed match */
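+	/* First pass: reuse the entry if this MAC is already in the table */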
+	for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+		if (priv->pre_tun_bitmap[i] == 0)
+			continue;
+		entry->mac_index = i;
+		find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
+		if (find_entry != NULL) {
+			find_entry->ref_cnt++;
+			*index = find_entry->mac_index;
+			rte_free(entry);
+			return 0;
+		}
+	}
+
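+	/* Second pass: no match found, claim the first free index */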
+	for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+		if (priv->pre_tun_bitmap[i] == 0) {
+			priv->pre_tun_bitmap[i] = 1U;
+			mac_index = i;
+			break;
+		}
+	}
+
+	entry->mac_index = mac_index;
+	if (!nfp_pre_tun_table_add(priv, (char *)entry, entry_size)) {
+		rte_free(entry);
+		return -EINVAL;
+	}
+
+	*index = entry->mac_index;
+	priv->pre_tun_cnt++;
+	return 0;
+}
+
+static int
+nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
+		struct rte_flow *nfp_flow)
+{
+	uint16_t i;
+	int ret = 0;
+	uint32_t entry_size;
+	uint16_t nfp_mac_idx;
+	struct nfp_flow_priv *priv;
+	struct nfp_pre_tun_entry *entry;
+	struct nfp_pre_tun_entry *find_entry = NULL;
+	struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+	priv = repr->app_fw_flower->flow_priv;
+	if (priv->pre_tun_cnt == 1)
+		return 0;
+
+	entry_size = sizeof(struct nfp_pre_tun_entry);
+	entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
+	if (entry == NULL) {
+		PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+		return -ENOMEM;
+	}
+
+	entry->ref_cnt = 1U;
+	memcpy(entry->mac_addr, repr->mac_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
+
+	/* 0 is considered a failed match */
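+	/* Walk the allocated indexes looking for the entry with this MAC */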
+	for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+		if (priv->pre_tun_bitmap[i] == 0)
+			continue;
+		entry->mac_index = i;
+		find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
+		if (find_entry != NULL) {
+			find_entry->ref_cnt--;
+			if (find_entry->ref_cnt != 0)
+				goto free_entry;
+			priv->pre_tun_bitmap[i] = 0;
+			break;
+		}
+	}
+
+	if (find_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Entry not found in pre tunnel table");
+		ret = -ENOENT;
+		goto free_entry;
+	}
+
+	nfp_flow_meta = nfp_flow->payload.meta;
+	nfp_mac_idx = (find_entry->mac_index << 8) |
+			NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT |
+			NFP_TUN_PRE_TUN_IDX_BIT;
+	ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr,
+			nfp_mac_idx, true);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Send tunnel mac rule failed");
+		ret = -EINVAL;
+		goto free_entry;
+	}
+
+	ret = nfp_flower_cmsg_pre_tunnel_rule(repr->app_fw_flower, nfp_flow_meta,
+			nfp_mac_idx, true);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Send pre tunnel rule failed");
+		ret = -EINVAL;
+		goto free_entry;
+	}
+
+	find_entry->ref_cnt = 1U;
+	if (!nfp_pre_tun_table_delete(priv, (char *)find_entry, entry_size)) {
+		PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed");
+		ret = -EINVAL;
+		goto free_entry;
+	}
+
+	rte_free(entry);
+	rte_free(find_entry);
+	priv->pre_tun_cnt--;
+
+	return 0;
+
+free_entry:
+	rte_free(entry);
+
+	return ret;
+}
+
 static int
 nfp_flow_compile_action(struct nfp_flower_representor *representor,
 		const struct rte_flow_action actions[],
@@ -2149,6 +2493,9 @@  struct nfp_mask_id_entry {
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_RSS");
 			break;
+		case RTE_FLOW_ACTION_TYPE_JUMP:
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_JUMP");
+			break;
 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
 			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_PORT_ID");
 			ret = nfp_flow_action_output(position, action, nfp_flow_meta);
@@ -2569,6 +2916,15 @@  struct nfp_mask_id_entry {
 		/* Delete the entry from nn table */
 		ret = nfp_flower_del_tun_neigh(app_fw_flower, nfp_flow);
 		break;
+	case NFP_FLOW_DECAP:
+		/* Delete the entry from nn table */
+		ret = nfp_flower_del_tun_neigh(app_fw_flower, nfp_flow);
+		if (ret != 0)
+			goto exit;
+
+		/* Delete the entry in pre tunnel table */
+		ret = nfp_pre_tun_table_check_del(representor, nfp_flow);
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid nfp flow type %d.", nfp_flow->type);
 		ret = -EINVAL;
@@ -2578,6 +2934,10 @@  struct nfp_mask_id_entry {
 	if (ret != 0)
 		goto exit;
 
+	/* Delete the ip off */
+	if (nfp_flow_is_tunnel(nfp_flow))
+		nfp_tun_check_ip_off_del(representor, nfp_flow);
+
 	/* Delete the flow from hardware */
 	if (nfp_flow->install_flag) {
 		ret = nfp_flower_cmsg_flow_delete(app_fw_flower, nfp_flow);
@@ -2707,6 +3067,49 @@  struct nfp_mask_id_entry {
 	return 0;
 }
 
+static int
+nfp_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+		struct rte_flow_tunnel *tunnel,
+		struct rte_flow_action **pmd_actions,
+		uint32_t *num_of_actions,
+		__rte_unused struct rte_flow_error *err)
+{
+	struct rte_flow_action *nfp_action;
+
+	nfp_action = rte_zmalloc("nfp_tun_action", sizeof(struct rte_flow_action), 0);
+	if (nfp_action == NULL) {
+		PMD_DRV_LOG(ERR, "Alloc memory for nfp tunnel action failed.");
+		return -ENOMEM;
+	}
+
+	switch (tunnel->type) {
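+	/*
+	 * No tunnel type is handled yet; support is expected to be filled
+	 * in by later patches of this series.
+	 */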
+	default:
+		*pmd_actions = NULL;
+		*num_of_actions = 0;
+		rte_free(nfp_action);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nfp_flow_tunnel_action_decap_release(__rte_unused struct rte_eth_dev *dev,
+		struct rte_flow_action *pmd_actions,
+		uint32_t num_of_actions,
+		__rte_unused struct rte_flow_error *err)
+{
+	uint32_t i;
+	struct rte_flow_action *nfp_action;
+
+	for (i = 0; i < num_of_actions; i++) {
+		nfp_action = &pmd_actions[i];
+		rte_free(nfp_action);
+	}
+
+	return 0;
+}
+
 static const struct rte_flow_ops nfp_flow_ops = {
 	.validate                    = nfp_flow_validate,
 	.create                      = nfp_flow_create,
@@ -2715,6 +3118,8 @@  struct nfp_mask_id_entry {
 	.query                       = nfp_flow_query,
 	.tunnel_match                = nfp_flow_tunnel_match,
 	.tunnel_item_release         = nfp_flow_tunnel_item_release,
+	.tunnel_decap_set            = nfp_flow_tunnel_decap_set,
+	.tunnel_action_decap_release = nfp_flow_tunnel_action_decap_release,
 };
 
 int
@@ -2759,6 +3164,15 @@  struct nfp_mask_id_entry {
 		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 	};
 
+	struct rte_hash_parameters pre_tun_hash_params = {
+		.name       = "pre_tunnel_table",
+		.entries    = 32,
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
 	ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl,
 			"CONFIG_FC_HOST_CTX_COUNT", &ret);
 	if (ret < 0) {
@@ -2839,11 +3253,27 @@  struct nfp_mask_id_entry {
 		goto free_mask_table;
 	}
 
+	/* pre tunnel table */
+	priv->pre_tun_cnt = 1;
+	pre_tun_hash_params.hash_func_init_val = priv->hash_seed;
+	priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params);
+	if (priv->pre_tun_table == NULL) {
+		PMD_INIT_LOG(ERR, "Pre tunnel table creation failed");
+		ret = -ENOMEM;
+		goto free_flow_table;
+	}
+
+	/* ipv4 off list */
+	rte_spinlock_init(&priv->ipv4_off_lock);
+	LIST_INIT(&priv->ipv4_off_list);
+
 	/* neighbor next list */
 	LIST_INIT(&priv->nn_list);
 
 	return 0;
 
+free_flow_table:
+	rte_hash_free(priv->flow_table);
 free_mask_table:
 	rte_free(priv->mask_table);
 free_stats:
@@ -2867,6 +3297,7 @@  struct nfp_mask_id_entry {
 	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
 	priv = app_fw_flower->flow_priv;
 
+	rte_hash_free(priv->pre_tun_table);
 	rte_hash_free(priv->flow_table);
 	rte_hash_free(priv->mask_table);
 	rte_free(priv->stats);
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 14da800..84a3005 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -6,6 +6,7 @@ 
 #ifndef _NFP_FLOW_H_
 #define _NFP_FLOW_H_
 
+#include <sys/queue.h>
 #include <ethdev_driver.h>
 
 #define NFP_FLOWER_LAYER_EXT_META   (1 << 0)
@@ -92,6 +93,7 @@  enum nfp_flower_tun_type {
 enum nfp_flow_type {
 	NFP_FLOW_COMMON,
 	NFP_FLOW_ENCAP,
+	NFP_FLOW_DECAP,
 };
 
 struct nfp_fl_key_ls {
@@ -168,6 +170,14 @@  struct nfp_fl_stats {
 	uint64_t bytes;
 };
 
+struct nfp_ipv4_addr_entry {
+	LIST_ENTRY(nfp_ipv4_addr_entry) next;
+	rte_be32_t ipv4_addr;
+	int ref_count;
+};
+
+#define NFP_TUN_PRE_TUN_RULE_LIMIT  32
+
 struct nfp_flow_priv {
 	uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
 	uint64_t flower_version; /**< Flow version, always increase. */
@@ -183,6 +193,13 @@  struct nfp_flow_priv {
 	struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */
 	struct nfp_fl_stats *stats; /**< Store stats of flow. */
 	rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */
+	/* pre tunnel rule */
+	uint16_t pre_tun_cnt; /**< Number of pre tunnel rules */
+	uint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rules */
+	struct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rule */
+	/* IPv4 off */
+	LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */
+	rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */
 	/* neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
 };