[v2,08/14] net/idpf: add support for basic Rx/Tx datapath

Message ID: 20220905105828.3190335-9-junfeng.guo@intel.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Junfeng Guo Sept. 5, 2022, 10:58 a.m. UTC
  Add basic RX & TX support in split queue mode and single queue mode.
Split queue mode is selected by default.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c |  47 +-
 drivers/net/idpf/idpf_rxtx.c   | 896 ++++++++++++++++++++++++++++++++-
 drivers/net/idpf/idpf_rxtx.h   |  28 ++
 3 files changed, 966 insertions(+), 5 deletions(-)
  

Comments

Andrew Rybchenko Oct. 3, 2022, 2:02 p.m. UTC | #1
On 9/5/22 13:58, Junfeng Guo wrote:
> Add basic RX & TX support in split queue mode and single queue mode.

RX -> Rx, TX -> Tx

> Split queue mode is selected by default.

What is split queue mode? Where is it defined/described?

The patch is very big again. Please, start from really basic Rx
and Tx functionality and then add offloads one by one.
Otherwise it is really hard to review everything at once.
  
Junfeng Guo Oct. 14, 2022, 9:18 a.m. UTC | #2
> -----Original Message-----
> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Sent: Monday, October 3, 2022 22:03
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>; Li, Xiaoyun
> <xiaoyun.li@intel.com>
> Subject: Re: [PATCH v2 08/14] net/idpf: add support for basic Rx/Tx
> datapath
> 
> On 9/5/22 13:58, Junfeng Guo wrote:
> > Add basic RX & TX support in split queue mode and single queue mode.
> 
> RX -> Rx, TX -> Tx
> 
> > Split queue mode is selected by default.
> 
> What is split queue mode? Where is it defined/described?
> 
> The patch is very big again. Please, start from really basic Rx
> and Tx functionality and then add offloads one by one.
> Otherwise it is really hard to review everything at once.

Sure, will split the basic Rx/Tx and the offloads, and also add
doc to describe the queue modes. Thanks!
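
For reference while the queue-mode documentation is still pending: the patch
below adds the "tx_single" and "rx_single" devargs (parsed by parse_bool() as
0 or 1) to fall back from the default split queue model to the single queue
model. A minimal, hypothetical sketch of selecting the single queue model from
an application follows; only the devargs keys come from this patch, while the
PCI address and the rest of the setup are placeholders.

#include <rte_common.h>
#include <rte_eal.h>

int
main(void)
{
	/* Hypothetical EAL arguments: "tx_single"/"rx_single" are the devargs
	 * keys added by this patch; the PCI address is a placeholder.
	 */
	char *eal_argv[] = {
		"app",
		"-a", "0000:af:00.0,tx_single=1,rx_single=1",
	};

	if (rte_eal_init(RTE_DIM(eal_argv), eal_argv) < 0)
		return -1;

	/* Configure ports/queues as usual; idpf_parse_devargs() picks up the
	 * keys during device initialization.
	 */
	return 0;
}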
  

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 2cbf2f0e19..6310745684 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -12,6 +12,8 @@ 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+#define IDPF_TX_SINGLE_Q	"tx_single"
+#define IDPF_RX_SINGLE_Q	"rx_single"
 #define IDPF_VPORT		"vport"
 
 struct idpf_adapter_list adapter_list;
@@ -20,6 +22,8 @@  bool adapter_list_init;
 uint64_t idpf_timestamp_dynflag;
 
 static const char * const idpf_valid_args[] = {
+	IDPF_TX_SINGLE_Q,
+	IDPF_RX_SINGLE_Q,
 	IDPF_VPORT,
 	NULL
 };
@@ -367,6 +371,9 @@  idpf_dev_start(struct rte_eth_dev *dev)
 		goto err_mtu;
 	}
 
+	idpf_set_rx_function(dev);
+	idpf_set_tx_function(dev);
+
 	if (idpf_ena_dis_vport(vport, true)) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
 		goto err_vport;
@@ -393,6 +400,8 @@  idpf_dev_stop(struct rte_eth_dev *dev)
 	if (idpf_ena_dis_vport(vport, false))
 		PMD_DRV_LOG(ERR, "disable vport failed");
 
+	idpf_stop_queues(dev);
+
 	vport->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -519,6 +528,26 @@  parse_vport(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+	int *i = (int *)args;
+	char *end;
+	int num;
+
+	num = strtoul(value, &end, 10);
+
+	if (num != 0 && num != 1) {
+		PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", "
+			"value must be 0 or 1",
+			value, key);
+		return -1;
+	}
+
+	*i = num;
+	return 0;
+}
+
 static int
 idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 {
@@ -537,7 +566,20 @@  idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 
 	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
 		adapter);
+	if (ret)
+		goto bail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+				 &adapter->txq_model);
+	if (ret)
+		goto bail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+				 &adapter->rxq_model);
+	if (ret)
+		goto bail;
 
+bail:
 	rte_kvargs_free(kvlist);
 	return ret;
 }
@@ -763,8 +805,11 @@  idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
 	/* for secondary processes, we don't initialise any further as primary
 	 * has already done this work.
 	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		idpf_set_rx_function(dev);
+		idpf_set_tx_function(dev);
 		return ret;
+	}
 
 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 1c5c4688cc..54d83a7c61 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -990,10 +990,6 @@  idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
 		rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
 		rxd->pkt_addr = dma_addr;
 		rxd->hdr_addr = 0;
-#ifndef RTE_LIBRTE_IDPF_16BYTE_RX_DESC
-		rxd->rsvd1 = 0;
-		rxd->rsvd2 = 0;
-#endif
 
 		rxq->sw_ring[i] = mbuf;
 	}
@@ -1287,3 +1283,895 @@  idpf_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+#define IDPF_RX_ERR0_QW1					\
+	(BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |	\
+	 BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |	\
+	 BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |	\
+	 BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+	uint64_t flags = 0;
+
+	if (unlikely(!(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S))))
+		return flags;
+
+	if (likely((err & IDPF_RX_ERR0_QW1) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_HASH1_S	0
+#define IDPF_RX_FLEX_DESC_HASH2_S	16
+#define IDPF_RX_FLEX_DESC_HASH3_S	24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+	uint8_t status_err0_qw0;
+	uint64_t flags = 0;
+
+	status_err0_qw0 = rx_desc->status_err0_qw0;
+
+	if (status_err0_qw0 & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) {
+		flags |= RTE_MBUF_F_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_16(rx_desc->hash1) |
+			((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+			 IDPF_RX_FLEX_DESC_HASH2_S) |
+			((uint32_t)(rx_desc->hash3) <<
+			 IDPF_RX_FLEX_DESC_HASH3_S);
+	}
+
+	return flags;
+}
+
+static void
+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
+{
+	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
+	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
+	uint16_t nb_refill = rx_bufq->nb_rx_hold;
+	uint16_t nb_desc = rx_bufq->nb_rx_desc;
+	uint16_t next_avail = rx_bufq->rx_tail;
+	struct rte_mbuf *nmb[nb_refill];
+	struct rte_eth_dev *dev;
+	uint64_t dma_addr;
+	uint16_t delta;
+
+	if (nb_refill <= rx_bufq->rx_free_thresh)
+		return;
+
+	if (nb_refill >= nb_desc)
+		nb_refill = nb_desc - 1;
+
+	rx_buf_ring =
+	       (volatile struct virtchnl2_splitq_rx_buf_desc *)rx_bufq->rx_ring;
+	delta = nb_desc - next_avail;
+	if (delta < nb_refill) {
+		if (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta))) {
+			for (int i = 0; i < delta; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rx_bufq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->hdr_addr = 0;
+				rx_buf_desc->pkt_addr = dma_addr;
+			}
+			nb_refill -= delta;
+			next_avail = 0;
+			rx_bufq->nb_rx_hold -= delta;
+		} else {
+			dev = &rte_eth_devices[rx_bufq->port_id];
+			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+				   rx_bufq->port_id, rx_bufq->queue_id);
+			return;
+		}
+	}
+
+	if (nb_desc - next_avail >= nb_refill) {
+		if (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill))) {
+			for (int i = 0; i < nb_refill; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rx_bufq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->hdr_addr = 0;
+				rx_buf_desc->pkt_addr = dma_addr;
+			}
+			next_avail += nb_refill;
+			rx_bufq->nb_rx_hold -= nb_refill;
+		} else {
+			dev = &rte_eth_devices[rx_bufq->port_id];
+			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+				   rx_bufq->port_id, rx_bufq->queue_id);
+		}
+	}
+
+	IECM_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
+
+	rx_bufq->rx_tail = next_avail;
+}
+
+uint16_t
+idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		      uint16_t nb_pkts)
+{
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+	uint16_t pktlen_gen_bufq_id;
+	struct idpf_rx_queue *rxq;
+	const uint32_t *ptype_tbl;
+	uint8_t status_err0_qw1;
+	struct rte_mbuf *rxm;
+	uint16_t rx_id_bufq1;
+	uint16_t rx_id_bufq2;
+	uint64_t pkt_flags;
+	uint16_t pkt_len;
+	uint16_t bufq_id;
+	uint16_t gen_id;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+
+	nb_rx = 0;
+	rxq = (struct idpf_rx_queue *)rx_queue;
+
+	if (unlikely(!rxq) || unlikely(!rxq->q_started))
+		return nb_rx;
+
+	rx_id = rxq->rx_tail;
+	rx_id_bufq1 = rxq->bufq1->rx_next_avail;
+	rx_id_bufq2 = rxq->bufq2->rx_next_avail;
+	rx_desc_ring =
+	       (volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *)rxq->rx_ring;
+	ptype_tbl = rxq->adapter->ptype_tbl;
+
+	while (nb_rx < nb_pkts) {
+		rx_desc = &rx_desc_ring[rx_id];
+
+		pktlen_gen_bufq_id =
+			rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);
+		gen_id = (pktlen_gen_bufq_id &
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
+		if (gen_id != rxq->expected_gen_id)
+			break;
+
+		pkt_len = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
+			VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
+		if (!pkt_len)
+			PMD_RX_LOG(ERR, "Packet length is 0");
+
+		rx_id++;
+		if (unlikely(rx_id == rxq->nb_rx_desc)) {
+			rx_id = 0;
+			rxq->expected_gen_id ^= 1;
+		}
+
+		bufq_id = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
+			VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
+		if (!bufq_id) {
+			rxm = rxq->bufq1->sw_ring[rx_id_bufq1];
+			rx_id_bufq1++;
+			if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))
+				rx_id_bufq1 = 0;
+			rxq->bufq1->nb_rx_hold++;
+		} else {
+			rxm = rxq->bufq2->sw_ring[rx_id_bufq2];
+			rx_id_bufq2++;
+			if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))
+				rx_id_bufq2 = 0;
+			rxq->bufq2->nb_rx_hold++;
+		}
+
+		pkt_len -= rxq->crc_len;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->next = NULL;
+		rxm->nb_segs = 1;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		rxm->packet_type =
+			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
+				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
+				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
+
+		status_err0_qw1 = rx_desc->status_err0_qw1;
+		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+		rxm->ol_flags |= pkt_flags;
+
+		rx_pkts[nb_rx++] = rxm;
+	}
+
+	if (nb_rx) {
+		rxq->rx_tail = rx_id;
+		if (rx_id_bufq1 != rxq->bufq1->rx_next_avail)
+			rxq->bufq1->rx_next_avail = rx_id_bufq1;
+		if (rx_id_bufq2 != rxq->bufq2->rx_next_avail)
+			rxq->bufq2->rx_next_avail = rx_id_bufq2;
+
+		idpf_split_rx_bufq_refill(rxq->bufq1);
+		idpf_split_rx_bufq_refill(rxq->bufq2);
+	}
+
+	return nb_rx;
+}
+
+static inline void
+idpf_split_tx_free(struct idpf_tx_queue *cq)
+{
+	volatile struct iecm_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
+	volatile struct iecm_splitq_tx_compl_desc *txd;
+	uint16_t next = cq->tx_tail;
+	struct idpf_tx_entry *txe;
+	struct idpf_tx_queue *txq;
+	uint16_t gen, qid, q_head;
+	uint8_t ctype;
+
+	txd = &compl_ring[next];
+	gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+		IECM_TXD_COMPLQ_GEN_M) >> IECM_TXD_COMPLQ_GEN_S;
+	if (gen != cq->expected_gen_id)
+		return;
+
+	ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+		IECM_TXD_COMPLQ_COMPL_TYPE_M) >> IECM_TXD_COMPLQ_COMPL_TYPE_S;
+	qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+		IECM_TXD_COMPLQ_QID_M) >> IECM_TXD_COMPLQ_QID_S;
+	q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
+	txq = cq->txqs[qid - cq->tx_start_qid];
+
+	switch (ctype) {
+	case IECM_TXD_COMPLT_RE:
+		if (q_head == 0)
+			txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+		else
+			txq->last_desc_cleaned = q_head - 1;
+		if (unlikely(!(txq->last_desc_cleaned % 32))) {
+			PMD_DRV_LOG(ERR, "unexpected desc (head = %u) completion.",
+						q_head);
+			return;
+		}
+
+		break;
+	case IECM_TXD_COMPLT_RS:
+		txq->nb_free++;
+		txq->nb_used--;
+		txe = &txq->sw_ring[q_head];
+		if (txe->mbuf) {
+			rte_pktmbuf_free_seg(txe->mbuf);
+			txe->mbuf = NULL;
+		}
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown completion type.");
+		return;
+	}
+
+	if (++next == cq->nb_tx_desc) {
+		next = 0;
+		cq->expected_gen_id ^= 1;
+	}
+
+	cq->tx_tail = next;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+	if (flags & RTE_MBUF_F_TX_TCP_SEG)
+		return 1;
+
+	return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+			union idpf_tx_offload tx_offload,
+			volatile union iecm_flex_tx_ctx_desc *ctx_desc)
+{
+	uint16_t cmd_dtype;
+	uint32_t tso_len;
+	uint8_t hdr_len;
+
+	if (!tx_offload.l4_len) {
+		PMD_TX_LOG(DEBUG, "L4 length set to 0");
+		return;
+	}
+
+	hdr_len = tx_offload.l2_len +
+		tx_offload.l3_len +
+		tx_offload.l4_len;
+	cmd_dtype = IECM_TX_DESC_DTYPE_FLEX_TSO_CTX |
+		IECM_TX_FLEX_CTX_DESC_CMD_TSO;
+	tso_len = mbuf->pkt_len - hdr_len;
+
+	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+	ctx_desc->tso.qw0.hdr_len = hdr_len;
+	ctx_desc->tso.qw0.mss_rt =
+		rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+				 IECM_TXD_FLEX_CTX_MSS_RT_M);
+	ctx_desc->tso.qw0.flex_tlen =
+		rte_cpu_to_le_32(tso_len &
+				 IECM_TXD_FLEX_CTX_MSS_RT_M);
+}
+
+uint16_t
+idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		      uint16_t nb_pkts)
+{
+	struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
+	volatile struct iecm_flex_tx_sched_desc *txr;
+	volatile struct iecm_flex_tx_sched_desc *txd;
+	struct idpf_tx_entry *sw_ring;
+	union idpf_tx_offload tx_offload = {0};
+	struct idpf_tx_entry *txe, *txn;
+	uint16_t nb_used, tx_id, sw_id;
+	struct rte_mbuf *tx_pkt;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+	uint64_t ol_flags;
+	uint16_t nb_ctx;
+
+	if (unlikely(!txq) || unlikely(!txq->q_started))
+		return nb_tx;
+
+	txr = txq->desc_ring;
+	sw_ring = txq->sw_ring;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+	txe = &sw_ring[sw_id];
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		if (txq->nb_free <= txq->free_thresh) {
+			/* TODO: Need to refine
+			 * 1. free and clean: better to decide a clean destination instead of
+			 * a loop count, and don't free the mbuf as soon as RS is received;
+			 * free it on the next transmit or according to the clean destination.
+			 * For now, just ignore the RE write-back and free the mbuf on RS.
+			 * 2. out-of-order write-back is not supported yet; the SW head and
+			 * the HW head need to be tracked separately.
+			 */
+			nb_to_clean = 2 * txq->rs_thresh;
+			while (nb_to_clean--)
+				idpf_split_tx_free(txq->complq);
+		}
+
+		if (txq->nb_free < tx_pkt->nb_segs)
+			break;
+
+		ol_flags = tx_pkt->ol_flags;
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+		/* Calculate the number of context descriptors needed. */
+		nb_ctx = idpf_calc_context_desc(ol_flags);
+		nb_used = tx_pkt->nb_segs + nb_ctx;
+
+		/* context descriptor */
+		if (nb_ctx) {
+			volatile union iecm_flex_tx_ctx_desc *ctx_desc =
+			(volatile union iecm_flex_tx_ctx_desc *)&txr[tx_id];
+
+			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+							ctx_desc);
+
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+		}
+
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			txe->mbuf = tx_pkt;
+
+			/* Setup TX descriptor */
+			txd->buf_addr =
+				rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->qw1.cmd_dtype =
+				rte_cpu_to_le_16(IECM_TX_DESC_DTYPE_FLEX_FLOW_SCHE);
+			txd->qw1.rxr_bufsize = tx_pkt->data_len;
+			txd->qw1.compl_tag = sw_id;
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+			sw_id = txe->next_id;
+			txe = txn;
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_EOP;
+
+		if (unlikely(!(tx_id % 32)))
+			txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_RE;
+		if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
+			txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_CS_EN;
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (likely(nb_tx)) {
+		IECM_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+
+	return nb_tx;
+}
+
+static inline void
+idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
+		    uint16_t rx_id)
+{
+	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+
+	if (nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG,
+			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
+		rx_id = (uint16_t)((rx_id == 0) ?
+			(rxq->nb_rx_desc - 1) : (rx_id - 1));
+		IECM_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+		nb_hold = 0;
+	}
+	rxq->nb_rx_hold = nb_hold;
+}
+
+uint16_t
+idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		       uint16_t nb_pkts)
+{
+	volatile union virtchnl2_rx_desc *rx_ring;
+	volatile union virtchnl2_rx_desc *rxdp;
+	union virtchnl2_rx_desc rxd;
+	struct idpf_rx_queue *rxq;
+	const uint32_t *ptype_tbl;
+	uint16_t rx_id, nb_hold;
+	struct rte_eth_dev *dev;
+	uint16_t rx_packet_len;
+	struct rte_mbuf *rxm;
+	struct rte_mbuf *nmb;
+	uint16_t rx_status0;
+	uint64_t dma_addr;
+	uint16_t nb_rx;
+
+	nb_rx = 0;
+	nb_hold = 0;
+	rxq = rx_queue;
+
+	if (unlikely(!rxq) || unlikely(!rxq->q_started))
+		return nb_rx;
+
+	rx_id = rxq->rx_tail;
+	rx_ring = rxq->rx_ring;
+	ptype_tbl = rxq->adapter->ptype_tbl;
+
+	while (nb_rx < nb_pkts) {
+		rxdp = &rx_ring[rx_id];
+		rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);
+
+		/* Check the DD bit first */
+		if (!(rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)))
+			break;
+
+		nmb = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!nmb)) {
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", rxq->port_id, rxq->queue_id);
+			break;
+		}
+		rxd = *rxdp; /* copy descriptor in ring to temp variable */
+
+		nb_hold++;
+		rxm = rxq->sw_ring[rx_id];
+		rxq->sw_ring[rx_id] = nmb;
+		rx_id++;
+		if (unlikely(rx_id == rxq->nb_rx_desc))
+			rx_id = 0;
+
+		/* Prefetch next mbuf */
+		rte_prefetch0(rxq->sw_ring[rx_id]);
+
+		/* When next RX descriptor is on a cache line boundary,
+		 * prefetch the next 4 RX descriptors and next 8 pointers
+		 * to mbufs.
+		 */
+		if ((rx_id & 0x3) == 0) {
+			rte_prefetch0(&rx_ring[rx_id]);
+			rte_prefetch0(rxq->sw_ring[rx_id]);
+		}
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+		rxdp->read.hdr_addr = 0;
+		rxdp->read.pkt_addr = dma_addr;
+
+		rx_packet_len = (rte_cpu_to_le_16(rxd.flex_nic_wb.pkt_len) &
+				 VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M) -
+				rxq->crc_len;
+
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = rx_packet_len;
+		rxm->data_len = rx_packet_len;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		rxm->packet_type =
+			ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &
+				VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];
+		rx_pkts[nb_rx++] = rxm;
+	}
+	rxq->rx_tail = rx_id;
+
+	idpf_update_rx_tail(rxq, nb_hold, rx_id);
+
+	return nb_rx;
+}
+
+static inline int
+idpf_xmit_cleanup(struct idpf_tx_queue *txq)
+{
+	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+	struct idpf_tx_entry *sw_ring = txq->sw_ring;
+	uint16_t nb_tx_desc = txq->nb_tx_desc;
+	uint16_t desc_to_clean_to;
+	uint16_t nb_tx_to_clean;
+
+	volatile struct iecm_base_tx_desc *txd = txq->tx_ring;
+
+	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+	if (desc_to_clean_to >= nb_tx_desc)
+		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+	if ((txd[desc_to_clean_to].qw1 &
+			rte_cpu_to_le_64(IECM_TXD_QW1_DTYPE_M)) !=
+			rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE)) {
+		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
+			   "(port=%d queue=%d)", desc_to_clean_to,
+			   txq->port_id, txq->queue_id);
+		return -1;
+	}
+
+	if (last_desc_cleaned > desc_to_clean_to)
+		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+					    desc_to_clean_to);
+	else
+		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+					last_desc_cleaned);
+
+	txd[desc_to_clean_to].qw1 = 0;
+
+	txq->last_desc_cleaned = desc_to_clean_to;
+	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+
+	return 0;
+}
+
+/* set TSO context descriptor
+ * support IP -> L4 and IP -> IP -> L4
+ */
+static inline uint64_t
+idpf_set_tso_ctx(struct rte_mbuf *mbuf, union idpf_tx_offload tx_offload)
+{
+	uint64_t ctx_desc = 0;
+	uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+	if (!tx_offload.l4_len) {
+		PMD_TX_LOG(DEBUG, "L4 length set to 0");
+		return ctx_desc;
+	}
+
+	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+
+	cd_cmd = IECM_TX_CTX_DESC_TSO;
+	cd_tso_len = mbuf->pkt_len - hdr_len;
+	ctx_desc |= ((uint64_t)cd_cmd << IECM_TXD_CTX_QW1_CMD_S) |
+		     ((uint64_t)cd_tso_len << IECM_TXD_CTX_QW1_TSO_LEN_S) |
+		     ((uint64_t)mbuf->tso_segsz << IECM_TXD_CTX_QW1_MSS_S);
+
+	return ctx_desc;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+idpf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size)
+{
+	return rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DATA |
+				((uint64_t)td_cmd  << IECM_TXD_QW1_CMD_S) |
+				((uint64_t)td_offset <<
+				 IECM_TXD_QW1_OFFSET_S) |
+				((uint64_t)size  <<
+				 IECM_TXD_QW1_TX_BUF_SZ_S));
+}
+
+/* TX function */
+uint16_t
+idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	volatile struct iecm_base_tx_desc *txd;
+	volatile struct iecm_base_tx_desc *txr;
+	union idpf_tx_offload tx_offload = {0};
+	struct idpf_tx_entry *txe, *txn;
+	struct idpf_tx_entry *sw_ring;
+	struct idpf_tx_queue *txq;
+	struct rte_mbuf *tx_pkt;
+	struct rte_mbuf *m_seg;
+	uint64_t buf_dma_addr;
+	uint32_t td_offset;
+	uint64_t ol_flags;
+	uint16_t tx_last;
+	uint16_t nb_used;
+	uint16_t nb_ctx;
+	uint32_t td_cmd;
+	uint16_t tx_id;
+	uint16_t nb_tx;
+	uint16_t slen;
+
+	nb_tx = 0;
+	txq = tx_queue;
+
+	if (unlikely(!txq) || unlikely(!txq->q_started))
+		return nb_tx;
+
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+	tx_id = txq->tx_tail;
+	txe = &sw_ring[tx_id];
+
+	/* Check if the descriptor ring needs to be cleaned. */
+	if (txq->nb_free < txq->free_thresh)
+		(void)idpf_xmit_cleanup(txq);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		td_cmd = 0;
+		td_offset = 0;
+
+		tx_pkt = *tx_pkts++;
+		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+		ol_flags = tx_pkt->ol_flags;
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+		/* Calculate the number of context descriptors needed. */
+		nb_ctx = idpf_calc_context_desc(ol_flags);
+
+		/* The number of descriptors that must be allocated for
+		 * a packet equals the number of segments of that packet,
+		 * plus 1 context descriptor if needed.
+		 */
+		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+		/* Circular ring */
+		if (tx_last >= txq->nb_tx_desc)
+			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
+			   " tx_first=%u tx_last=%u",
+			   txq->port_id, txq->queue_id, tx_id, tx_last);
+
+		if (nb_used > txq->nb_free) {
+			if (idpf_xmit_cleanup(txq)) {
+				if (nb_tx == 0)
+					return 0;
+				goto end_of_tx;
+			}
+			if (unlikely(nb_used > txq->rs_thresh)) {
+				while (nb_used > txq->nb_free) {
+					if (idpf_xmit_cleanup(txq)) {
+						if (nb_tx == 0)
+							return 0;
+						goto end_of_tx;
+					}
+				}
+			}
+		}
+
+		/* According to the datasheet, bit 2 is reserved and must be
+		 * set to 1.
+		 */
+		td_cmd |= 0x04;
+
+		if (nb_ctx) {
+			/* Setup TX context descriptor if required */
+			volatile union iecm_flex_tx_ctx_desc *ctx_txd =
+				(volatile union iecm_flex_tx_ctx_desc *)
+							&txr[tx_id];
+
+			txn = &sw_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			/* TSO enabled */
+			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+							ctx_txd);
+
+			txe->last_id = tx_last;
+			tx_id = txe->next_id;
+			txe = txn;
+		}
+
+		m_seg = tx_pkt;
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+
+			if (txe->mbuf)
+				rte_pktmbuf_free_seg(txe->mbuf);
+			txe->mbuf = m_seg;
+
+			/* Setup TX Descriptor */
+			slen = m_seg->data_len;
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+			txd->qw1 = idpf_build_ctob(td_cmd, td_offset, slen);
+
+			txe->last_id = tx_last;
+			tx_id = txe->next_id;
+			txe = txn;
+			m_seg = m_seg->next;
+		} while (m_seg);
+
+		/* The last packet data descriptor needs End Of Packet (EOP) */
+		td_cmd |= IECM_TX_DESC_CMD_EOP;
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+
+		if (txq->nb_used >= txq->rs_thresh) {
+			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
+				   "%4u (port=%d queue=%d)",
+				   tx_last, txq->port_id, txq->queue_id);
+
+			td_cmd |= IECM_TX_DESC_CMD_RS;
+
+			/* Update txq RS bit counters */
+			txq->nb_used = 0;
+		}
+
+		txd->qw1 |=
+			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+					 IECM_TXD_QW1_CMD_S);
+	}
+
+end_of_tx:
+	rte_wmb();
+
+	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+
+	IECM_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+	txq->tx_tail = tx_id;
+
+	return nb_tx;
+}
+
+/* TX prep functions */
+uint16_t
+idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+	       uint16_t nb_pkts)
+{
+	int i, ret;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+
+		/* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. */
+		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+			if (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {
+				rte_errno = EINVAL;
+				return i;
+			}
+		} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||
+			   (m->tso_segsz > IDPF_MAX_TSO_MSS)) {
+			/* An MSS outside this range is considered malicious */
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		if (ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
+
+		if (!m->pkt_len) {
+			rte_errno = EINVAL;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
+void
+idpf_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		dev->rx_pkt_burst = idpf_splitq_recv_pkts;
+		return;
+	} else {
+		dev->rx_pkt_burst = idpf_singleq_recv_pkts;
+		return;
+	}
+}
+
+void
+idpf_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		dev->tx_pkt_burst = idpf_splitq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_prep_pkts;
+		return;
+	} else {
+		dev->tx_pkt_burst = idpf_singleq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_prep_pkts;
+		return;
+	}
+}
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index f2947a8492..3ccf9efe50 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -43,6 +43,22 @@ 
 #define IDPF_TSO_MAX_SEG	UINT8_MAX
 #define IDPF_TX_MAX_MTU_SEG     8
 
+#define IDPF_TX_CKSUM_OFFLOAD_MASK (		\
+		RTE_MBUF_F_TX_IP_CKSUM |	\
+		RTE_MBUF_F_TX_L4_MASK |		\
+		RTE_MBUF_F_TX_TCP_SEG)
+
+#define IDPF_TX_OFFLOAD_MASK (			\
+		RTE_MBUF_F_TX_OUTER_IPV6 |	\
+		RTE_MBUF_F_TX_OUTER_IPV4 |	\
+		RTE_MBUF_F_TX_IPV6 |		\
+		RTE_MBUF_F_TX_IPV4 |		\
+		RTE_MBUF_F_TX_VLAN |		\
+		RTE_MBUF_F_TX_IP_CKSUM |	\
+		RTE_MBUF_F_TX_L4_MASK |		\
+		RTE_MBUF_F_TX_TCP_SEG |		\
+		RTE_ETH_TX_OFFLOAD_SECURITY)
+
 #define IDPF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
 
@@ -187,8 +203,20 @@  int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
+uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts);
+uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts);
+uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+				uint16_t nb_pkts);
+uint16_t idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts);
+uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts);
 void idpf_stop_queues(struct rte_eth_dev *dev);
 
+void idpf_set_rx_function(struct rte_eth_dev *dev);
+void idpf_set_tx_function(struct rte_eth_dev *dev);
 
 const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
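
Once idpf_set_rx_function()/idpf_set_tx_function() have installed the burst
callbacks (at dev_start, and for secondary processes during dev_init),
applications reach the new datapath through the generic ethdev burst API. A
minimal sketch of a forwarding loop follows, assuming port 0/queue 0 are
already configured; it uses only standard rte_ethdev calls and is not part of
this patch.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
forward_loop(uint16_t port_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx;

	for (;;) {
		/* Dispatches to idpf_splitq_recv_pkts() or
		 * idpf_singleq_recv_pkts() depending on the Rx queue model.
		 */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		/* Dispatches to idpf_splitq_xmit_pkts() or
		 * idpf_singleq_xmit_pkts(); idpf_prep_pkts() runs only when
		 * the application calls rte_eth_tx_prepare().
		 */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

		/* Free any packets the Tx ring could not accept. */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
}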