[08/10] net/nfp: merge the xmit function of NFD3

Message ID 20230607015709.336420-9-chaoyong.he@corigine.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Headers
Series: support rte_flow for flower firmware with NFDk

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He June 7, 2023, 1:57 a.m. UTC
  The only difference between the NFD3 xmit function of the PF representor
port and that of the PF is whether the metadata-handling logic is present.
So merge the two NFD3 xmit functions into one common implementation and use
a parameter to distinguish between them.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c | 124 +---------------------------
 drivers/net/nfp/nfd3/nfp_nfd3.h     |   4 +
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c  |  23 +++++-
 3 files changed, 24 insertions(+), 127 deletions(-)
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 748b45b9ad..3bc863b9f1 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -490,129 +490,7 @@  nfp_flower_pf_nfd3_xmit_pkts(void *tx_queue,
 		struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
 {
-	int i;
-	int pkt_size;
-	int dma_size;
-	uint64_t dma_addr;
-	uint16_t free_descs;
-	uint16_t issued_descs;
-	struct rte_mbuf *pkt;
-	struct nfp_net_hw *hw;
-	struct rte_mbuf **lmbuf;
-	struct nfp_net_txq *txq;
-	struct nfp_net_nfd3_tx_desc txd;
-	struct nfp_net_nfd3_tx_desc *txds;
-
-	txq = tx_queue;
-	hw = txq->hw;
-	txds = &txq->txds[txq->wr_p];
-
-	PMD_TX_LOG(DEBUG, "working for queue %hu at pos %u and %hu packets",
-			txq->qidx, txq->wr_p, nb_pkts);
-
-	if ((nfp_net_nfd3_free_tx_desc(txq) < nb_pkts) || (nfp_net_nfd3_txq_full(txq)))
-		nfp_net_tx_free_bufs(txq);
-
-	free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
-	if (unlikely(free_descs == 0))
-		return 0;
-
-	pkt = *tx_pkts;
-	issued_descs = 0;
-
-	/* Sending packets */
-	for (i = 0; i < nb_pkts && free_descs > 0; i++) {
-		/* Grabbing the mbuf linked to the current descriptor */
-		lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-		/* Warming the cache for releasing the mbuf later on */
-		RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
-
-		pkt = *(tx_pkts + i);
-
-		if (unlikely(pkt->nb_segs > 1 &&
-				!(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-			PMD_INIT_LOG(ERR, "Multisegment packet not supported");
-			goto xmit_end;
-		}
-
-		/* Checking if we have enough descriptors */
-		if (unlikely(pkt->nb_segs > free_descs))
-			goto xmit_end;
-
-		/*
-		 * Checksum and VLAN flags just in the first descriptor for a
-		 * multisegment packet, but TSO info needs to be in all of them.
-		 */
-		txd.data_len = pkt->pkt_len;
-		nfp_net_nfd3_tx_tso(txq, &txd, pkt);
-		nfp_net_nfd3_tx_cksum(txq, &txd, pkt);
-
-		if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
-				(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-			txd.flags |= NFD3_DESC_TX_VLAN;
-			txd.vlan = pkt->vlan_tci;
-		}
-
-		/*
-		 * mbuf data_len is the data in one segment and pkt_len data
-		 * in the whole packet. When the packet is just one segment,
-		 * then data_len = pkt_len
-		 */
-		pkt_size = pkt->pkt_len;
-
-		while (pkt != NULL && free_descs > 0) {
-			/* Copying TSO, VLAN and cksum info */
-			*txds = txd;
-
-			/* Releasing mbuf used by this descriptor previously*/
-			if (*lmbuf != NULL)
-				rte_pktmbuf_free_seg(*lmbuf);
-
-			/*
-			 * Linking mbuf with descriptor for being released
-			 * next time descriptor is used
-			 */
-			*lmbuf = pkt;
-
-			dma_size = pkt->data_len;
-			dma_addr = rte_mbuf_data_iova(pkt);
-
-			/* Filling descriptors fields */
-			txds->dma_len = dma_size;
-			txds->data_len = txd.data_len;
-			txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
-			txds->dma_addr_lo = (dma_addr & 0xffffffff);
-			free_descs--;
-
-			txq->wr_p++;
-			if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
-				txq->wr_p = 0;
-
-			pkt_size -= dma_size;
-
-			/*
-			 * Making the EOP, packets with just one segment
-			 * the priority
-			 */
-			if (likely(pkt_size == 0))
-				txds->offset_eop = NFD3_DESC_TX_EOP | FLOWER_PKT_DATA_OFFSET;
-			else
-				txds->offset_eop = 0;
-
-			pkt = pkt->next;
-			/* Referencing next free TX descriptor */
-			txds = &txq->txds[txq->wr_p];
-			lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-			issued_descs++;
-		}
-	}
-
-xmit_end:
-	/* Increment write pointers. Force memory write before we let HW know */
-	rte_wmb();
-	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
-
-	return i;
+	return nfp_net_nfd3_xmit_pkts_common(tx_queue, tx_pkts, nb_pkts, true);
 }
 
 static void
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h
index 5bf89868fc..910e622fa2 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3.h
+++ b/drivers/net/nfp/nfd3/nfp_nfd3.h
@@ -155,6 +155,10 @@  nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq,
 
 uint32_t nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf,
 		uint32_t port_id);
+uint16_t nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
+		struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts,
+		bool repr_flag);
 uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue,
 		struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index 2ad098a699..ee39686329 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -109,10 +109,20 @@  uint16_t
 nfp_net_nfd3_xmit_pkts(void *tx_queue,
 		struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
+{
+	return nfp_net_nfd3_xmit_pkts_common(tx_queue, tx_pkts, nb_pkts, false);
+}
+
+uint16_t
+nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
+		struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts,
+		bool repr_flag)
 {
 	int i;
 	int pkt_size;
 	int dma_size;
+	uint8_t offset;
 	uint64_t dma_addr;
 	uint16_t free_descs;
 	uint16_t issued_descs;
@@ -122,7 +132,6 @@  nfp_net_nfd3_xmit_pkts(void *tx_queue,
 	struct nfp_net_txq *txq;
 	struct nfp_net_nfd3_tx_desc txd;
 	struct nfp_net_nfd3_tx_desc *txds;
-	struct nfp_net_meta_raw meta_data;
 
 	txq = tx_queue;
 	hw = txq->hw;
@@ -146,7 +155,6 @@  nfp_net_nfd3_xmit_pkts(void *tx_queue,
 
 	/* Sending packets */
 	for (i = 0; i < nb_pkts && free_descs > 0; i++) {
-		memset(&meta_data, 0, sizeof(meta_data));
 		/* Grabbing the mbuf linked to the current descriptor */
 		lmbuf = &txq->txbufs[txq->wr_p].mbuf;
 		/* Warming the cache for releasing the mbuf later on */
@@ -154,7 +162,14 @@  nfp_net_nfd3_xmit_pkts(void *tx_queue,
 
 		pkt = *(tx_pkts + i);
 
-		nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
+		if (!repr_flag) {
+			struct nfp_net_meta_raw meta_data;
+			memset(&meta_data, 0, sizeof(meta_data));
+			nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
+			offset = meta_data.length;
+		} else {
+			offset = FLOWER_PKT_DATA_OFFSET;
+		}
 
 		if (unlikely(pkt->nb_segs > 1 &&
 				(hw->cap & NFP_NET_CFG_CTRL_GATHER) == 0)) {
@@ -222,7 +237,7 @@  nfp_net_nfd3_xmit_pkts(void *tx_queue,
 				txds->offset_eop = 0;
 
 			/* Set the meta_len */
-			txds->offset_eop |= meta_data.length;
+			txds->offset_eop |= offset;
 
 			pkt = pkt->next;
 			/* Referencing next free TX descriptor */