[08/44] net/cnxk: add Tx queue setup and release

Message ID: 20210306153404.10781-9-ndabilpuram@marvell.com (mailing list archive)
State: Superseded
Delegated to: Jerin Jacob
Series: Marvell CNXK Ethdev Driver

Checks

Context          Check      Description
ci/checkpatch    warning    coding style issues

Commit Message

Nithin Dabilpuram March 6, 2021, 3:33 p.m. UTC
Add Tx queue setup and release operations for CN9K and CN10K.
The release path is common, while setup is platform-dependent due
to differences in the fast-path Tx queue structures.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 doc/guides/nics/features/cnxk.ini     |  1 +
 doc/guides/nics/features/cnxk_vec.ini |  1 +
 doc/guides/nics/features/cnxk_vf.ini  |  1 +
 drivers/net/cnxk/cn10k_ethdev.c       | 71 +++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_ethdev.h       | 12 +++++
 drivers/net/cnxk/cn10k_tx.h           | 13 +++++
 drivers/net/cnxk/cn9k_ethdev.c        | 69 ++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.h        | 10 ++++
 drivers/net/cnxk/cn9k_tx.h            | 13 +++++
 drivers/net/cnxk/cnxk_ethdev.c        | 98 +++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev.h        |  3 ++
 11 files changed, 292 insertions(+)
 create mode 100644 drivers/net/cnxk/cn10k_tx.h
 create mode 100644 drivers/net/cnxk/cn9k_tx.h
Patch

diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index a9d2b03..462d7c4 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -11,6 +11,7 @@  Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini
index 6a8ca1f..09e0d3a 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -11,6 +11,7 @@  Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini
index f761638..4a93a35 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -10,6 +10,7 @@  Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
 Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
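
The three .ini updates above advertise the same capability: Tx queues can be
set up or released after the port has been started. A minimal application-side
sketch for probing this at run time, using the standard ethdev capability flag
(not part of the patch):

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)
		; /* rte_eth_tx_queue_setup() is legal on a started port */
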
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index f7e2f7b..e194b13 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -2,6 +2,76 @@ 
  * Copyright(C) 2021 Marvell.
  */
 #include "cn10k_ethdev.h"
+#include "cn10k_tx.h"
+
+static void
+nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
+		      uint16_t qid)
+{
+	struct nix_send_ext_s *send_hdr_ext;
+	union nix_send_hdr_w0_u send_hdr_w0;
+	union nix_send_sg_s sg_w0;
+
+	RTE_SET_USED(dev);
+
+	/* Initialize the fields based on basic single segment packet */
+	memset(&txq->cmd, 0, sizeof(txq->cmd));
+	send_hdr_w0.u = 0;
+	sg_w0.u = 0;
+
+	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+		send_hdr_w0.sizem1 = 2;
+
+		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
+		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+	} else {
+		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+		send_hdr_w0.sizem1 = 1;
+	}
+
+	send_hdr_w0.sq = qid;
+	sg_w0.subdc = NIX_SUBDC_SG;
+	sg_w0.segs = 1;
+	sg_w0.ld_type = NIX_SENDLDTYPE_LDD;
+
+	txq->send_hdr_w0 = send_hdr_w0.u;
+	txq->sg_w0 = sg_w0.u;
+
+	rte_wmb();
+}
+
+static int
+cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+			 uint16_t nb_desc, unsigned int socket,
+			 const struct rte_eth_txconf *tx_conf)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_eth_txq *txq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	RTE_SET_USED(socket);
+
+	/* Common Tx queue setup */
+	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
+				     sizeof(struct cn10k_eth_txq), tx_conf);
+	if (rc)
+		return rc;
+
+	sq = &dev->sqs[qid];
+	/* Update fast path queue */
+	txq = eth_dev->data->tx_queues[qid];
+	txq->fc_mem = sq->fc;
+	/* Store lmt base in tx queue for easy access */
+	txq->lmt_base = dev->nix.lmt_base;
+	txq->io_addr = sq->io_addr;
+	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
+
+	nix_form_default_desc(dev, txq, qid);
+	return 0;
+}
 
 static int
 cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
@@ -76,6 +146,7 @@  nix_eth_dev_ops_override(void)
 
 	/* Update platform specific ops */
 	cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
+	cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
 	cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
 }
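
For clarity on the sizem1 arithmetic in nix_form_default_desc() above: sizem1
encodes the command size in 128-bit units (pairs of 64-bit words), minus one.
A minimal sketch of the rule the two in-code comments apply (nix_cmd_sizem1()
is a hypothetical helper name, not in the patch):

	/* sizem1 = (number of 64-bit command words) / 2 - 1 */
	static inline uint16_t
	nix_cmd_sizem1(uint16_t nb_dwords)
	{
		/* With ext hdr: 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6 -> 2
		 * Without:      2(HDR) + 1(SG) + 1(IOVA)               = 4 -> 1
		 */
		return (nb_dwords / 2) - 1;
	}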
 
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index 08e11bb..2157b16 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -6,6 +6,18 @@ 
 
 #include <cnxk_ethdev.h>
 
+struct cn10k_eth_txq {
+	uint64_t send_hdr_w0;
+	uint64_t sg_w0;
+	int64_t fc_cache_pkts;
+	uint64_t *fc_mem;
+	uintptr_t lmt_base;
+	rte_iova_t io_addr;
+	uint16_t sqes_per_sqb_log2;
+	int16_t nb_sqb_bufs_adj;
+	uint64_t cmd[4];
+} __plt_cache_aligned;
+
 struct cn10k_eth_rxq {
 	uint64_t mbuf_initializer;
 	uintptr_t desc;
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
new file mode 100644
index 0000000..39d4755
--- /dev/null
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef __CN10K_TX_H__
+#define __CN10K_TX_H__
+
+#define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)
+#define NIX_TX_OFFLOAD_TSO_F	      BIT(4)
+
+#define NIX_TX_NEED_EXT_HDR                                                    \
+	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+
+#endif /* __CN10K_TX_H__ */
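
The NIX_TX_NEED_EXT_HDR mask above captures that a send extension
subdescriptor is only required when VLAN/QinQ insertion or TSO is in use. A
hedged sketch of how the flags compose (the mapping from ethdev Tx offloads to
NIX_TX_* flags shown here is an assumption for illustration; the patch itself
only defines the flag values):

	uint16_t flags = 0;

	/* Hypothetical offload-to-flag mapping */
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= NIX_TX_OFFLOAD_TSO_F;

	if (flags & NIX_TX_NEED_EXT_HDR)
		; /* 6-dword command: sizem1 = 2, NIX_SUBDC_EXT present */
	else
		; /* 4-dword command: sizem1 = 1 */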
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 79c30aa..e97ce15 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -2,6 +2,74 @@ 
  * Copyright(C) 2021 Marvell.
  */
 #include "cn9k_ethdev.h"
+#include "cn9k_tx.h"
+
+static void
+nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
+		      uint16_t qid)
+{
+	struct nix_send_ext_s *send_hdr_ext;
+	struct nix_send_hdr_s *send_hdr;
+	union nix_send_sg_s *sg;
+
+	RTE_SET_USED(dev);
+
+	/* Initialize the fields based on basic single segment packet */
+	memset(&txq->cmd, 0, sizeof(txq->cmd));
+
+	if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+		send_hdr->w0.sizem1 = 2;
+
+		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
+		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+		sg = (union nix_send_sg_s *)&txq->cmd[4];
+	} else {
+		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+		send_hdr->w0.sizem1 = 1;
+		sg = (union nix_send_sg_s *)&txq->cmd[2];
+	}
+
+	send_hdr->w0.sq = qid;
+	sg->subdc = NIX_SUBDC_SG;
+	sg->segs = 1;
+	sg->ld_type = NIX_SENDLDTYPE_LDD;
+
+	rte_wmb();
+}
+
+static int
+cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+			uint16_t nb_desc, unsigned int socket,
+			const struct rte_eth_txconf *tx_conf)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn9k_eth_txq *txq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	RTE_SET_USED(socket);
+
+	/* Common Tx queue setup */
+	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
+				     sizeof(struct cn9k_eth_txq), tx_conf);
+	if (rc)
+		return rc;
+
+	sq = &dev->sqs[qid];
+	/* Update fast path queue */
+	txq = eth_dev->data->tx_queues[qid];
+	txq->fc_mem = sq->fc;
+	txq->lmt_addr = sq->lmt_addr;
+	txq->io_addr = sq->io_addr;
+	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
+
+	nix_form_default_desc(dev, txq, qid);
+	return 0;
+}
 
 static int
 cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
@@ -87,6 +155,7 @@  nix_eth_dev_ops_override(void)
 
 	/* Update platform specific ops */
 	cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
+	cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
 	cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
 }
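
The cn9k path differs from cn10k in where the template lives: cn9k precomputes
the whole command into txq->cmd[8], while cn10k keeps only the
send_hdr_w0/sg_w0 words and relies on the LMT base. For reference, the cmd[]
layout implied by nix_form_default_desc() above (the IOVA slots follow from
the sizem1 comments):

	/* With NIX_TX_NEED_EXT_HDR:        Without it:
	 *   cmd[0..1]  nix_send_hdr_s        cmd[0..1]  nix_send_hdr_s
	 *   cmd[2..3]  nix_send_ext_s        cmd[2]     nix_send_sg_s
	 *   cmd[4]     nix_send_sg_s         cmd[3]     per-packet IOVA
	 *   cmd[5]     per-packet IOVA
	 */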
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 6384609..9ebf68f 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -6,6 +6,16 @@ 
 
 #include <cnxk_ethdev.h>
 
+struct cn9k_eth_txq {
+	uint64_t cmd[8];
+	int64_t fc_cache_pkts;
+	uint64_t *fc_mem;
+	void *lmt_addr;
+	rte_iova_t io_addr;
+	uint16_t sqes_per_sqb_log2;
+	int16_t nb_sqb_bufs_adj;
+} __plt_cache_aligned;
+
 struct cn9k_eth_rxq {
 	uint64_t mbuf_initializer;
 	uint64_t data_off;
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
new file mode 100644
index 0000000..bb6379b
--- /dev/null
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef __CN9K_TX_H__
+#define __CN9K_TX_H__
+
+#define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)
+#define NIX_TX_OFFLOAD_TSO_F	      BIT(4)
+
+#define NIX_TX_NEED_EXT_HDR                                                    \
+	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+
+#endif /* __CN9K_TX_H__ */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index dc41f78..5772345 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -66,6 +66,103 @@  cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 	return *tmp;
 }
 
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+	/*
+	 * Maximum three segments can be supported with W8, Choose
+	 * NIX_MAXSQESZ_W16 for multi segment offload.
+	 */
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		return NIX_MAXSQESZ_W16;
+	else
+		return NIX_MAXSQESZ_W8;
+}
+
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+			uint16_t nb_desc, uint16_t fp_tx_q_sz,
+			const struct rte_eth_txconf *tx_conf)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct cnxk_eth_txq_sp *txq_sp;
+	struct roc_nix_sq *sq;
+	size_t txq_sz;
+	int rc;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (eth_dev->data->tx_queues[qid] != NULL) {
+		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+		eth_dev->data->tx_queues[qid] = NULL;
+	}
+
+	/* Setup ROC SQ */
+	sq = &dev->sqs[qid];
+	sq->qid = qid;
+	sq->nb_desc = nb_desc;
+	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+	rc = roc_nix_sq_init(&dev->nix, sq);
+	if (rc) {
+		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+		return rc;
+	}
+
+	rc = -ENOMEM;
+	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+	if (!txq_sp) {
+		plt_err("Failed to alloc tx queue mem");
+		rc |= roc_nix_sq_fini(sq);
+		return rc;
+	}
+
+	txq_sp->dev = dev;
+	txq_sp->qid = qid;
+	txq_sp->qconf.conf.tx = *tx_conf;
+	txq_sp->qconf.nb_desc = nb_desc;
+
+	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+	/* Store start of fast path area */
+	eth_dev->data->tx_queues[qid] = txq_sp + 1;
+	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(void *txq)
+{
+	struct cnxk_eth_txq_sp *txq_sp;
+	struct cnxk_eth_dev *dev;
+	struct roc_nix_sq *sq;
+	uint16_t qid;
+	int rc;
+
+	if (!txq)
+		return;
+
+	txq_sp = ((struct cnxk_eth_txq_sp *)txq) - 1;
+	dev = txq_sp->dev;
+	qid = txq_sp->qid;
+
+	plt_nix_dbg("Releasing txq %u", qid);
+
+	/* Cleanup ROC SQ */
+	sq = &dev->sqs[qid];
+	rc = roc_nix_sq_fini(sq);
+	if (rc)
+		plt_err("Failed to cleanup sq, rc=%d", rc);
+
+	/* Finally free */
+	plt_free(txq_sp);
+}
+
 int
 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			uint16_t nb_desc, uint16_t fp_rx_q_sz,
@@ -726,6 +823,7 @@  cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 struct eth_dev_ops cnxk_eth_dev_ops = {
 	.dev_infos_get = cnxk_nix_info_get,
 	.link_update = cnxk_nix_link_update,
+	.tx_queue_release = cnxk_nix_tx_queue_release,
 	.rx_queue_release = cnxk_nix_rx_queue_release,
 };
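
The "txq_sp + 1" stored in tx_queues[qid] above is the central layout trick:
the common slow-path context sits immediately before the platform-specific
fast-path queue in a single allocation, so each pointer is recoverable from
the other without an extra lookup:

	/* txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
	 *
	 * +------------------------+----------------------------+
	 * | struct cnxk_eth_txq_sp | fast-path txq (fp_tx_q_sz) |
	 * +------------------------+----------------------------+
	 * ^ txq_sp                 ^ txq_sp + 1 == tx_queues[qid]
	 *
	 * cnxk_nix_tx_queue_release() reverses the offset:
	 * txq_sp = ((struct cnxk_eth_txq_sp *)txq) - 1;
	 */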
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index e938c64..90c8ff6 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -182,6 +182,9 @@  int cnxk_nix_remove(struct rte_pci_device *pci_dev);
 int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
 		      struct rte_eth_dev_info *dev_info);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
+int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
+			    const struct rte_eth_txconf *tx_conf);
 int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
 			    const struct rte_eth_rxconf *rx_conf,
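
With the ops wired up, the new queue path is exercised through the normal
ethdev API. A minimal application-side sketch (generic DPDK calls, not part of
the patch; port_id is assumed to be an already-configured cnxk port):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int rc;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;

	/* Dispatches to cn9k/cn10k_nix_tx_queue_setup() via cnxk_eth_dev_ops */
	rc = rte_eth_tx_queue_setup(port_id, 0 /* qid */, 1024 /* nb_desc */,
				    rte_eth_dev_socket_id(port_id), &txconf);
	if (rc < 0)
		rte_exit(EXIT_FAILURE, "Tx queue setup failed, rc=%d\n", rc);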