[v5,16/24] net/ngbe: add Tx queue setup and release

Message ID 20210602094108.1575640-17-jiawenwu@trustnetic.com (mailing list archive)
State Changes Requested
Delegated to: Andrew Rybchenko
Series: net: ngbe PMD

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Jiawen Wu June 2, 2021, 9:41 a.m. UTC
Set up device Tx queue and release Tx queue.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/ngbe_ethdev.c |   2 +
 drivers/net/ngbe/ngbe_ethdev.h |   6 +
 drivers/net/ngbe/ngbe_rxtx.c   | 212 +++++++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_rxtx.h   |  91 ++++++++++++++
 4 files changed, 311 insertions(+)

Comments

Andrew Rybchenko June 14, 2021, 6:59 p.m. UTC | #1
On 6/2/21 12:41 PM, Jiawen Wu wrote:
> Set up device Tx queue and release Tx queue.
> 
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
>   drivers/net/ngbe/ngbe_ethdev.c |   2 +
>   drivers/net/ngbe/ngbe_ethdev.h |   6 +
>   drivers/net/ngbe/ngbe_rxtx.c   | 212 +++++++++++++++++++++++++++++++++
>   drivers/net/ngbe/ngbe_rxtx.h   |  91 ++++++++++++++
>   4 files changed, 311 insertions(+)
> 
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 8eb41a7a2b..2f8ac48f33 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -663,6 +663,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
>   	.link_update                = ngbe_dev_link_update,
>   	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
>   	.rx_queue_release           = ngbe_dev_rx_queue_release,
> +	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
> +	.tx_queue_release           = ngbe_dev_tx_queue_release,
>   };
>   
>   RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index c324ca7e0f..f52d813a47 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -57,11 +57,17 @@ struct ngbe_adapter {
>   
>   void ngbe_dev_rx_queue_release(void *rxq);
>   
> +void ngbe_dev_tx_queue_release(void *txq);
> +
>   int  ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>   		uint16_t nb_rx_desc, unsigned int socket_id,
>   		const struct rte_eth_rxconf *rx_conf,
>   		struct rte_mempool *mb_pool);
>   
> +int  ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
> +		uint16_t nb_tx_desc, unsigned int socket_id,
> +		const struct rte_eth_txconf *tx_conf);
> +
>   int
>   ngbe_dev_link_update_share(struct rte_eth_dev *dev,
>   		int wait_to_complete);
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index 9992983bef..2d8db3245f 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -15,6 +15,99 @@
>   #include "ngbe_ethdev.h"
>   #include "ngbe_rxtx.h"
>   
> +#ifndef DEFAULT_TX_FREE_THRESH
> +#define DEFAULT_TX_FREE_THRESH 32
> +#endif

The define definitely belongs in a header, since
it should be reported in dev_info.
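
That is, the default can only show up in dev_info if the driver's
dev_infos_get callback (not part of this patch) fills it in; a minimal
sketch, assuming the define moves to ngbe_rxtx.h:

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
	};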

> +
> +/*********************************************************************
> + *
> + *  Queue management functions
> + *
> + **********************************************************************/
> +
> +static void __rte_cold
> +ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
> +{
> +	unsigned int i;
> +
> +	if (txq->sw_ring != NULL) {
> +		for (i = 0; i < txq->nb_tx_desc; i++) {
> +			if (txq->sw_ring[i].mbuf != NULL) {
> +				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
> +				txq->sw_ring[i].mbuf = NULL;
> +			}
> +		}
> +	}
> +}
> +
> +static void __rte_cold
> +ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
> +{
> +	if (txq != NULL &&
> +	    txq->sw_ring != NULL)

The check for txq->sw_ring is not required; rte_free() does nothing
when passed a NULL pointer.

> +		rte_free(txq->sw_ring);
> +}
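
Something like the following would suffice (a sketch, not the author's
code):

	static void __rte_cold
	ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
	{
		if (txq != NULL)
			rte_free(txq->sw_ring); /* rte_free() accepts NULL */
	}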
> +
> +static void __rte_cold
> +ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
> +{
> +	if (txq != NULL && txq->ops != NULL) {
> +		txq->ops->release_mbufs(txq);
> +		txq->ops->free_swring(txq);
> +		rte_free(txq);

Shouldn't we free txq even if ops is NULL?

> +	}
> +}
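
One possible shape of the fix (a sketch): release the per-queue
resources only when ops is set, but always free the queue structure
itself.

	static void __rte_cold
	ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
	{
		if (txq == NULL)
			return;
		if (txq->ops != NULL) {
			txq->ops->release_mbufs(txq);
			txq->ops->free_swring(txq);
		}
		rte_free(txq);
	}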
> +
> +void __rte_cold
> +ngbe_dev_tx_queue_release(void *txq)
> +{
> +	ngbe_tx_queue_release(txq);
> +}
> +
> +/* (Re)set dynamic ngbe_tx_queue fields to defaults */
> +static void __rte_cold
> +ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
> +{
> +	static const struct ngbe_tx_desc zeroed_desc = {0};
> +	struct ngbe_tx_entry *txe = txq->sw_ring;
> +	uint16_t prev, i;
> +
> +	/* Zero out HW ring memory */
> +	for (i = 0; i < txq->nb_tx_desc; i++)
> +		txq->tx_ring[i] = zeroed_desc;
> +
> +	/* Initialize SW ring entries */
> +	prev = (uint16_t)(txq->nb_tx_desc - 1);
> +	for (i = 0; i < txq->nb_tx_desc; i++) {
> +		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];

Why is volatile used above? Please add a comment.
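
For reference, a comment along these lines would answer it, assuming
the usual reason (the ring lives in DMA memory shared with the NIC):

	/*
	 * The descriptor ring is shared with the hardware (DMA);
	 * going through a volatile pointer keeps the compiler from
	 * caching or eliding the store of the DD bit below.
	 */
	volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];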

> +
> +		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
> +		txe[i].mbuf = NULL;
> +		txe[i].last_id = i;
> +		txe[prev].next_id = i;
> +		prev = i;
> +	}
> +
> +	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
> +	txq->tx_tail = 0;
> +
> +	/*
> +	 * Always allow 1 descriptor to be un-allocated to avoid
> +	 * a H/W race condition
> +	 */
> +	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
> +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
> +	txq->ctx_curr = 0;
> +	memset((void *)&txq->ctx_cache, 0,
> +		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
> +}
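
The "1 descriptor to be un-allocated" sentinel above is the usual ring
trick: if every descriptor could be outstanding, a completely full ring
would be indistinguishable from an empty one (head == tail in both
cases), so at most nb_tx_desc - 1 descriptors are ever handed to the
hardware.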
> +
> +static const struct ngbe_txq_ops def_txq_ops = {
> +	.release_mbufs = ngbe_tx_queue_release_mbufs,
> +	.free_swring = ngbe_tx_free_swring,
> +	.reset = ngbe_reset_tx_queue,
> +};
> +
>   uint64_t
>   ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
>   {
> @@ -42,6 +135,125 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
>   	return tx_offload_capa;
>   }
>   
> +int __rte_cold
> +ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> +			 uint16_t queue_idx,
> +			 uint16_t nb_desc,
> +			 unsigned int socket_id,
> +			 const struct rte_eth_txconf *tx_conf)
> +{
> +	const struct rte_memzone *tz;
> +	struct ngbe_tx_queue *txq;
> +	struct ngbe_hw     *hw;
> +	uint16_t tx_free_thresh;
> +	uint64_t offloads;
> +
> +	PMD_INIT_FUNC_TRACE();
> +	hw = NGBE_DEV_HW(dev);
> +
> +	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
> +
> +	/*
> +	 * Validate number of transmit descriptors.
> +	 * It must not exceed hardware maximum, and must be multiple
> +	 * of NGBE_ALIGN.
> +	 */
> +	if (nb_desc % NGBE_TXD_ALIGN != 0 ||
> +	    nb_desc > NGBE_RING_DESC_MAX ||
> +	    nb_desc < NGBE_RING_DESC_MIN) {
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
> +	 * descriptors are used or if the number of descriptors required
> +	 * to transmit a packet is greater than the number of free TX
> +	 * descriptors.
> +	 * One descriptor in the TX ring is used as a sentinel to avoid a
> +	 * H/W race condition, hence the maximum threshold constraints.
> +	 * When set to zero use default values.
> +	 */
> +	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
> +			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
> +	if (tx_free_thresh >= (nb_desc - 3)) {
> +		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
> +			     "TX descriptors minus 3. (tx_free_thresh=%u "
> +			     "port=%d queue=%d)",
> +			     (unsigned int)tx_free_thresh,
> +			     (int)dev->data->port_id, (int)queue_idx);
> +		return -(EINVAL);
> +	}
> +
> +	if ((nb_desc % tx_free_thresh) != 0) {

I guess the internal parentheses are not required here.

> +		PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
> +			     "number of TX descriptors. (tx_free_thresh=%u "
> +			     "port=%d queue=%d)", (unsigned int)tx_free_thresh,
> +			     (int)dev->data->port_id, (int)queue_idx);
> +		return -(EINVAL);
> +	}
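
As a worked example of the two checks: with nb_desc = 512, the fallback
tx_free_thresh of 32 passes both (32 < 512 - 3 and 512 % 32 == 0),
whereas a value like 96 would be rejected by the divisor check
(512 % 96 == 32).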
> +
> +	/* Free memory prior to re-allocation if needed... */
> +	if (dev->data->tx_queues[queue_idx] != NULL) {
> +		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
> +		dev->data->tx_queues[queue_idx] = NULL;
> +	}
> +
> +	/* First allocate the tx queue data structure */
> +	txq = rte_zmalloc_socket("ethdev TX queue",
> +				 sizeof(struct ngbe_tx_queue),
> +				 RTE_CACHE_LINE_SIZE, socket_id);
> +	if (txq == NULL)
> +		return -ENOMEM;
> +
> +	/*
> +	 * Allocate TX ring hardware descriptors. A memzone large enough to
> +	 * handle the maximum ring size is allocated in order to allow for
> +	 * resizing in later calls to the queue setup function.
> +	 */
> +	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
> +			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
> +			NGBE_ALIGN, socket_id);
> +	if (tz == NULL) {
> +		ngbe_tx_queue_release(txq);
> +		return -ENOMEM;
> +	}
> +
> +	txq->nb_tx_desc = nb_desc;
> +	txq->tx_free_thresh = tx_free_thresh;
> +	txq->pthresh = tx_conf->tx_thresh.pthresh;
> +	txq->hthresh = tx_conf->tx_thresh.hthresh;
> +	txq->wthresh = tx_conf->tx_thresh.wthresh;
> +	txq->queue_id = queue_idx;
> +	txq->reg_idx = queue_idx;
> +	txq->port_id = dev->data->port_id;
> +	txq->offloads = offloads;
> +	txq->ops = &def_txq_ops;
> +	txq->tx_deferred_start = tx_conf->tx_deferred_start;
> +
> +	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
> +	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
> +
> +	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
> +	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
> +
> +	/* Allocate software ring */
> +	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
> +				sizeof(struct ngbe_tx_entry) * nb_desc,
> +				RTE_CACHE_LINE_SIZE, socket_id);
> +	if (txq->sw_ring == NULL) {
> +		ngbe_tx_queue_release(txq);
> +		return -ENOMEM;
> +	}
> +	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
> +		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
> +
> +	txq->ops->reset(txq);
> +
> +	dev->data->tx_queues[queue_idx] = txq;
> +
> +	return 0;
> +}
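
For context, the application-facing path is the standard ethdev one:
rte_eth_tx_queue_setup() validates the port and queue index and then
dispatches to this callback. A minimal usage sketch (port_id and the
sizes are placeholders):

	struct rte_eth_txconf txconf = {
		.tx_free_thresh = 32,	/* 0 would select the driver default */
	};
	int ret;

	ret = rte_eth_tx_queue_setup(port_id, 0 /* queue */, 512 /* descs */,
				     rte_socket_id(), &txconf);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Tx queue setup failed: %d\n", ret);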
> +
>   /**
>    * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
>    *
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index e1676a53b4..2db5cc3f2a 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -43,6 +43,31 @@ struct ngbe_rx_desc {
>   	} qw1; /* also as r.hdr_addr */
>   };
>   
> +/*****************************************************************************
> + * Transmit Descriptor
> + *****************************************************************************/
> +/**
> + * Transmit Context Descriptor (NGBE_TXD_TYP=CTXT)
> + **/
> +struct ngbe_tx_ctx_desc {
> +	__le32 dw0; /* w.vlan_macip_lens  */

rte_* types should be used

> +	__le32 dw1; /* w.seqnum_seed      */
> +	__le32 dw2; /* w.type_tucmd_mlhl  */
> +	__le32 dw3; /* w.mss_l4len_idx    */
> +};
> +
> +/* @ngbe_tx_ctx_desc.dw3 */
> +#define NGBE_TXD_DD               MS(0, 0x1) /* descriptor done */
> +
> +/**
> + * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
> + **/
> +struct ngbe_tx_desc {
> +	__le64 qw0; /* r.buffer_addr ,  w.reserved    */

rte_le* types should be used

> +	__le32 dw2; /* r.cmd_type_len,  w.nxtseq_seed */
> +	__le32 dw3; /* r.olinfo_status, w.status      */
> +};
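
With the rte_* types asked for above, the two structures would
presumably read as follows (a sketch; rte_le32_t and rte_le64_t come
from rte_byteorder.h):

	#include <rte_byteorder.h>

	struct ngbe_tx_ctx_desc {
		rte_le32_t dw0; /* w.vlan_macip_lens  */
		rte_le32_t dw1; /* w.seqnum_seed      */
		rte_le32_t dw2; /* w.type_tucmd_mlhl  */
		rte_le32_t dw3; /* w.mss_l4len_idx    */
	};

	struct ngbe_tx_desc {
		rte_le64_t qw0; /* r.buffer_addr,   w.reserved    */
		rte_le32_t dw2; /* r.cmd_type_len,  w.nxtseq_seed */
		rte_le32_t dw3; /* r.olinfo_status, w.status      */
	};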
> +
>   #define RTE_PMD_NGBE_RX_MAX_BURST 32
>   
>   #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
> @@ -62,6 +87,15 @@ struct ngbe_scattered_rx_entry {
>   	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
>   };
>   
> +/**
> + * Structure associated with each descriptor of the TX ring of a TX queue.
> + */
> +struct ngbe_tx_entry {
> +	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> +	uint16_t next_id; /**< Index of next descriptor in ring. */
> +	uint16_t last_id; /**< Index of last scattered descriptor. */
> +};
> +
>   /**
>    * Structure associated with each RX queue.
>    */
> @@ -98,6 +132,63 @@ struct ngbe_rx_queue {
>   	struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
>   };
>   
> +/**
> + * NGBE CTX Constants
> + */
> +enum ngbe_ctx_num {
> +	NGBE_CTX_0    = 0, /**< CTX0 */
> +	NGBE_CTX_1    = 1, /**< CTX1  */
> +	NGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
> +};
> +
> +/**
> + * Structure to check if new context need be built
> + */
> +struct ngbe_ctx_info {
> +	uint64_t flags;           /**< ol_flags for context build. */
> +};
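
Judging by similar Intel-derived PMDs (ixgbe/txgbe), this cache
presumably lets the Tx path skip emitting a context descriptor when the
offload flags match one already programmed; a sketch of such a check
(an assumption — the matching logic is not part of this patch, and
ngbe_ctx_match is a hypothetical helper):

	/* Hypothetical helper: reuse the currently-programmed hardware
	 * context if the offload flags match the cached ones.
	 */
	static inline int
	ngbe_ctx_match(struct ngbe_tx_queue *txq, uint64_t ol_flags)
	{
		return txq->ctx_cache[txq->ctx_curr].flags == ol_flags;
	}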
> +
> +/**
> + * Structure associated with each TX queue.
> + */
> +struct ngbe_tx_queue {
> +	/** TX ring virtual address. */
> +	volatile struct ngbe_tx_desc *tx_ring;
> +	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
> +	struct ngbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD.*/
> +	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
> +	volatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */
> +	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
> +	uint16_t            tx_tail;       /**< current value of TDT reg. */
> +	/**< Start freeing TX buffers if there are less free descriptors than
> +	 *   this value.
> +	 */
> +	uint16_t            tx_free_thresh;
> +	/** Index to last TX descriptor to have been cleaned. */
> +	uint16_t            last_desc_cleaned;
> +	/** Total number of TX descriptors ready to be allocated. */
> +	uint16_t            nb_tx_free;
> +	uint16_t            tx_next_dd;    /**< next desc to scan for DD bit */
> +	uint16_t            queue_id;      /**< TX queue index. */
> +	uint16_t            reg_idx;       /**< TX queue register index. */
> +	uint16_t            port_id;       /**< Device port identifier. */
> +	uint8_t             pthresh;       /**< Prefetch threshold register. */
> +	uint8_t             hthresh;       /**< Host threshold register. */
> +	uint8_t             wthresh;       /**< Write-back threshold reg. */
> +	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
> +	uint32_t            ctx_curr;      /**< Hardware context states. */
> +	/** Hardware context0 history. */
> +	struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM];
> +	const struct ngbe_txq_ops *ops;       /**< txq ops */
> +	uint8_t             tx_deferred_start; /**< not in global dev start. */
> +};
> +
> +struct ngbe_txq_ops {
> +	void (*release_mbufs)(struct ngbe_tx_queue *txq);
> +	void (*free_swring)(struct ngbe_tx_queue *txq);
> +	void (*reset)(struct ngbe_tx_queue *txq);
> +};
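
The ops indirection presumably mirrors ixgbe/txgbe: it lets an
alternate Tx implementation (for example a vector path added later in
the series) install its own release/reset handlers without changing the
queue setup code.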
> +
>   uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
>   uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
>   uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
>