[v7,3/5] net/enetfec: support queue configuration

Message ID 20211103192045.22240-4-apeksha.gupta@nxp.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit
Series: drivers/net: add NXP ENETFEC driver

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Apeksha Gupta Nov. 3, 2021, 7:20 p.m. UTC
  This patch adds Rx/Tx queue configuration setup operations.
On packet reception, the respective BD ring status bit is set,
which is then used for packet processing.
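
For context, a minimal, hypothetical sketch of how the Rx path could test that
status bit, using identifiers from this series (struct bufdesc, bd_sc,
RX_BD_EMPTY); the burst receive function itself lands in a later patch, so
treat this as illustration only, not as the driver's actual code:

static inline int
enet_bd_has_packet(struct bufdesc *bdp)
{
	/* Hardware clears RX_BD_EMPTY in bd_sc once it has written a frame
	 * into the buffer, so a cleared bit means a packet is ready.
	 */
	uint16_t status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));

	return (status & RX_BD_EMPTY) == 0;
}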

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 drivers/net/enetfec/enet_ethdev.c | 230 +++++++++++++++++++++++++++++-
 drivers/net/enetfec/enet_ethdev.h |  73 ++++++++++
 2 files changed, 302 insertions(+), 1 deletion(-)
  

Comments

Ferruh Yigit Nov. 4, 2021, 6:26 p.m. UTC | #1
On 11/3/2021 7:20 PM, Apeksha Gupta wrote:
> This patch adds Rx/Tx queue configuration setup operations.
> On packet reception the respective BD Ring status bit is set
> which is then used for packet processing.
> 
> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> ---
>   drivers/net/enetfec/enet_ethdev.c | 230 +++++++++++++++++++++++++++++-
>   drivers/net/enetfec/enet_ethdev.h |  73 ++++++++++
>   2 files changed, 302 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
> index 410c395039..aa96093eb8 100644
> --- a/drivers/net/enetfec/enet_ethdev.c
> +++ b/drivers/net/enetfec/enet_ethdev.c
> @@ -45,6 +45,19 @@
>   
>   static uint32_t enetfec_e_cntl;
>   
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_VLAN_STRIP |
> +		DEV_RX_OFFLOAD_CHECKSUM;
> +
> +static uint64_t dev_tx_offloads_sup =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM;
> +

The comment in the previous version seems ignored, copying down:

The macro names are updated in ethdev, can you please update them?
like: DEV_RX_OFFLOAD_IPV4_CKSUM -> RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
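
As an illustration, the two capability variables with the renamed macros could
read as below; the offload set is unchanged from the patch, only the names
switch to the RTE_ETH_* forms the comment points to:

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_CHECKSUM;

static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;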


>   /*
>    * This function is called to start or restart the ENETFEC during a link
>    * change, transmit timeout, or to reconfigure the ENETFEC. The network
> @@ -204,10 +217,225 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
>   	return 0;
>   }
>   
> +static int
> +enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
> +	struct rte_eth_dev_info *dev_info)
> +{
> +	dev_info->max_rx_queues = ENETFEC_MAX_Q;
> +	dev_info->max_tx_queues = ENETFEC_MAX_Q;
> +	dev_info->rx_offload_capa = dev_rx_offloads_sup;
> +	dev_info->tx_offload_capa = dev_tx_offloads_sup;
> +	return 0;
> +}
> +
> +static const unsigned short offset_des_active_rxq[] = {
> +	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
> +};
> +
> +static const unsigned short offset_des_active_txq[] = {
> +	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
> +};
> +
> +static int
> +enetfec_tx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_desc,
> +			unsigned int socket_id __rte_unused,
> +			const struct rte_eth_txconf *tx_conf)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bdp, *bd_base;
> +	struct enetfec_priv_tx_q *txq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +		sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* Tx deferred start is not supported */
> +	if (tx_conf->tx_deferred_start) {
> +		ENETFEC_PMD_ERR("%p:Tx deferred start not supported",
> +			(void *)dev);
> +		return -EINVAL;
> +	}
> +
> +	/* allocate transmit queue */
> +	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
> +	if (txq == NULL) {
> +		ENETFEC_PMD_ERR("transmit queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_desc > MAX_TX_BD_RING_SIZE) {
> +		nb_desc = MAX_TX_BD_RING_SIZE;
> +		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");

Redundant '\n', as macro already adds one.
Can you please search and fix all usages?
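
For example, assuming the macro does append the newline as noted, the call
above would simply become:

	ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");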

> +	}
> +	txq->bd.ring_size = nb_desc;
> +	fep->total_tx_ring_size += txq->bd.ring_size;
> +	fep->tx_queues[queue_idx] = txq;
> +
> +	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
> +		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));
> +
> +	/* Set transmit descriptor base. */
> +	txq = fep->tx_queues[queue_idx];
> +	txq->fep = fep;
> +	size = dsize * txq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
> +	txq->bd.queue_id = queue_idx;
> +	txq->bd.base = bd_base;
> +	txq->bd.cur = bd_base;
> +	txq->bd.d_size = dsize;
> +	txq->bd.d_size_log2 = dsize_log2;
> +	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
> +			offset_des_active_txq[queue_idx];
> +	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
> +	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
> +	bdp = txq->bd.base;
> +	bdp = txq->bd.cur;
> +
> +	for (i = 0; i < txq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
> +		if (txq->tx_mbuf[i] != NULL) {
> +			rte_pktmbuf_free(txq->tx_mbuf[i]);
> +			txq->tx_mbuf[i] = NULL;
> +		}
> +		rte_write32(0, &bdp->bd_bufaddr);
> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &txq->bd);
> +	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
> +		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	txq->dirty_tx = bdp;
> +	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
> +	return 0;
> +}
> +
> +static int
> +enetfec_rx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_rx_desc,
> +			unsigned int socket_id __rte_unused,
> +			const struct rte_eth_rxconf *rx_conf,
> +			struct rte_mempool *mb_pool)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bd_base;
> +	struct bufdesc *bdp;
> +	struct enetfec_priv_rx_q *rxq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* Rx deferred start is not supported */
> +	if (rx_conf->rx_deferred_start) {
> +		ENETFEC_PMD_ERR("%p:Rx deferred start not supported",
> +			(void *)dev);
> +		return -EINVAL;
> +	}
> +
> +	/* allocate receive queue */
> +	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
> +	if (rxq == NULL) {
> +		ENETFEC_PMD_ERR("receive queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
> +		nb_rx_desc = MAX_RX_BD_RING_SIZE;
> +		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
> +	}
> +
> +	rxq->bd.ring_size = nb_rx_desc;
> +	fep->total_rx_ring_size += rxq->bd.ring_size;
> +	fep->rx_queues[queue_idx] = rxq;
> +
> +	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
> +		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
> +	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
> +		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));
> +
> +	/* Set receive descriptor base. */
> +	rxq = fep->rx_queues[queue_idx];
> +	rxq->pool = mb_pool;
> +	size = dsize * rxq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
> +	rxq->bd.queue_id = queue_idx;
> +	rxq->bd.base = bd_base;
> +	rxq->bd.cur = bd_base;
> +	rxq->bd.d_size = dsize;
> +	rxq->bd.d_size_log2 = dsize_log2;
> +	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
> +			offset_des_active_rxq[queue_idx];
> +	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
> +	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
> +
> +	rxq->fep = fep;
> +	bdp = rxq->bd.base;
> +	rxq->bd.cur = bdp;
> +
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		/* Initialize Rx buffers from pktmbuf pool */
> +		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
> +		if (mbuf == NULL) {
> +			ENETFEC_PMD_ERR("mbuf failed\n");
> +		goto err_alloc;

Wrong indentation.
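
For reference, the same block re-indented so the goto sits inside the if body
(and with the redundant newline dropped from the log message, per the earlier
comment):

		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf failed");
			goto err_alloc;
		}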
  

Patch

diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 410c395039..aa96093eb8 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -45,6 +45,19 @@ 
 
 static uint32_t enetfec_e_cntl;
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_CHECKSUM;
+
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
 /*
  * This function is called to start or restart the ENETFEC during a link
  * change, transmit timeout, or to reconfigure the ENETFEC. The network
@@ -204,10 +217,225 @@  enetfec_eth_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = ENETFEC_MAX_Q;
+	dev_info->max_tx_queues = ENETFEC_MAX_Q;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
+	return 0;
+}
+
+static const unsigned short offset_des_active_rxq[] = {
+	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
+};
+
+static int
+enetfec_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_desc,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_txconf *tx_conf)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bdp, *bd_base;
+	struct enetfec_priv_tx_q *txq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+		sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* Tx deferred start is not supported */
+	if (tx_conf->tx_deferred_start) {
+		ENETFEC_PMD_ERR("%p:Tx deferred start not supported",
+			(void *)dev);
+		return -EINVAL;
+	}
+
+	/* allocate transmit queue */
+	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
+	if (txq == NULL) {
+		ENETFEC_PMD_ERR("transmit queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_desc > MAX_TX_BD_RING_SIZE) {
+		nb_desc = MAX_TX_BD_RING_SIZE;
+		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
+	}
+	txq->bd.ring_size = nb_desc;
+	fep->total_tx_ring_size += txq->bd.ring_size;
+	fep->tx_queues[queue_idx] = txq;
+
+	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));
+
+	/* Set transmit descriptor base. */
+	txq = fep->tx_queues[queue_idx];
+	txq->fep = fep;
+	size = dsize * txq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
+	txq->bd.queue_id = queue_idx;
+	txq->bd.base = bd_base;
+	txq->bd.cur = bd_base;
+	txq->bd.d_size = dsize;
+	txq->bd.d_size_log2 = dsize_log2;
+	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
+			offset_des_active_txq[queue_idx];
+	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
+	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
+	bdp = txq->bd.base;
+	bdp = txq->bd.cur;
+
+	for (i = 0; i < txq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+		if (txq->tx_mbuf[i] != NULL) {
+			rte_pktmbuf_free(txq->tx_mbuf[i]);
+			txq->tx_mbuf[i] = NULL;
+		}
+		rte_write32(0, &bdp->bd_bufaddr);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &txq->bd);
+	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
+		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+	txq->dirty_tx = bdp;
+	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
+	return 0;
+}
+
+static int
+enetfec_rx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_rx_desc,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bd_base;
+	struct bufdesc *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* Rx deferred start is not supported */
+	if (rx_conf->rx_deferred_start) {
+		ENETFEC_PMD_ERR("%p:Rx deferred start not supported",
+			(void *)dev);
+		return -EINVAL;
+	}
+
+	/* allocate receive queue */
+	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+	if (rxq == NULL) {
+		ENETFEC_PMD_ERR("receive queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
+		nb_rx_desc = MAX_RX_BD_RING_SIZE;
+		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
+	}
+
+	rxq->bd.ring_size = nb_rx_desc;
+	fep->total_rx_ring_size += rxq->bd.ring_size;
+	fep->rx_queues[queue_idx] = rxq;
+
+	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
+	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));
+
+	/* Set receive descriptor base. */
+	rxq = fep->rx_queues[queue_idx];
+	rxq->pool = mb_pool;
+	size = dsize * rxq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
+	rxq->bd.queue_id = queue_idx;
+	rxq->bd.base = bd_base;
+	rxq->bd.cur = bd_base;
+	rxq->bd.d_size = dsize;
+	rxq->bd.d_size_log2 = dsize_log2;
+	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
+			offset_des_active_rxq[queue_idx];
+	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
+	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
+
+	rxq->fep = fep;
+	bdp = rxq->bd.base;
+	rxq->bd.cur = bdp;
+
+	for (i = 0; i < nb_rx_desc; i++) {
+		/* Initialize Rx buffers from pktmbuf pool */
+		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf == NULL) {
+			ENETFEC_PMD_ERR("mbuf failed\n");
+		goto err_alloc;
+		}
+
+		/* Get the virtual address & physical address */
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&bdp->bd_bufaddr);
+
+		rxq->rx_mbuf[i] = mbuf;
+		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = rxq->bd.cur;
+	for (i = 0; i < rxq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		if (rte_read32(&bdp->bd_bufaddr) > 0)
+			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
+				&bdp->bd_sc);
+		else
+			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &rxq->bd);
+	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
+		rte_read16(&bdp->bd_sc)),  &bdp->bd_sc);
+	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
+	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
+	return 0;
+
+err_alloc:
+	for (i = 0; i < nb_rx_desc; i++) {
+		if (rxq->rx_mbuf[i] != NULL) {
+			rte_pktmbuf_free(rxq->rx_mbuf[i]);
+			rxq->rx_mbuf[i] = NULL;
+		}
+	}
+	rte_free(rxq);
+	return errno;
+}
+
 static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure          = enetfec_eth_configure,
 	.dev_start              = enetfec_eth_start,
-	.dev_stop               = enetfec_eth_stop
+	.dev_stop               = enetfec_eth_stop,
+	.dev_infos_get          = enetfec_eth_info,
+	.rx_queue_setup         = enetfec_rx_queue_setup,
+	.tx_queue_setup         = enetfec_tx_queue_setup
 };
 
 static int
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index 0d16e48d12..36202ba6c7 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -12,10 +12,14 @@ 
 #define FULL_DUPLEX             0x01
 #define UNKNOWN_DUPLEX          0xff
 
+#define MAX_TX_BD_RING_SIZE     512     /* It should be power of 2 */
+#define MAX_RX_BD_RING_SIZE     512
 #define PKT_MAX_BUF_SIZE        1984
 #define OPT_FRAME_SIZE		(PKT_MAX_BUF_SIZE << 16)
 #define ETH_ALEN		RTE_ETHER_ADDR_LEN
 
+#define __iomem
+
 /*
  * ENETFEC with AVB IP can support maximum 3 rx and tx queues.
  */
@@ -25,6 +29,49 @@ 
 #define writel(v, p) ({*(volatile unsigned int *)(p) = (v); })
 #define readl(p) rte_read32(p)
 
+struct bufdesc {
+	uint16_t		bd_datlen;  /* buffer data length */
+	uint16_t		bd_sc;      /* buffer control & status */
+	uint32_t		bd_bufaddr; /* buffer address */
+};
+
+struct bufdesc_ex {
+	struct			bufdesc desc;
+	uint32_t		bd_esc;
+	uint32_t		bd_prot;
+	uint32_t		bd_bdu;
+	uint32_t		ts;
+	uint16_t		res0[4];
+};
+
+struct bufdesc_prop {
+	int			queue_id;
+	/* Addresses of Tx and Rx buffers */
+	struct bufdesc		*base;
+	struct bufdesc		*last;
+	struct bufdesc		*cur;
+	void __iomem		*active_reg_desc;
+	uint64_t		descr_baseaddr_p;
+	unsigned short		ring_size;
+	unsigned char		d_size;
+	unsigned char		d_size_log2;
+};
+
+struct enetfec_priv_tx_q {
+	struct bufdesc_prop	bd;
+	struct rte_mbuf		*tx_mbuf[MAX_TX_BD_RING_SIZE];
+	struct bufdesc		*dirty_tx;
+	struct rte_mempool	*pool;
+	struct enetfec_private	*fep;
+};
+
+struct enetfec_priv_rx_q {
+	struct bufdesc_prop	bd;
+	struct rte_mbuf		*rx_mbuf[MAX_RX_BD_RING_SIZE];
+	struct rte_mempool	*pool;
+	struct enetfec_private	*fep;
+};
+
 /* Buffer descriptors of FEC are used to track the ring buffers. Buffer
  * descriptor base is x_bd_base. Currently available buffer are x_cur
  * and x_cur. where x is rx or tx. Current buffer is tracked by dirty_tx
@@ -69,4 +116,30 @@  struct enetfec_private {
 	struct enetfec_priv_tx_q *tx_queues[ENETFEC_MAX_Q];
 };
 
+static inline struct
+bufdesc *enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+	return (bdp >= bd->last) ? bd->base
+		: (struct bufdesc *)(((uintptr_t)bdp) + bd->d_size);
+}
+
+static inline int
+fls64(unsigned long word)
+{
+	return (64 - __builtin_clzl(word)) - 1;
+}
+
+static inline struct
+bufdesc *enet_get_prevdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+	return (bdp <= bd->base) ? bd->last
+		: (struct bufdesc *)(((uintptr_t)bdp) - bd->d_size);
+}
+
+static inline int
+enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+	return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
+}
+
 #endif /*__ENETFEC_ETHDEV_H__*/