[v5,15/24] net/ngbe: add Rx queue setup and release

Message ID 20210602094108.1575640-16-jiawenwu@trustnetic.com
State Changes Requested
Delegated to: Andrew Rybchenko
Series net: ngbe PMD

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Jiawen Wu June 2, 2021, 9:40 a.m. UTC
Setup device Rx queue and release Rx queue.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/ngbe_ethdev.c |   9 +
 drivers/net/ngbe/ngbe_ethdev.h |   8 +
 drivers/net/ngbe/ngbe_rxtx.c   | 305 +++++++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_rxtx.h   |  90 ++++++++++
 4 files changed, 412 insertions(+)

Comments

Andrew Rybchenko June 14, 2021, 6:53 p.m. UTC | #1
On 6/2/21 12:40 PM, Jiawen Wu wrote:
> Setup device Rx queue and release Rx queue.
> 
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
>   drivers/net/ngbe/ngbe_ethdev.c |   9 +
>   drivers/net/ngbe/ngbe_ethdev.h |   8 +
>   drivers/net/ngbe/ngbe_rxtx.c   | 305 +++++++++++++++++++++++++++++++++
>   drivers/net/ngbe/ngbe_rxtx.h   |  90 ++++++++++
>   4 files changed, 412 insertions(+)
> 
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 97b6de3aa4..8eb41a7a2b 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -262,12 +262,19 @@ static int
>   ngbe_dev_configure(struct rte_eth_dev *dev)
>   {
>   	struct ngbe_interrupt *intr = NGBE_DEV_INTR(dev);
> +	struct ngbe_adapter *adapter = NGBE_DEV_ADAPTER(dev);
>   
>   	PMD_INIT_FUNC_TRACE();
>   
>   	/* set flag to update link status after init */
>   	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
>   
> +	/*
> +	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
> +	 * allocation Rx preconditions we will reset it.
> +	 */
> +	adapter->rx_bulk_alloc_allowed = true;
> +
>   	return 0;
>   }
>   
> @@ -654,6 +661,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
>   	.dev_configure              = ngbe_dev_configure,
>   	.dev_infos_get              = ngbe_dev_info_get,
>   	.link_update                = ngbe_dev_link_update,
> +	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
> +	.rx_queue_release           = ngbe_dev_rx_queue_release,
>   };
>   
>   RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index 10c23c41d1..c324ca7e0f 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -43,6 +43,7 @@ struct ngbe_interrupt {
>   struct ngbe_adapter {
>   	struct ngbe_hw             hw;
>   	struct ngbe_interrupt      intr;
> +	bool rx_bulk_alloc_allowed;
>   };
>   
>   #define NGBE_DEV_ADAPTER(dev) \
> @@ -54,6 +55,13 @@ struct ngbe_adapter {
>   #define NGBE_DEV_INTR(dev) \
>   	(&((struct ngbe_adapter *)(dev)->data->dev_private)->intr)
>   
> +void ngbe_dev_rx_queue_release(void *rxq);
> +
> +int  ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
> +		uint16_t nb_rx_desc, unsigned int socket_id,
> +		const struct rte_eth_rxconf *rx_conf,
> +		struct rte_mempool *mb_pool);
> +
>   int
>   ngbe_dev_link_update_share(struct rte_eth_dev *dev,
>   		int wait_to_complete);
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index ae24367b18..9992983bef 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -3,9 +3,14 @@
>    * Copyright(c) 2010-2017 Intel Corporation
>    */
>   
> +#include <sys/queue.h>
> +
>   #include <stdint.h>
>   #include <rte_ethdev.h>
> +#include <ethdev_driver.h>
> +#include <rte_malloc.h>
>   
> +#include "ngbe_logs.h"
>   #include "base/ngbe.h"
>   #include "ngbe_ethdev.h"
>   #include "ngbe_rxtx.h"
> @@ -37,6 +42,166 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
>   	return tx_offload_capa;
>   }
>   
> +/**
> + * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
> + *
> + * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
> + * in the sw_rsc_ring is not set to NULL but rather points to the next
> + * mbuf of this RSC aggregation (that has not been completed yet and still
> + * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
> + * will just free first "nb_segs" segments of the cluster explicitly by calling
> + * an rte_pktmbuf_free_seg().
> + *
> + * @m scattered cluster head
> + */
> +static void __rte_cold
> +ngbe_free_sc_cluster(struct rte_mbuf *m)
> +{
> +	uint16_t i, nb_segs = m->nb_segs;
> +	struct rte_mbuf *next_seg;
> +
> +	for (i = 0; i < nb_segs; i++) {
> +		next_seg = m->next;
> +		rte_pktmbuf_free_seg(m);
> +		m = next_seg;
> +	}
> +}
> +
> +static void __rte_cold
> +ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
> +{
> +	unsigned int i;
> +
> +	if (rxq->sw_ring != NULL) {
> +		for (i = 0; i < rxq->nb_rx_desc; i++) {
> +			if (rxq->sw_ring[i].mbuf != NULL) {
> +				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
> +				rxq->sw_ring[i].mbuf = NULL;
> +			}
> +		}
> +		if (rxq->rx_nb_avail) {
> +			for (i = 0; i < rxq->rx_nb_avail; ++i) {
> +				struct rte_mbuf *mb;
> +
> +				mb = rxq->rx_stage[rxq->rx_next_avail + i];
> +				rte_pktmbuf_free_seg(mb);
> +			}
> +			rxq->rx_nb_avail = 0;
> +		}
> +	}
> +
> +	if (rxq->sw_sc_ring)
> +		for (i = 0; i < rxq->nb_rx_desc; i++)
> +			if (rxq->sw_sc_ring[i].fbuf) {

Compare with NULL
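
I.e., the explicit form DPDK coding style expects (a sketch, not taken from the patch):

	if (rxq->sw_sc_ring[i].fbuf != NULL) {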

> +				ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
> +				rxq->sw_sc_ring[i].fbuf = NULL;
> +			}
> +}
> +
> +static void __rte_cold
> +ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
> +{
> +	if (rxq != NULL) {
> +		ngbe_rx_queue_release_mbufs(rxq);
> +		rte_free(rxq->sw_ring);
> +		rte_free(rxq->sw_sc_ring);
> +		rte_free(rxq);
> +	}
> +}
> +
> +void __rte_cold
> +ngbe_dev_rx_queue_release(void *rxq)
> +{
> +	ngbe_rx_queue_release(rxq);
> +}
> +
> +/*
> + * Check if Rx Burst Bulk Alloc function can be used.
> + * Return
> + *        0: the preconditions are satisfied and the bulk allocation function
> + *           can be used.
> + *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
> + *           function must be used.
> + */
> +static inline int __rte_cold
> +check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
> +{
> +	int ret = 0;
> +
> +	/*
> +	 * Make sure the following pre-conditions are satisfied:
> +	 *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
> +	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
> +	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
> +	 * Scattered packets are not supported.  This should be checked
> +	 * outside of this function.
> +	 */
> +	if (!(rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST)) {
> +		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> +			     "rxq->rx_free_thresh=%d, "
> +			     "RTE_PMD_NGBE_RX_MAX_BURST=%d",
> +			     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
> +		ret = -EINVAL;
> +	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
> +		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> +			     "rxq->rx_free_thresh=%d, "
> +			     "rxq->nb_rx_desc=%d",
> +			     rxq->rx_free_thresh, rxq->nb_rx_desc);
> +		ret = -EINVAL;
> +	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
> +		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> +			     "rxq->nb_rx_desc=%d, "
> +			     "rxq->rx_free_thresh=%d",
> +			     rxq->nb_rx_desc, rxq->rx_free_thresh);
> +		ret = -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
> +/* Reset dynamic ngbe_rx_queue fields back to defaults */
> +static void __rte_cold
> +ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
> +{
> +	static const struct ngbe_rx_desc zeroed_desc = {
> +						{{0}, {0} }, {{0}, {0} } };
> +	unsigned int i;
> +	uint16_t len = rxq->nb_rx_desc;
> +
> +	/*
> +	 * By default, the Rx queue setup function allocates enough memory for
> +	 * NGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
> +	 * extra memory at the end of the descriptor ring to be zero'd out.
> +	 */
> +	if (adapter->rx_bulk_alloc_allowed)
> +		/* zero out extra memory */
> +		len += RTE_PMD_NGBE_RX_MAX_BURST;
> +
> +	/*
> +	 * Zero out HW ring memory. Zero out extra memory at the end of
> +	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
> +	 * reads extra memory as zeros.
> +	 */
> +	for (i = 0; i < len; i++)
> +		rxq->rx_ring[i] = zeroed_desc;
> +
> +	/*
> +	 * initialize extra software ring entries. Space for these extra
> +	 * entries is always allocated
> +	 */
> +	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
> +	for (i = rxq->nb_rx_desc; i < len; ++i)
> +		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
> +
> +	rxq->rx_nb_avail = 0;
> +	rxq->rx_next_avail = 0;
> +	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
> +	rxq->rx_tail = 0;
> +	rxq->nb_rx_hold = 0;
> +	rxq->pkt_first_seg = NULL;
> +	rxq->pkt_last_seg = NULL;
> +}
> +
>   uint64_t
>   ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
>   {
> @@ -65,3 +230,143 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
>   	return offloads;
>   }
>   
> +int __rte_cold
> +ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> +			 uint16_t queue_idx,
> +			 uint16_t nb_desc,
> +			 unsigned int socket_id,
> +			 const struct rte_eth_rxconf *rx_conf,
> +			 struct rte_mempool *mp)
> +{
> +	const struct rte_memzone *rz;
> +	struct ngbe_rx_queue *rxq;
> +	struct ngbe_hw     *hw;
> +	uint16_t len;
> +	struct ngbe_adapter *adapter = NGBE_DEV_ADAPTER(dev);
> +	uint64_t offloads;
> +
> +	PMD_INIT_FUNC_TRACE();
> +	hw = NGBE_DEV_HW(dev);
> +
> +	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
> +
> +	/*
> +	 * Validate number of receive descriptors.
> +	 * It must not exceed hardware maximum, and must be multiple
> +	 * of NGBE_ALIGN.
> +	 */
> +	if (nb_desc % NGBE_RXD_ALIGN != 0 ||
> +			nb_desc > NGBE_RING_DESC_MAX ||
> +			nb_desc < NGBE_RING_DESC_MIN) {
> +		return -EINVAL;
> +	}
> +
> +	/* Free memory prior to re-allocation if needed... */
> +	if (dev->data->rx_queues[queue_idx] != NULL) {
> +		ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
> +		dev->data->rx_queues[queue_idx] = NULL;
> +	}
> +
> +	/* First allocate the rx queue data structure */
> +	rxq = rte_zmalloc_socket("ethdev RX queue",
> +				 sizeof(struct ngbe_rx_queue),
> +				 RTE_CACHE_LINE_SIZE, socket_id);
> +	if (rxq == NULL)
> +		return -ENOMEM;
> +	rxq->mb_pool = mp;
> +	rxq->nb_rx_desc = nb_desc;
> +	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
> +	rxq->queue_id = queue_idx;
> +	rxq->reg_idx = queue_idx;
> +	rxq->port_id = dev->data->port_id;
> +	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
> +		rxq->crc_len = RTE_ETHER_CRC_LEN;
> +	else
> +		rxq->crc_len = 0;
> +	rxq->drop_en = rx_conf->rx_drop_en;
> +	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
> +	rxq->offloads = offloads;
> +	rxq->pkt_type_mask = NGBE_PTID_MASK;
> +
> +	/*
> +	 * Allocate RX ring hardware descriptors. A memzone large enough to
> +	 * handle the maximum ring size is allocated in order to allow for
> +	 * resizing in later calls to the queue setup function.
> +	 */
> +	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
> +				      RX_RING_SZ, NGBE_ALIGN, socket_id);
> +	if (rz == NULL) {
> +		ngbe_rx_queue_release(rxq);
> +		return -ENOMEM;
> +	}
> +
> +	/*
> +	 * Zero init all the descriptors in the ring.
> +	 */
> +	memset(rz->addr, 0, RX_RING_SZ);
> +
> +	rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
> +	rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
> +
> +	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
> +	rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
> +
> +	/*
> +	 * Certain constraints must be met in order to use the bulk buffer
> +	 * allocation Rx burst function. If any of Rx queues doesn't meet them
> +	 * the feature should be disabled for the whole port.
> +	 */
> +	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
> +		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
> +				    "preconditions - canceling the feature for "
> +				    "the whole port[%d]",
> +			     rxq->queue_id, rxq->port_id);
> +		adapter->rx_bulk_alloc_allowed = false;
> +	}
> +
> +	/*
> +	 * Allocate software ring. Allow for space at the end of the
> +	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
> +	 * function does not access an invalid memory region.
> +	 */
> +	len = nb_desc;
> +	if (adapter->rx_bulk_alloc_allowed)
> +		len += RTE_PMD_NGBE_RX_MAX_BURST;
> +
> +	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
> +					  sizeof(struct ngbe_rx_entry) * len,
> +					  RTE_CACHE_LINE_SIZE, socket_id);
> +	if (!rxq->sw_ring) {

Compare with NULL, as above.

> +		ngbe_rx_queue_release(rxq);
> +		return -ENOMEM;
> +	}
> +
> +	/*
> +	 * Always allocate even if it's not going to be needed in order to
> +	 * simplify the code.
> +	 *
> +	 * This ring is used in Scattered Rx cases and Scattered Rx may
> +	 * be requested in ngbe_dev_rx_init(), which is called later from
> +	 * dev_start() flow.
> +	 */
> +	rxq->sw_sc_ring =
> +		rte_zmalloc_socket("rxq->sw_sc_ring",
> +				  sizeof(struct ngbe_scattered_rx_entry) * len,
> +				  RTE_CACHE_LINE_SIZE, socket_id);
> +	if (!rxq->sw_sc_ring) {

Compare with NULL, as above.

> +		ngbe_rx_queue_release(rxq);
> +		return -ENOMEM;
> +	}
> +
> +	PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
> +			    "dma_addr=0x%" PRIx64,
> +		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
> +		     rxq->rx_ring_phys_addr);
> +
> +	dev->data->rx_queues[queue_idx] = rxq;
> +
> +	ngbe_reset_rx_queue(adapter, rxq);
> +
> +	return 0;
> +}
> +
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index 39011ee286..e1676a53b4 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -6,7 +6,97 @@
>   #ifndef _NGBE_RXTX_H_
>   #define _NGBE_RXTX_H_
>   
> +/*****************************************************************************
> + * Receive Descriptor
> + *****************************************************************************/
> +struct ngbe_rx_desc {
> +	struct {
> +		union {
> +			__le32 dw0;

rte_* types should be used

> +			struct {
> +				__le16 pkt;
> +				__le16 hdr;
> +			} lo;
> +		};
> +		union {
> +			__le32 dw1;
> +			struct {
> +				__le16 ipid;
> +				__le16 csum;
> +			} hi;
> +		};
> +	} qw0; /* also as r.pkt_addr */
> +	struct {
> +		union {
> +			__le32 dw2;
> +			struct {
> +				__le32 status;
> +			} lo;
> +		};
> +		union {
> +			__le32 dw3;
> +			struct {
> +				__le16 len;
> +				__le16 tag;
> +			} hi;
> +		};
> +	} qw1; /* also as r.hdr_addr */
> +};
> +
> +#define RTE_PMD_NGBE_RX_MAX_BURST 32
> +
> +#define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
> +		    sizeof(struct ngbe_rx_desc))
> +
>   #define NGBE_TX_MAX_SEG                    40
> +#define NGBE_PTID_MASK                     0xFF
> +
> +/**
> + * Structure associated with each descriptor of the RX ring of a RX queue.
> + */
> +struct ngbe_rx_entry {
> +	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> +};
> +
> +struct ngbe_scattered_rx_entry {
> +	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
> +};
> +
> +/**
> + * Structure associated with each RX queue.
> + */
> +struct ngbe_rx_queue {
> +	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
> +	volatile struct ngbe_rx_desc *rx_ring; /**< RX ring virtual address. */
> +	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
> +	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> +	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> +	struct ngbe_rx_entry *sw_ring; /**< address of RX software ring. */
> +	/**< address of scattered Rx software ring. */
> +	struct ngbe_scattered_rx_entry *sw_sc_ring;
> +	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
> +	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
> +	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
> +	uint16_t            rx_tail;  /**< current value of RDT register. */
> +	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
> +	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
> +	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
> +	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
> +	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
> +	uint16_t            queue_id; /**< RX queue index. */
> +	uint16_t            reg_idx;  /**< RX queue register index. */
> +	/**< Packet type mask for different NICs. */
> +	uint16_t            pkt_type_mask;
> +	uint16_t            port_id;  /**< Device port identifier. */
> +	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
> +	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
> +	uint8_t             rx_deferred_start; /**< not in global dev start. */
> +	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
> +	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
> +	struct rte_mbuf fake_mbuf;
> +	/** hold packets to return to application */
> +	struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
> +};
>   
>   uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
>   uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
>
Jiawen Wu June 15, 2021, 7:50 a.m. UTC | #2
On Tuesday, June 15, 2021 2:53 AM, Andrew Rybchenko wrote:
> On 6/2/21 12:40 PM, Jiawen Wu wrote:
> > Setup device Rx queue and release Rx queue.
> >
> > Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> > ---
> >   drivers/net/ngbe/ngbe_ethdev.c |   9 +
> >   drivers/net/ngbe/ngbe_ethdev.h |   8 +
> >   drivers/net/ngbe/ngbe_rxtx.c   | 305
> +++++++++++++++++++++++++++++++++
> >   drivers/net/ngbe/ngbe_rxtx.h   |  90 ++++++++++
> >   4 files changed, 412 insertions(+)
> >
> > diff --git a/drivers/net/ngbe/ngbe_rxtx.h
> > b/drivers/net/ngbe/ngbe_rxtx.h index 39011ee286..e1676a53b4 100644
> > --- a/drivers/net/ngbe/ngbe_rxtx.h
> > +++ b/drivers/net/ngbe/ngbe_rxtx.h
> > @@ -6,7 +6,97 @@
> >   #ifndef _NGBE_RXTX_H_
> >   #define _NGBE_RXTX_H_
> >
> > +/*****************************************************************************
> > + * Receive Descriptor
> > + *****************************************************************************/
> > +struct ngbe_rx_desc {
> > +	struct {
> > +		union {
> > +			__le32 dw0;
> 
> rte_* types should be used

I don't quite understand: should '__le32' be changed to an 'rte_*' type?

> 
> > +			struct {
> > +				__le16 pkt;
> > +				__le16 hdr;
> > +			} lo;
> > +		};
> > +		union {
> > +			__le32 dw1;
> > +			struct {
> > +				__le16 ipid;
> > +				__le16 csum;
> > +			} hi;
> > +		};
> > +	} qw0; /* also as r.pkt_addr */
> > +	struct {
> > +		union {
> > +			__le32 dw2;
> > +			struct {
> > +				__le32 status;
> > +			} lo;
> > +		};
> > +		union {
> > +			__le32 dw3;
> > +			struct {
> > +				__le16 len;
> > +				__le16 tag;
> > +			} hi;
> > +		};
> > +	} qw1; /* also as r.hdr_addr */
> > +};
> > +
Andrew Rybchenko June 15, 2021, 8:06 a.m. UTC | #3
On 6/15/21 10:50 AM, Jiawen Wu wrote:
> On Tuesday, June 15, 2021 2:53 AM, Andrew Rybchenko wrote:
>> On 6/2/21 12:40 PM, Jiawen Wu wrote:
>>> Setup device Rx queue and release Rx queue.
>>>
>>> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
>>> ---
>>>   drivers/net/ngbe/ngbe_ethdev.c |   9 +
>>>   drivers/net/ngbe/ngbe_ethdev.h |   8 +
>>>   drivers/net/ngbe/ngbe_rxtx.c   | 305
>> +++++++++++++++++++++++++++++++++
>>>   drivers/net/ngbe/ngbe_rxtx.h   |  90 ++++++++++
>>>   4 files changed, 412 insertions(+)
>>>
>>> diff --git a/drivers/net/ngbe/ngbe_rxtx.h
>>> b/drivers/net/ngbe/ngbe_rxtx.h index 39011ee286..e1676a53b4 100644
>>> --- a/drivers/net/ngbe/ngbe_rxtx.h
>>> +++ b/drivers/net/ngbe/ngbe_rxtx.h
>>> @@ -6,7 +6,97 @@
>>>   #ifndef _NGBE_RXTX_H_
>>>   #define _NGBE_RXTX_H_
>>>
>>> +/*****************************************************************************
>>> + * Receive Descriptor
>>> + *****************************************************************************/
>>> +struct ngbe_rx_desc {
>>> +	struct {
>>> +		union {
>>> +			__le32 dw0;
>>
>> rte_* types should be used
> 
> I don't quite understand: should '__le32' be changed to an 'rte_*' type?

Yes, since it is native DPDK code, it should use native
DPDK data types. In this particular case it is rte_le32.
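
For illustration, a sketch of the same descriptor using the native DPDK
little-endian typedefs (rte_le16_t and rte_le32_t, declared in
rte_byteorder.h); the actual layout is of course up to the next revision
of the patch:

#include <rte_byteorder.h>

struct ngbe_rx_desc {
	struct {
		union {
			rte_le32_t dw0;
			struct {
				rte_le16_t pkt;
				rte_le16_t hdr;
			} lo;
		};
		union {
			rte_le32_t dw1;
			struct {
				rte_le16_t ipid;
				rte_le16_t csum;
			} hi;
		};
	} qw0; /* also as r.pkt_addr */
	struct {
		union {
			rte_le32_t dw2;
			struct {
				rte_le32_t status;
			} lo;
		};
		union {
			rte_le32_t dw3;
			struct {
				rte_le16_t len;
				rte_le16_t tag;
			} hi;
		};
	} qw1; /* also as r.hdr_addr */
};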

> 
>>
>>> +			struct {
>>> +				__le16 pkt;
>>> +				__le16 hdr;
>>> +			} lo;
>>> +		};
>>> +		union {
>>> +			__le32 dw1;
>>> +			struct {
>>> +				__le16 ipid;
>>> +				__le16 csum;
>>> +			} hi;
>>> +		};
>>> +	} qw0; /* also as r.pkt_addr */
>>> +	struct {
>>> +		union {
>>> +			__le32 dw2;
>>> +			struct {
>>> +				__le32 status;
>>> +			} lo;
>>> +		};
>>> +		union {
>>> +			__le32 dw3;
>>> +			struct {
>>> +				__le16 len;
>>> +				__le16 tag;
>>> +			} hi;
>>> +		};
>>> +	} qw1; /* also as r.hdr_addr */
>>> +};
>>> +
> 
> 
>
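
For readers following the series, the new callbacks are reached through the
generic ethdev API. A minimal usage sketch (hypothetical application code; it
assumes an ngbe port has already been probed as port_id and that the driver's
default rxconf is acceptable):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
setup_ngbe_rx(uint16_t port_id, int socket_id)
{
	struct rte_eth_conf conf = { 0 };
	struct rte_mempool *mp;
	int ret;

	/* Pool backing the Rx ring; pool and cache sizes are illustrative. */
	mp = rte_pktmbuf_pool_create("ngbe_rx_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -ENOMEM;

	/* Runs ngbe_dev_configure(), which initializes
	 * adapter->rx_bulk_alloc_allowed to true. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Dispatches to ngbe_dev_rx_queue_setup(); 512 is a typical ring
	 * size that passes the min/max/alignment checks in this patch. */
	return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
				      NULL /* default rxconf */, mp);
}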