[v3,2/3] net/i40e: enable direct rearm with separate API

Message ID 20230104073043.1120168-3-feifei.wang2@arm.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series: Direct re-arming of buffers on receive side

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Feifei Wang Jan. 4, 2023, 7:30 a.m. UTC
  Add internal API to separate direct rearm operations between
Rx and Tx.

Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
 drivers/net/i40e/i40e_ethdev.c          |  1 +
 drivers/net/i40e/i40e_ethdev.h          |  2 +
 drivers/net/i40e/i40e_rxtx.c            | 19 +++++++++
 drivers/net/i40e/i40e_rxtx.h            |  4 ++
 drivers/net/i40e/i40e_rxtx_vec_common.h | 54 +++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx_vec_neon.c   | 42 +++++++++++++++++++
 6 files changed, 122 insertions(+)
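
For orientation, a minimal sketch (not part of this patch) of how the callbacks added here are expected to fit together; the ethdev-level wrapper is introduced in patch 1/3 of the series, so the surrounding flow and the helper name below are assumptions for illustration only:

/*
 * Hypothetical caller: fills the Rx sw-ring with mbufs freed on the
 * Tx side, then writes the corresponding Rx descriptors.
 */
static void
direct_rearm_sketch(struct rte_eth_dev *rx_dev, uint16_t rx_queue_id,
		    struct rte_eth_dev *tx_dev, uint16_t tx_queue_id)
{
	struct rte_eth_rxq_rearm_data rearm_data;
	void *rx_queue = rx_dev->data->rx_queues[rx_queue_id];
	void *tx_queue = tx_dev->data->tx_queues[tx_queue_id];
	int nb;

	/* Driver exposes the Rx sw-ring and rearm counters. */
	rx_dev->dev_ops->rxq_rearm_data_get(rx_dev, rx_queue_id, &rearm_data);

	/* Move mbufs freed on the Tx side straight into the Rx sw-ring. */
	nb = tx_dev->tx_fill_sw_ring(tx_queue, &rearm_data);

	/* Write Rx descriptors for the re-armed mbufs and update the tail. */
	if (nb > 0)
		rx_dev->rx_flush_descriptor(rx_queue, nb);
}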
  

Comments

Konstantin Ananyev Feb. 2, 2023, 2:37 p.m. UTC | #1
On 04/01/2023 07:30, Feifei Wang wrote:
> Add internal API to separate direct rearm operations between
> Rx and Tx.
> 
> Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> ---
>   drivers/net/i40e/i40e_ethdev.c          |  1 +
>   drivers/net/i40e/i40e_ethdev.h          |  2 +
>   drivers/net/i40e/i40e_rxtx.c            | 19 +++++++++
>   drivers/net/i40e/i40e_rxtx.h            |  4 ++
>   drivers/net/i40e/i40e_rxtx_vec_common.h | 54 +++++++++++++++++++++++++
>   drivers/net/i40e/i40e_rxtx_vec_neon.c   | 42 +++++++++++++++++++
>   6 files changed, 122 insertions(+)
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 7726a89d99..29c1ce2470 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -497,6 +497,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
>   	.flow_ops_get                 = i40e_dev_flow_ops_get,
>   	.rxq_info_get                 = i40e_rxq_info_get,
>   	.txq_info_get                 = i40e_txq_info_get,
> +	.rxq_rearm_data_get           = i40e_rxq_rearm_data_get,
>   	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
>   	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
>   	.timesync_enable              = i40e_timesync_enable,
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index fe943a45ff..6a6a2a6d3c 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -1352,6 +1352,8 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
>   	struct rte_eth_rxq_info *qinfo);
>   void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
>   	struct rte_eth_txq_info *qinfo);
> +void i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
> +	struct rte_eth_rxq_rearm_data *rxq_rearm_data);
>   int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
>   			   struct rte_eth_burst_mode *mode);
>   int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index 788ffb51c2..d8d801acaf 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -3197,6 +3197,19 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
>   	qinfo->conf.offloads = txq->offloads;
>   }
>   
> +void
> +i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
> +	struct rte_eth_rxq_rearm_data *rxq_rearm_data)
> +{
> +	struct i40e_rx_queue *rxq;
> +
> +	rxq = dev->data->rx_queues[queue_id];
> +
> +	rxq_rearm_data->rx_sw_ring = rxq->sw_ring;
> +	rxq_rearm_data->rearm_start = &rxq->rxrearm_start;
> +	rxq_rearm_data->rearm_nb = &rxq->rxrearm_nb;
> +}
> +
>   #ifdef RTE_ARCH_X86
>   static inline bool
>   get_avx_supported(bool request_avx512)
> @@ -3321,6 +3334,9 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
>   			PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
>   				     dev->data->port_id);
>   			dev->rx_pkt_burst = i40e_recv_pkts_vec;
> +#ifdef RTE_ARCH_ARM64
> +			dev->rx_flush_descriptor = i40e_rx_flush_descriptor_vec;
> +#endif
>   		}
>   #endif /* RTE_ARCH_X86 */
>   	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
> @@ -3484,6 +3500,9 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
>   			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
>   				     dev->data->port_id);
>   			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
> +#ifdef RTE_ARCH_ARM64
> +			dev->tx_fill_sw_ring = i40e_tx_fill_sw_ring;
> +#endif

As far as I can see, tx_fill_sw_ring() is not ARM-specific; is there any reason to guard
it with #ifdef RTE_ARCH_ARM64?
The same question applies to rx_flush_descriptor() - can we have a generic
version too?

>   #endif /* RTE_ARCH_X86 */
>   		} else {
>   			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 5e6eecc501..8a29bd89df 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -233,6 +233,10 @@ uint32_t i40e_dev_rx_queue_count(void *rx_queue);
>   int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
>   int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
>   
> +int i40e_tx_fill_sw_ring(void *tx_queue,
> +		struct rte_eth_rxq_rearm_data *rxq_rearm_data);
> +int i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm);
> +
>   uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
>   			    uint16_t nb_pkts);
>   uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
> diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
> index fe1a6ec75e..eb96301a43 100644
> --- a/drivers/net/i40e/i40e_rxtx_vec_common.h
> +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
> @@ -146,6 +146,60 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
>   	return txq->tx_rs_thresh;
>   }
>   
> +int
> +i40e_tx_fill_sw_ring(void *tx_queue,
> +		struct rte_eth_rxq_rearm_data *rxq_rearm_data)
> +{
> +	struct i40e_tx_queue *txq = tx_queue;
> +	struct i40e_tx_entry *txep;
> +	void **rxep;
> +	struct rte_mbuf *m;
> +	int i, n;
> +	int nb_rearm = 0;
> +
> +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> +			txq->nb_tx_free > txq->tx_free_thresh)
> +		return 0;
> +
> +	/* check DD bits on threshold descriptor */
> +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> +			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> +			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> +		return 0;
> +
> +	n = txq->tx_rs_thresh;
> +
> +	/* first buffer to free from S/W ring is at index
> +	 * tx_next_dd - (tx_rs_thresh-1)
> +	 */
> +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> +	rxep = rxq_rearm_data->rx_sw_ring;
> +	rxep += *rxq_rearm_data->rearm_start;
> +
> +	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> +		/* directly put mbufs from Tx to Rx */
> +		for (i = 0; i < n; i++, rxep++, txep++)
> +			*rxep = txep[0].mbuf;
> +	} else {
> +		for (i = 0; i < n; i++, rxep++) {
> +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> +			if (m != NULL) {
> +				*rxep = m;
> +				nb_rearm++;
> +			}
> +		}
> +		n = nb_rearm;
> +	}
> +
> +	/* update counters for Tx */
> +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> +
> +	return n;
> +}
> +
>   static __rte_always_inline void
>   tx_backlog_entry(struct i40e_tx_entry *txep,
>   		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> index 12e6f1cbcb..1509d3223b 100644
> --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
> +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> @@ -739,6 +739,48 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
>   	return nb_pkts;
>   }
>   
> +int
> +i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm)
> +{
> +	struct i40e_rx_queue *rxq = rx_queue;
> +	struct i40e_rx_entry *rxep;
> +	volatile union i40e_rx_desc *rxdp;
> +	uint16_t rx_id;
> +	uint64x2_t dma_addr;
> +	uint64_t paddr;
> +	uint16_t i;
> +
> +	rxdp = rxq->rx_ring + rxq->rxrearm_start;
> +	rxep = &rxq->sw_ring[rxq->rxrearm_start];
> +
> +	for (i = 0; i < nb_rearm; i++) {
> +		/* Initialize rxdp descs */
> +		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
> +		dma_addr = vdupq_n_u64(paddr);
> +		/* flush desc with pa dma_addr */
> +		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr);
> +	}
> +
> +	/* Update the descriptor initializer index */
> +	rxq->rxrearm_start += nb_rearm;
> +	rx_id = rxq->rxrearm_start - 1;
> +
> +	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
> +		rxq->rxrearm_start = rxq->rxrearm_start - rxq->nb_rx_desc;
> +		if (!rxq->rxrearm_start)
> +			rx_id = rxq->nb_rx_desc - 1;
> +		else
> +			rx_id = rxq->rxrearm_start - 1;
> +	}
> +	rxq->rxrearm_nb -= nb_rearm;
> +
> +	rte_io_wmb();
> +	/* Update the tail pointer on the NIC */
> +	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
> +
> +	return 0;
> +}
> +
>   void __rte_cold
>   i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
>   {
  
Feifei Wang Feb. 24, 2023, 9:50 a.m. UTC | #2
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Sent: Thursday, February 2, 2023 10:38 PM
> To: Feifei Wang <Feifei.Wang2@arm.com>; Yuying Zhang
> <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>; Ruifeng
> Wang <Ruifeng.Wang@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>
> Subject: Re: [PATCH v3 2/3] net/i40e: enable direct rearm with separate API
> 
> On 04/01/2023 07:30, Feifei Wang wrote:
> > Add internal API to separate direct rearm operations between Rx and
> > Tx.
> >
> > Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > ---
> >   drivers/net/i40e/i40e_ethdev.c          |  1 +
> >   drivers/net/i40e/i40e_ethdev.h          |  2 +
> >   drivers/net/i40e/i40e_rxtx.c            | 19 +++++++++
> >   drivers/net/i40e/i40e_rxtx.h            |  4 ++
> >   drivers/net/i40e/i40e_rxtx_vec_common.h | 54
> +++++++++++++++++++++++++
> >   drivers/net/i40e/i40e_rxtx_vec_neon.c   | 42 +++++++++++++++++++
> >   6 files changed, 122 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 7726a89d99..29c1ce2470 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -497,6 +497,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops
> = {
> >   	.flow_ops_get                 = i40e_dev_flow_ops_get,
> >   	.rxq_info_get                 = i40e_rxq_info_get,
> >   	.txq_info_get                 = i40e_txq_info_get,
> > +	.rxq_rearm_data_get           = i40e_rxq_rearm_data_get,
> >   	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
> >   	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
> >   	.timesync_enable              = i40e_timesync_enable,
> > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > b/drivers/net/i40e/i40e_ethdev.h index fe943a45ff..6a6a2a6d3c 100644
> > --- a/drivers/net/i40e/i40e_ethdev.h
> > +++ b/drivers/net/i40e/i40e_ethdev.h
> > @@ -1352,6 +1352,8 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev,
> uint16_t queue_id,
> >   	struct rte_eth_rxq_info *qinfo);
> >   void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
> >   	struct rte_eth_txq_info *qinfo);
> > +void i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t
> queue_id,
> > +	struct rte_eth_rxq_rearm_data *rxq_rearm_data);
> >   int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
> >   			   struct rte_eth_burst_mode *mode);
> >   int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t
> > queue_id, diff --git a/drivers/net/i40e/i40e_rxtx.c
> > b/drivers/net/i40e/i40e_rxtx.c index 788ffb51c2..d8d801acaf 100644
> > --- a/drivers/net/i40e/i40e_rxtx.c
> > +++ b/drivers/net/i40e/i40e_rxtx.c
> > @@ -3197,6 +3197,19 @@ i40e_txq_info_get(struct rte_eth_dev *dev,
> uint16_t queue_id,
> >   	qinfo->conf.offloads = txq->offloads;
> >   }
> >
> > +void
> > +i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
> > +	struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > +	struct i40e_rx_queue *rxq;
> > +
> > +	rxq = dev->data->rx_queues[queue_id];
> > +
> > +	rxq_rearm_data->rx_sw_ring = rxq->sw_ring;
> > +	rxq_rearm_data->rearm_start = &rxq->rxrearm_start;
> > +	rxq_rearm_data->rearm_nb = &rxq->rxrearm_nb; }
> > +
> >   #ifdef RTE_ARCH_X86
> >   static inline bool
> >   get_avx_supported(bool request_avx512) @@ -3321,6 +3334,9 @@
> > i40e_set_rx_function(struct rte_eth_dev *dev)
> >   			PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
> >   				     dev->data->port_id);
> >   			dev->rx_pkt_burst = i40e_recv_pkts_vec;
> > +#ifdef RTE_ARCH_ARM64
> > +			dev->rx_flush_descriptor =
> i40e_rx_flush_descriptor_vec; #endif
> >   		}
> >   #endif /* RTE_ARCH_X86 */
> >   	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
> > @@ -3484,6 +3500,9 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
> >   			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
> >   				     dev->data->port_id);
> >   			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
> > +#ifdef RTE_ARCH_ARM64
> > +			dev->tx_fill_sw_ring = i40e_tx_fill_sw_ring; #endif
> 
> As I can see tx_fill_sw_ring() is non ARM specific, any reason to guard it with
> #ifdef ARM?
> Actually same ask for rx_flush_descriptor() - can we have generic version too?

Here we assumed direct rearm would not be enabled on other architectures. Agreed that
we need a generic version to avoid the #ifdef; I will update this in the next version.
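
For illustration, a generic (scalar) variant could look roughly like the sketch below; it is derived from the NEON code in this patch, and the name and final shape are assumptions pending the next version:

/* Hypothetical generic variant, sketched from the NEON code in this patch. */
int
i40e_rx_flush_descriptor(void *rx_queue, uint16_t nb_rearm)
{
	struct i40e_rx_queue *rxq = rx_queue;
	struct i40e_rx_entry *rxep;
	volatile union i40e_rx_desc *rxdp;
	uint16_t rx_id;
	uint64_t paddr;
	uint16_t i;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
	rxep = &rxq->sw_ring[rxq->rxrearm_start];

	for (i = 0; i < nb_rearm; i++) {
		/* Point each descriptor at the re-armed mbuf's data buffer. */
		paddr = rxep[i].mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
		rxdp[i].read.pkt_addr = rte_cpu_to_le_64(paddr);
		rxdp[i].read.hdr_addr = 0;
	}

	/* Advance the rearm index, wrapping at the end of the ring. */
	rxq->rxrearm_start += nb_rearm;
	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc))
		rxq->rxrearm_start -= rxq->nb_rx_desc;
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
	rxq->rxrearm_nb -= nb_rearm;

	rte_io_wmb();
	/* Update the tail pointer on the NIC. */
	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);

	return 0;
}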
 
> 
> >   #endif /* RTE_ARCH_X86 */
> >   		} else {
> >   			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
> diff --git
> > a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index
> > 5e6eecc501..8a29bd89df 100644
> > --- a/drivers/net/i40e/i40e_rxtx.h
> > +++ b/drivers/net/i40e/i40e_rxtx.h
> > @@ -233,6 +233,10 @@ uint32_t i40e_dev_rx_queue_count(void
> *rx_queue);
> >   int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
> >   int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
> >
> > +int i40e_tx_fill_sw_ring(void *tx_queue,
> > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data); int
> > +i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm);
> > +
> >   uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> >   			    uint16_t nb_pkts);
> >   uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue, diff --git
> > a/drivers/net/i40e/i40e_rxtx_vec_common.h
> > b/drivers/net/i40e/i40e_rxtx_vec_common.h
> > index fe1a6ec75e..eb96301a43 100644
> > --- a/drivers/net/i40e/i40e_rxtx_vec_common.h
> > +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
> > @@ -146,6 +146,60 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
> >   	return txq->tx_rs_thresh;
> >   }
> >
> > +int
> > +i40e_tx_fill_sw_ring(void *tx_queue,
> > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > +	struct i40e_tx_queue *txq = tx_queue;
> > +	struct i40e_tx_entry *txep;
> > +	void **rxep;
> > +	struct rte_mbuf *m;
> > +	int i, n;
> > +	int nb_rearm = 0;
> > +
> > +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> > +			txq->nb_tx_free > txq->tx_free_thresh)
> > +		return 0;
> > +
> > +	/* check DD bits on threshold descriptor */
> > +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> > +			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> > +
> 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> > +		return 0;
> > +
> > +	n = txq->tx_rs_thresh;
> > +
> > +	/* first buffer to free from S/W ring is at index
> > +	 * tx_next_dd - (tx_rs_thresh-1)
> > +	 */
> > +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> > +	rxep = rxq_rearm_data->rx_sw_ring;
> > +	rxep += *rxq_rearm_data->rearm_start;
> > +
> > +	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> > +		/* directly put mbufs from Tx to Rx */
> > +		for (i = 0; i < n; i++, rxep++, txep++)
> > +			*rxep = txep[0].mbuf;
> > +	} else {
> > +		for (i = 0; i < n; i++, rxep++) {
> > +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> > +			if (m != NULL) {
> > +				*rxep = m;
> > +				nb_rearm++;
> > +			}
> > +		}
> > +		n = nb_rearm;
> > +	}
> > +
> > +	/* update counters for Tx */
> > +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> > +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> > +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> > +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> > +
> > +	return n;
> > +}
> > +
> >   static __rte_always_inline void
> >   tx_backlog_entry(struct i40e_tx_entry *txep,
> >   		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts) diff --git
> > a/drivers/net/i40e/i40e_rxtx_vec_neon.c
> > b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> > index 12e6f1cbcb..1509d3223b 100644
> > --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
> > +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
> > @@ -739,6 +739,48 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict
> tx_queue,
> >   	return nb_pkts;
> >   }
> >
> > +int
> > +i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm) {
> > +	struct i40e_rx_queue *rxq = rx_queue;
> > +	struct i40e_rx_entry *rxep;
> > +	volatile union i40e_rx_desc *rxdp;
> > +	uint16_t rx_id;
> > +	uint64x2_t dma_addr;
> > +	uint64_t paddr;
> > +	uint16_t i;
> > +
> > +	rxdp = rxq->rx_ring + rxq->rxrearm_start;
> > +	rxep = &rxq->sw_ring[rxq->rxrearm_start];
> > +
> > +	for (i = 0; i < nb_rearm; i++) {
> > +		/* Initialize rxdp descs */
> > +		paddr = (rxep[i].mbuf)->buf_iova +
> RTE_PKTMBUF_HEADROOM;
> > +		dma_addr = vdupq_n_u64(paddr);
> > +		/* flush desc with pa dma_addr */
> > +		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr);
> > +	}
> > +
> > +	/* Update the descriptor initializer index */
> > +	rxq->rxrearm_start += nb_rearm;
> > +	rx_id = rxq->rxrearm_start - 1;
> > +
> > +	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
> > +		rxq->rxrearm_start = rxq->rxrearm_start - rxq->nb_rx_desc;
> > +		if (!rxq->rxrearm_start)
> > +			rx_id = rxq->nb_rx_desc - 1;
> > +		else
> > +			rx_id = rxq->rxrearm_start - 1;
> > +	}
> > +	rxq->rxrearm_nb -= nb_rearm;
> > +
> > +	rte_io_wmb();
> > +	/* Update the tail pointer on the NIC */
> > +	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
> > +
> > +	return 0;
> > +}
> > +
> >   void __rte_cold
> >   i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
> >   {
  
Konstantin Ananyev Feb. 27, 2023, 7:35 p.m. UTC | #3
> > > +int
> > > +i40e_tx_fill_sw_ring(void *tx_queue,
> > > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > > +	struct i40e_tx_queue *txq = tx_queue;
> > > +	struct i40e_tx_entry *txep;
> > > +	void **rxep;
> > > +	struct rte_mbuf *m;
> > > +	int i, n;
> > > +	int nb_rearm = 0;
> > > +
> > > +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> > > +			txq->nb_tx_free > txq->tx_free_thresh)
> > > +		return 0;
> > > +
> > > +	/* check DD bits on threshold descriptor */
> > > +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> > > +			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> > > +
> > 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> > > +		return 0;
> > > +
> > > +	n = txq->tx_rs_thresh;
> > > +
> > > +	/* first buffer to free from S/W ring is at index
> > > +	 * tx_next_dd - (tx_rs_thresh-1)
> > > +	 */
> > > +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> > > +	rxep = rxq_rearm_data->rx_sw_ring;
> > > +	rxep += *rxq_rearm_data->rearm_start;
> > > +
> > > +	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> > > +		/* directly put mbufs from Tx to Rx */
> > > +		for (i = 0; i < n; i++, rxep++, txep++)
> > > +			*rxep = txep[0].mbuf;
> > > +	} else {
> > > +		for (i = 0; i < n; i++, rxep++) {
> > > +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);

One thing I forgot to ask:
What would happen if this mbuf belongs to a different mempool
(not the one that we specify at rx_queue_setup())?
Do we need to check for that here?
Or would it be an upper-layer constraint?
Or...?

> > > +			if (m != NULL) {
> > > +				*rxep = m;
> > > +				nb_rearm++;
> > > +			}
> > > +		}
> > > +		n = nb_rearm;
> > > +	}
> > > +
> > > +	/* update counters for Tx */
> > > +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> > > +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> > > +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> > > +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> > > +
> > > +	return n;
> > > +}
> > > +
  
Feifei Wang Feb. 28, 2023, 2:15 a.m. UTC | #4
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> Sent: Tuesday, February 28, 2023 3:36 AM
> To: Feifei Wang <Feifei.Wang2@arm.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Yuying Zhang
> <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>; Ruifeng
> Wang <Ruifeng.Wang@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>
> Subject: RE: [PATCH v3 2/3] net/i40e: enable direct rearm with separate API
> 
> 
> 
> > > > +int
> > > > +i40e_tx_fill_sw_ring(void *tx_queue,
> > > > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > > > +	struct i40e_tx_queue *txq = tx_queue;
> > > > +	struct i40e_tx_entry *txep;
> > > > +	void **rxep;
> > > > +	struct rte_mbuf *m;
> > > > +	int i, n;
> > > > +	int nb_rearm = 0;
> > > > +
> > > > +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> > > > +			txq->nb_tx_free > txq->tx_free_thresh)
> > > > +		return 0;
> > > > +
> > > > +	/* check DD bits on threshold descriptor */
> > > > +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> > > > +			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> > > > +
> > > 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> > > > +		return 0;
> > > > +
> > > > +	n = txq->tx_rs_thresh;
> > > > +
> > > > +	/* first buffer to free from S/W ring is at index
> > > > +	 * tx_next_dd - (tx_rs_thresh-1)
> > > > +	 */
> > > > +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> > > > +	rxep = rxq_rearm_data->rx_sw_ring;
> > > > +	rxep += *rxq_rearm_data->rearm_start;
> > > > +
> > > > +	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> > > > +		/* directly put mbufs from Tx to Rx */
> > > > +		for (i = 0; i < n; i++, rxep++, txep++)
> > > > +			*rxep = txep[0].mbuf;
> > > > +	} else {
> > > > +		for (i = 0; i < n; i++, rxep++) {
> > > > +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> 
> One thing I forgot to ask:
> What would happen if this mbuf belongs to different mempool (not one that
> we specify at rx_queue_setup())?
> Do we need to check it here?
> Or would it be upper layer constraint?
> Or...?
>

First, the 'different mempool' case only arises on the non-FAST_FREE path in tx_free_buffers.

If buffers belong to different mempools, here is an example:
Buffer 1 from mempool 1, its recycle path is:
-----------------------------------------------------------------------------------------
1. queue_setup: rearm from mempool 1 into Rx sw-ring
2. rte_eth_Rx_burst: used by user app (Rx)
3. rte_eth_Tx_burst: mount on Tx sw-ring 
4. rte_eth_direct_rearm: free into Rx sw-ring:
           or  
    tx_free_buffers: free into mempool 1 (no fast_free path) 
-----------------------------------------------------------------------------------------

Buffer 2 from mempool 2, its recycle path is:
-----------------------------------------------------------------------------------------
1. queue_setup: rearm from mempool 2 into Rx sw-ring
2. rte_eth_Rx_burst: used by user app (Rx)
3. rte_eth_Tx_burst: mount on Tx sw-ring 
4. rte_eth_direct_rearm: free into Rx sw-ring
           or  
    tx_free_buffers: free into mempool 2 (no fast_free_path)
-----------------------------------------------------------------------------------------

Thus, from the Rx side, Tx buffers from different mempools look the same. The only difference
is that they would be freed into different mempools if the thread used the generic free path.
I think this cannot affect direct-rearm mode, and we do not need to check it.

> > > > +			if (m != NULL) {
> > > > +				*rxep = m;
> > > > +				nb_rearm++;
> > > > +			}
> > > > +		}
> > > > +		n = nb_rearm;
> > > > +	}
> > > > +
> > > > +	/* update counters for Tx */
> > > > +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> > > > +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> > > > +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> > > > +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> > > > +
> > > > +	return n;
> > > > +}
> > > > +
  
Konstantin Ananyev March 7, 2023, 11:01 a.m. UTC | #5
> > -----Original Message-----
> > From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > Sent: Tuesday, February 28, 2023 3:36 AM
> > To: Feifei Wang <Feifei.Wang2@arm.com>; Konstantin Ananyev
> > <konstantin.v.ananyev@yandex.ru>; Yuying Zhang
> > <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>; Ruifeng
> > Wang <Ruifeng.Wang@arm.com>
> > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> > <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>
> > Subject: RE: [PATCH v3 2/3] net/i40e: enable direct rearm with separate API
> >
> >
> >
> > > > > +int
> > > > > +i40e_tx_fill_sw_ring(void *tx_queue,
> > > > > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > > > > +	struct i40e_tx_queue *txq = tx_queue;
> > > > > +	struct i40e_tx_entry *txep;
> > > > > +	void **rxep;
> > > > > +	struct rte_mbuf *m;
> > > > > +	int i, n;
> > > > > +	int nb_rearm = 0;
> > > > > +
> > > > > +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> > > > > +			txq->nb_tx_free > txq->tx_free_thresh)
> > > > > +		return 0;
> > > > > +
> > > > > +	/* check DD bits on threshold descriptor */
> > > > > +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> > > > > +			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> > > > > +
> > > > 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> > > > > +		return 0;
> > > > > +
> > > > > +	n = txq->tx_rs_thresh;
> > > > > +
> > > > > +	/* first buffer to free from S/W ring is at index
> > > > > +	 * tx_next_dd - (tx_rs_thresh-1)
> > > > > +	 */
> > > > > +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> > > > > +	rxep = rxq_rearm_data->rx_sw_ring;
> > > > > +	rxep += *rxq_rearm_data->rearm_start;
> > > > > +
> > > > > +	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> > > > > +		/* directly put mbufs from Tx to Rx */
> > > > > +		for (i = 0; i < n; i++, rxep++, txep++)
> > > > > +			*rxep = txep[0].mbuf;
> > > > > +	} else {
> > > > > +		for (i = 0; i < n; i++, rxep++) {
> > > > > +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> >
> > One thing I forgot to ask:
> > What would happen if this mbuf belongs to different mempool (not one that
> > we specify at rx_queue_setup())?
> > Do we need to check it here?
> > Or would it be upper layer constraint?
> > Or...?
> >
> 
> First, 'different mempool' is valid for no FAST_FREE path in tx_free_buffers.
> 
> If buffers belong to different mempool, we can have an example here:
> Buffer 1 from mempool 1, its recycle path is:
> -----------------------------------------------------------------------------------------
> 1. queue_setup: rearm from mempool 1 into Rx sw-ring
> 2. rte_eth_Rx_burst: used by user app (Rx)
> 3. rte_eth_Tx_burst: mount on Tx sw-ring
> 4. rte_eth_direct_rearm: free into Rx sw-ring:
>            or
>     tx_free_buffers: free into mempool 1 (no fast_free path)
> -----------------------------------------------------------------------------------------
> 
> Buffer 2 from mempool 2, its recycle path is:
> -----------------------------------------------------------------------------------------
> 1. queue_setup: rearm from mempool 2 into Rx sw-ring
> 2. rte_eth_Rx_burst: used by user app (Rx)
> 3. rte_eth_Tx_burst: mount on Tx sw-ring
> 4. rte_eth_direct_rearm: free into Rx sw-ring
>            or
>     tx_free_buffers: free into mempool 2 (no fast_free_path)
> -----------------------------------------------------------------------------------------
> 
> Thus, buffers from Tx different mempools are the same for Rx. The difference point
> is that they will be freed into different mempool if the thread  uses generic free buffers.
> I think this cannot affect direct-rearm mode, and we do not need to check this.

I understand that it should work even with multiple mempools.
What I am trying to say - the user may not want to use mbufs from a particular mempool for RX
(while it is still ok to use it for TX).
Let's say the user has a separate mempool with small data-buffers (less than normal MTU)
to send some 'special' packets, or even uses this mempool with small buffers for zero-copy
updating of packet L2/L3 headers, etc.
Or it could be some 'special' user-provided mempool.
That's why I wonder whether we should allow only mbufs from the mempool that is assigned to that RX queue.

> 
> > > > > +			if (m != NULL) {
> > > > > +				*rxep = m;
> > > > > +				nb_rearm++;
> > > > > +			}
> > > > > +		}
> > > > > +		n = nb_rearm;
> > > > > +	}
> > > > > +
> > > > > +	/* update counters for Tx */
> > > > > +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> > > > > +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> > > > > +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> > > > > +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> > > > > +
> > > > > +	return n;
> > > > > +}
> > > > > +
  
Feifei Wang March 14, 2023, 6:07 a.m. UTC | #6
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> Sent: Tuesday, March 7, 2023 7:01 PM
> To: Feifei Wang <Feifei.Wang2@arm.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Yuying Zhang
> <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>; Ruifeng
> Wang <Ruifeng.Wang@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>; nd <nd@arm.com>
> Subject: RE: [PATCH v3 2/3] net/i40e: enable direct rearm with separate API
> 
> 
> 
> > > -----Original Message-----
> > > From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > Sent: Tuesday, February 28, 2023 3:36 AM
> > > To: Feifei Wang <Feifei.Wang2@arm.com>; Konstantin Ananyev
> > > <konstantin.v.ananyev@yandex.ru>; Yuying Zhang
> > > <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>;
> > > Ruifeng Wang <Ruifeng.Wang@arm.com>
> > > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> > > <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>
> > > Subject: RE: [PATCH v3 2/3] net/i40e: enable direct rearm with separate
> > > API
> > >
> > >
> > >
> > > > > > +int
> > > > > > +i40e_tx_fill_sw_ring(void *tx_queue,
> > > > > > +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> > > > > > +	struct i40e_tx_queue *txq = tx_queue;
> > > > > > +	struct i40e_tx_entry *txep;
> > > > > > +	void **rxep;
> > > > > > +	struct rte_mbuf *m;
> > > > > > +	int i, n;
> > > > > > +	int nb_rearm = 0;
> > > > > > +
> > > > > > +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> > > > > > +			txq->nb_tx_free > txq->tx_free_thresh)
> > > > > > +		return 0;
> > > > > > +
> > > > > > +	/* check DD bits on threshold descriptor */
> > > > > > +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> > > > > > +
> 	rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> > > > > > +
> > > > > 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> > > > > > +		return 0;
> > > > > > +
> > > > > > +	n = txq->tx_rs_thresh;
> > > > > > +
> > > > > > +	/* first buffer to free from S/W ring is at index
> > > > > > +	 * tx_next_dd - (tx_rs_thresh-1)
> > > > > > +	 */
> > > > > > +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> > > > > > +	rxep = rxq_rearm_data->rx_sw_ring;
> > > > > > +	rxep += *rxq_rearm_data->rearm_start;
> > > > > > +
> > > > > > +	if (txq->offloads &
> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> > > > > > +		/* directly put mbufs from Tx to Rx */
> > > > > > +		for (i = 0; i < n; i++, rxep++, txep++)
> > > > > > +			*rxep = txep[0].mbuf;
> > > > > > +	} else {
> > > > > > +		for (i = 0; i < n; i++, rxep++) {
> > > > > > +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> > >
> > > One thing I forgot to ask:
> > > What would happen if this mbuf belongs to different mempool (not one
> > > that we specify at rx_queue_setup())?
> > > Do we need to check it here?
> > > Or would it be upper layer constraint?
> > > Or...?
> > >
> >
> > First, 'different mempool' is valid for no FAST_FREE path in tx_free_buffers.
> >
> > If buffers belong to different mempool, we can have an example here:
> > Buffer 1 from mempool 1, its recycle path is:
> > ----------------------------------------------------------------------
> > ------------------- 1. queue_setup: rearm from mempool 1 into Rx
> > sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
> > rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
> > into Rx sw-ring:
> >            or
> >     tx_free_buffers: free into mempool 1 (no fast_free path)
> > ----------------------------------------------------------------------
> > -------------------
> >
> > Buffer 2 from mempool 2, its recycle path is:
> > ----------------------------------------------------------------------
> > ------------------- 1. queue_setup: rearm from mempool 2 into Rx
> > sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
> > rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
> > into Rx sw-ring
> >            or
> >     tx_free_buffers: free into mempool 2 (no fast_free_path)
> > ----------------------------------------------------------------------
> > -------------------
> >
> > Thus, buffers from Tx different mempools are the same for Rx. The
> > difference point is that they will be freed into different mempool if the
> thread  uses generic free buffers.
> > I think this cannot affect direct-rearm mode, and we do not need to check
> this.
> 
> I understand that it should work even with multiple mempools.
> What I am trying to say - user may not want to use mbufs from particular
> mempool for RX (while it is still ok to use it for TX).
> Let say user can have a separate mempool with small data-buffers (less then
> normal MTU) to send some 'special' paclets, or even use this memppol with
> small buffers for zero-copy updating of packet L2/L3 headers, etc.
> Or it could be some 'special' user provided mempool.
> That's why I wonder should we allow only mbufs from mempool that is
> assigned to that RX queue.

Sorry for the confusion. If I understand correctly this time, you mean a special
mempool. Maybe its buffer size is very small and its Tx buffers are generated by the control plane.

However, if we recycled such a Tx buffer into the Rx buffer ring, errors could occur because its
size is too small.

Thus we can only allow general buffers which are valid for the Rx buffer ring. Furthermore, it should be
the user's responsibility to ensure that the recycled Tx buffers are valid. If we checked this in the data plane,
it would cost a lot of CPU cycles. The best we can do is to add a constraint in the notes to remind users.
> 
> >
> > > > > > +			if (m != NULL) {
> > > > > > +				*rxep = m;
> > > > > > +				nb_rearm++;
> > > > > > +			}
> > > > > > +		}
> > > > > > +		n = nb_rearm;
> > > > > > +	}
> > > > > > +
> > > > > > +	/* update counters for Tx */
> > > > > > +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq-
> >tx_rs_thresh);
> > > > > > +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq-
> >tx_rs_thresh);
> > > > > > +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> > > > > > +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> > > > > > +
> > > > > > +	return n;
> > > > > > +}
> > > > > > +
  
Konstantin Ananyev March 19, 2023, 4:11 p.m. UTC | #7
>>>>>>> +int
>>>>>>> +i40e_tx_fill_sw_ring(void *tx_queue,
>>>>>>> +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
>>>>>>> +	struct i40e_tx_queue *txq = tx_queue;
>>>>>>> +	struct i40e_tx_entry *txep;
>>>>>>> +	void **rxep;
>>>>>>> +	struct rte_mbuf *m;
>>>>>>> +	int i, n;
>>>>>>> +	int nb_rearm = 0;
>>>>>>> +
>>>>>>> +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
>>>>>>> +			txq->nb_tx_free > txq->tx_free_thresh)
>>>>>>> +		return 0;
>>>>>>> +
>>>>>>> +	/* check DD bits on threshold descriptor */
>>>>>>> +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
>>>>>>> +
>> 	rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
>>>>>>> +
>>>>>> 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
>>>>>>> +		return 0;
>>>>>>> +
>>>>>>> +	n = txq->tx_rs_thresh;
>>>>>>> +
>>>>>>> +	/* first buffer to free from S/W ring is at index
>>>>>>> +	 * tx_next_dd - (tx_rs_thresh-1)
>>>>>>> +	 */
>>>>>>> +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
>>>>>>> +	rxep = rxq_rearm_data->rx_sw_ring;
>>>>>>> +	rxep += *rxq_rearm_data->rearm_start;
>>>>>>> +
>>>>>>> +	if (txq->offloads &
>> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
>>>>>>> +		/* directly put mbufs from Tx to Rx */
>>>>>>> +		for (i = 0; i < n; i++, rxep++, txep++)
>>>>>>> +			*rxep = txep[0].mbuf;
>>>>>>> +	} else {
>>>>>>> +		for (i = 0; i < n; i++, rxep++) {
>>>>>>> +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
>>>>
>>>> One thing I forgot to ask:
>>>> What would happen if this mbuf belongs to different mempool (not one
>>>> that we specify at rx_queue_setup())?
>>>> Do we need to check it here?
>>>> Or would it be upper layer constraint?
>>>> Or...?
>>>>
>>>
>>> First, 'different mempool' is valid for no FAST_FREE path in tx_free_buffers.
>>>
>>> If buffers belong to different mempool, we can have an example here:
>>> Buffer 1 from mempool 1, its recycle path is:
>>> ----------------------------------------------------------------------
>>> ------------------- 1. queue_setup: rearm from mempool 1 into Rx
>>> sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
>>> rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
>>> into Rx sw-ring:
>>>             or
>>>      tx_free_buffers: free into mempool 1 (no fast_free path)
>>> ----------------------------------------------------------------------
>>> -------------------
>>>
>>> Buffer 2 from mempool 2, its recycle path is:
>>> ----------------------------------------------------------------------
>>> ------------------- 1. queue_setup: rearm from mempool 2 into Rx
>>> sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
>>> rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
>>> into Rx sw-ring
>>>             or
>>>      tx_free_buffers: free into mempool 2 (no fast_free_path)
>>> ----------------------------------------------------------------------
>>> -------------------
>>>
>>> Thus, buffers from Tx different mempools are the same for Rx. The
>>> difference point is that they will be freed into different mempool if the
>> thread  uses generic free buffers.
>>> I think this cannot affect direct-rearm mode, and we do not need to check
>> this.
>>
>> I understand that it should work even with multiple mempools.
>> What I am trying to say - user may not want to use mbufs from particular
>> mempool for RX (while it is still ok to use it for TX).
>> Let say user can have a separate mempool with small data-buffers (less then
>> normal MTU) to send some 'special' paclets, or even use this memppol with
>> small buffers for zero-copy updating of packet L2/L3 headers, etc.
>> Or it could be some 'special' user provided mempool.
>> That's why I wonder should we allow only mbufs from mempool that is
>> assigned to that RX queue.
> 
> Sorry for my misleading. If I understand correctly this time, you means a special
> mempool. Maybe its buffer size is very small and this Tx buffer is generated from control plane.
> 
> However, if we recycle this Tx buffer into Rx buffer ring, there maybe some error due to its
> size is so small.
> 
> Thus we can only allow general buffers which is valid for Rx buffer ring. Furthermore, this should be
> user's  responsibility to ensure the Tx recycling buffers should be valid. If we check this in the data plane,
> it will cost a lot of CPU cycles. At last, what we can do is to add constraint in the notes to remind users.

As I thought: in theory we can add 'struct rte_mempool *mp'
into rte_eth_rxq_rearm_data.
And then:
if (mbuf->pool == rxq_rearm_data->mp)
   /* put mbuf into rearm buffer */
else
   /* free mbuf */
For the 'proper' config (when the txq contains mbufs from the expected mempool)
the overhead will be minimal.
In the other case it might be higher, but it would still work and there would be no need for
extra limitations.
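
For concreteness, a sketch of how that check could slot into the non-FAST_FREE branch of i40e_tx_fill_sw_ring(); the 'mp' field in rte_eth_rxq_rearm_data is the addition suggested above and is not part of this patch:

	for (i = 0; i < n; i++) {
		m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
		if (m == NULL)
			continue;
		if (m->pool == rxq_rearm_data->mp) {
			/* Same mempool as the Rx queue: recycle into Rx sw-ring. */
			*rxep++ = m;
			nb_rearm++;
		} else {
			/* Foreign mempool: fall back to a normal free. */
			rte_mempool_put(m->pool, m);
		}
	}
	n = nb_rearm;

With such a check in place, mbufs from an unexpected mempool would simply take the normal free path instead of being recycled.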


>>
>>>
>>>>>>> +			if (m != NULL) {
>>>>>>> +				*rxep = m;
>>>>>>> +				nb_rearm++;
>>>>>>> +			}
>>>>>>> +		}
>>>>>>> +		n = nb_rearm;
>>>>>>> +	}
>>>>>>> +
>>>>>>> +	/* update counters for Tx */
>>>>>>> +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq-
>>> tx_rs_thresh);
>>>>>>> +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq-
>>> tx_rs_thresh);
>>>>>>> +	if (txq->tx_next_dd >= txq->nb_tx_desc)
>>>>>>> +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
>>>>>>> +
>>>>>>> +	return n;
>>>>>>> +}
>>>>>>> +
  
Feifei Wang March 23, 2023, 10:49 a.m. UTC | #8
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
> Sent: Monday, March 20, 2023 12:11 AM
> To: Feifei Wang <Feifei.Wang2@arm.com>; Konstantin Ananyev
> <konstantin.ananyev@huawei.com>; Yuying Zhang
> <Yuying.Zhang@intel.com>; Beilei Xing <beilei.xing@intel.com>; Ruifeng
> Wang <Ruifeng.Wang@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>
> Subject: Re: RE: [PATCH v3 2/3] net/i40e: enable direct rearm with
> separate API
> 
> 
> >>>>>>> +int
> >>>>>>> +i40e_tx_fill_sw_ring(void *tx_queue,
> >>>>>>> +		struct rte_eth_rxq_rearm_data *rxq_rearm_data) {
> >>>>>>> +	struct i40e_tx_queue *txq = tx_queue;
> >>>>>>> +	struct i40e_tx_entry *txep;
> >>>>>>> +	void **rxep;
> >>>>>>> +	struct rte_mbuf *m;
> >>>>>>> +	int i, n;
> >>>>>>> +	int nb_rearm = 0;
> >>>>>>> +
> >>>>>>> +	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
> >>>>>>> +			txq->nb_tx_free > txq->tx_free_thresh)
> >>>>>>> +		return 0;
> >>>>>>> +
> >>>>>>> +	/* check DD bits on threshold descriptor */
> >>>>>>> +	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> >>>>>>> +
> >> 	rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> >>>>>>> +
> >>>>>> 	rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> >>>>>>> +		return 0;
> >>>>>>> +
> >>>>>>> +	n = txq->tx_rs_thresh;
> >>>>>>> +
> >>>>>>> +	/* first buffer to free from S/W ring is at index
> >>>>>>> +	 * tx_next_dd - (tx_rs_thresh-1)
> >>>>>>> +	 */
> >>>>>>> +	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> >>>>>>> +	rxep = rxq_rearm_data->rx_sw_ring;
> >>>>>>> +	rxep += *rxq_rearm_data->rearm_start;
> >>>>>>> +
> >>>>>>> +	if (txq->offloads &
> >> RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> >>>>>>> +		/* directly put mbufs from Tx to Rx */
> >>>>>>> +		for (i = 0; i < n; i++, rxep++, txep++)
> >>>>>>> +			*rxep = txep[0].mbuf;
> >>>>>>> +	} else {
> >>>>>>> +		for (i = 0; i < n; i++, rxep++) {
> >>>>>>> +			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> >>>>
> >>>> One thing I forgot to ask:
> >>>> What would happen if this mbuf belongs to different mempool (not
> >>>> one that we specify at rx_queue_setup())?
> >>>> Do we need to check it here?
> >>>> Or would it be upper layer constraint?
> >>>> Or...?
> >>>>
> >>>
> >>> First, 'different mempool' is valid for no FAST_FREE path in
> tx_free_buffers.
> >>>
> >>> If buffers belong to different mempool, we can have an example here:
> >>> Buffer 1 from mempool 1, its recycle path is:
> >>> --------------------------------------------------------------------
> >>> --
> >>> ------------------- 1. queue_setup: rearm from mempool 1 into Rx
> >>> sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
> >>> rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
> >>> into Rx sw-ring:
> >>>             or
> >>>      tx_free_buffers: free into mempool 1 (no fast_free path)
> >>> --------------------------------------------------------------------
> >>> --
> >>> -------------------
> >>>
> >>> Buffer 2 from mempool 2, its recycle path is:
> >>> --------------------------------------------------------------------
> >>> --
> >>> ------------------- 1. queue_setup: rearm from mempool 2 into Rx
> >>> sw-ring 2. rte_eth_Rx_burst: used by user app (Rx) 3.
> >>> rte_eth_Tx_burst: mount on Tx sw-ring 4. rte_eth_direct_rearm: free
> >>> into Rx sw-ring
> >>>             or
> >>>      tx_free_buffers: free into mempool 2 (no fast_free_path)
> >>> --------------------------------------------------------------------
> >>> --
> >>> -------------------
> >>>
> >>> Thus, buffers from Tx different mempools are the same for Rx. The
> >>> difference point is that they will be freed into different mempool
> >>> if the
> >> thread  uses generic free buffers.
> >>> I think this cannot affect direct-rearm mode, and we do not need to
> >>> check
> >> this.
> >>
> >> I understand that it should work even with multiple mempools.
> >> What I am trying to say - user may not want to use mbufs from
> >> particular mempool for RX (while it is still ok to use it for TX).
> >> Let say user can have a separate mempool with small data-buffers
> >> (less then normal MTU) to send some 'special' paclets, or even use
> >> this memppol with small buffers for zero-copy updating of packet L2/L3
> headers, etc.
> >> Or it could be some 'special' user provided mempool.
> >> That's why I wonder should we allow only mbufs from mempool that is
> >> assigned to that RX queue.
> >
> > Sorry for my misleading. If I understand correctly this time, you
> > means a special mempool. Maybe its buffer size is very small and this Tx
> buffer is generated from control plane.
> >
> > However, if we recycle this Tx buffer into Rx buffer ring, there maybe
> > some error due to its size is so small.
> >
> > Thus we can only allow general buffers which is valid for Rx buffer
> > ring. Furthermore, this should be user's  responsibility to ensure the
> > Tx recycling buffers should be valid. If we check this in the data plane, it will
> cost a lot of CPU cycles. At last, what we can do is to add constraint in the
> notes to remind users.
> 
> As I thought: in theory we can add 'struct rte_mempool *mp'
> into rte_eth_rxq_rearm_data.
> And then:
> if (mbuf->pool == rxq_rearm_data->mp)
>    /* put mbuf into rearm buffer */
> else
>    /* free mbuf */
> For the 'proper' config (when txq contains mbufs from expected mempool)
> the overhead will be minimal.
> In other case it might be higher, but still would work and no need for extra
> limitations.

It's a good idea. I tested performance with this change and there is currently
no degradation. Thus, I have added this check in the latest version.

> 
> 
> >>
> >>>
> >>>>>>> +			if (m != NULL) {
> >>>>>>> +				*rxep = m;
> >>>>>>> +				nb_rearm++;
> >>>>>>> +			}
> >>>>>>> +		}
> >>>>>>> +		n = nb_rearm;
> >>>>>>> +	}
> >>>>>>> +
> >>>>>>> +	/* update counters for Tx */
> >>>>>>> +	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq-
> >>> tx_rs_thresh);
> >>>>>>> +	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq-
> >>> tx_rs_thresh);
> >>>>>>> +	if (txq->tx_next_dd >= txq->nb_tx_desc)
> >>>>>>> +		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> >>>>>>> +
> >>>>>>> +	return n;
> >>>>>>> +}
> >>>>>>> +
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d99..29c1ce2470 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -497,6 +497,7 @@  static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.flow_ops_get                 = i40e_dev_flow_ops_get,
 	.rxq_info_get                 = i40e_rxq_info_get,
 	.txq_info_get                 = i40e_txq_info_get,
+	.rxq_rearm_data_get           = i40e_rxq_rearm_data_get,
 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
 	.timesync_enable              = i40e_timesync_enable,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index fe943a45ff..6a6a2a6d3c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1352,6 +1352,8 @@  void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_rxq_info *qinfo);
 void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
+void i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_rearm_data *rxq_rearm_data);
 int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			   struct rte_eth_burst_mode *mode);
 int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 788ffb51c2..d8d801acaf 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3197,6 +3197,19 @@  i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = txq->offloads;
 }
 
+void
+i40e_rxq_rearm_data_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_rearm_data *rxq_rearm_data)
+{
+	struct i40e_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	rxq_rearm_data->rx_sw_ring = rxq->sw_ring;
+	rxq_rearm_data->rearm_start = &rxq->rxrearm_start;
+	rxq_rearm_data->rearm_nb = &rxq->rxrearm_nb;
+}
+
 #ifdef RTE_ARCH_X86
 static inline bool
 get_avx_supported(bool request_avx512)
@@ -3321,6 +3334,9 @@  i40e_set_rx_function(struct rte_eth_dev *dev)
 			PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
 				     dev->data->port_id);
 			dev->rx_pkt_burst = i40e_recv_pkts_vec;
+#ifdef RTE_ARCH_ARM64
+			dev->rx_flush_descriptor = i40e_rx_flush_descriptor_vec;
+#endif
 		}
 #endif /* RTE_ARCH_X86 */
 	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
@@ -3484,6 +3500,9 @@  i40e_set_tx_function(struct rte_eth_dev *dev)
 			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
 				     dev->data->port_id);
 			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#ifdef RTE_ARCH_ARM64
+			dev->tx_fill_sw_ring = i40e_tx_fill_sw_ring;
+#endif
 #endif /* RTE_ARCH_X86 */
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5e6eecc501..8a29bd89df 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -233,6 +233,10 @@  uint32_t i40e_dev_rx_queue_count(void *rx_queue);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+int i40e_tx_fill_sw_ring(void *tx_queue,
+		struct rte_eth_rxq_rearm_data *rxq_rearm_data);
+int i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm);
+
 uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts);
 uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index fe1a6ec75e..eb96301a43 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -146,6 +146,60 @@  i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	return txq->tx_rs_thresh;
 }
 
+int
+i40e_tx_fill_sw_ring(void *tx_queue,
+		struct rte_eth_rxq_rearm_data *rxq_rearm_data)
+{
+	struct i40e_tx_queue *txq = tx_queue;
+	struct i40e_tx_entry *txep;
+	void **rxep;
+	struct rte_mbuf *m;
+	int i, n;
+	int nb_rearm = 0;
+
+	if (*rxq_rearm_data->rearm_nb < txq->tx_rs_thresh ||
+			txq->nb_tx_free > txq->tx_free_thresh)
+		return 0;
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1)
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	rxep = rxq_rearm_data->rx_sw_ring;
+	rxep += *rxq_rearm_data->rearm_start;
+
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+		/* directly put mbufs from Tx to Rx */
+		for (i = 0; i < n; i++, rxep++, txep++)
+			*rxep = txep[0].mbuf;
+	} else {
+		for (i = 0; i < n; i++, rxep++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL) {
+				*rxep = m;
+				nb_rearm++;
+			}
+		}
+		n = nb_rearm;
+	}
+
+	/* update counters for Tx */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return n;
+}
+
 static __rte_always_inline void
 tx_backlog_entry(struct i40e_tx_entry *txep,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 12e6f1cbcb..1509d3223b 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -739,6 +739,48 @@  i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 	return nb_pkts;
 }
 
+int
+i40e_rx_flush_descriptor_vec(void *rx_queue, uint16_t nb_rearm)
+{
+	struct i40e_rx_queue *rxq = rx_queue;
+	struct i40e_rx_entry *rxep;
+	volatile union i40e_rx_desc *rxdp;
+	uint16_t rx_id;
+	uint64x2_t dma_addr;
+	uint64_t paddr;
+	uint16_t i;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+	rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+	for (i = 0; i < nb_rearm; i++) {
+		/* Initialize rxdp descs */
+		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+		dma_addr = vdupq_n_u64(paddr);
+		/* flush desc with pa dma_addr */
+		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr);
+	}
+
+	/* Update the descriptor initializer index */
+	rxq->rxrearm_start += nb_rearm;
+	rx_id = rxq->rxrearm_start - 1;
+
+	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
+		rxq->rxrearm_start = rxq->rxrearm_start - rxq->nb_rx_desc;
+		if (!rxq->rxrearm_start)
+			rx_id = rxq->nb_rx_desc - 1;
+		else
+			rx_id = rxq->rxrearm_start - 1;
+	}
+	rxq->rxrearm_nb -= nb_rearm;
+
+	rte_io_wmb();
+	/* Update the tail pointer on the NIC */
+	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
+
+	return 0;
+}
+
 void __rte_cold
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
 {