[v1] net/axgbe: add support for Scattered Rx
Checks
Commit Message
From: Selwin Sebastian <selwin.sebastian@amd.com>
Enable scattered rx support and add jumbo packet transmit capability
Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
---
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 2 +
drivers/net/axgbe/axgbe_ethdev.c | 18 +++-
drivers/net/axgbe/axgbe_rxtx.c | 146 +++++++++++++++++++++++++++++
drivers/net/axgbe/axgbe_rxtx.h | 2 +
5 files changed, 168 insertions(+), 1 deletion(-)
Comments
[AMD Official Use Only - Internal Distribution Only]
Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>
>
>-----Original Message-----
>From: Sebastian, Selwin <Selwin.Sebastian@amd.com>
>Sent: Tuesday, February 25, 2020 6:10 PM
>To: dev@dpdk.org
>Cc: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
>Subject: [PATCH v1] net/axgbe: add support for Scattered Rx
>
>From: Selwin Sebastian <selwin.sebastian@amd.com>
>
>Enable scattered rx support and add jumbo packet transmit capability
>
>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>---
> doc/guides/nics/features/axgbe.ini | 1 +
> drivers/net/axgbe/axgbe_common.h | 2 +
> drivers/net/axgbe/axgbe_ethdev.c | 18 +++-
> drivers/net/axgbe/axgbe_rxtx.c | 146 +++++++++++++++++++++++++++++
> drivers/net/axgbe/axgbe_rxtx.h | 2 +
> 5 files changed, 168 insertions(+), 1 deletion(-)
>
>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>index ab4da559f..0becaa097 100644
>--- a/doc/guides/nics/features/axgbe.ini
>+++ b/doc/guides/nics/features/axgbe.ini
>@@ -7,6 +7,7 @@
> Speed capabilities = Y
> Link status = Y
> Jumbo frame = Y
>+Scattered Rx = Y
> Promiscuous mode = Y
> Allmulticast mode = Y
> RSS hash = Y
>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>index fdb037dd5..fbd46150c 100644
>--- a/drivers/net/axgbe/axgbe_common.h
>+++ b/drivers/net/axgbe/axgbe_common.h
>@@ -1135,6 +1135,8 @@
> #define RX_NORMAL_DESC3_PL_WIDTH 14
> #define RX_NORMAL_DESC3_RSV_INDEX 26
> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>+#define RX_NORMAL_DESC3_LD_INDEX 28
>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>
> #define RX_DESC3_L34T_IPV4_TCP 1
> #define RX_DESC3_L34T_IPV4_UDP 2
>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>index d0b6f091f..eb2f51f89 100644
>--- a/drivers/net/axgbe/axgbe_ethdev.c
>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>@@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>+ DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_IPV4_CKSUM |
> DEV_TX_OFFLOAD_UDP_CKSUM |
>+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
>+ DEV_TX_OFFLOAD_UDP_TSO |
>+ DEV_TX_OFFLOAD_SCTP_CKSUM |
>+ DEV_TX_OFFLOAD_MULTI_SEGS |
> DEV_TX_OFFLOAD_TCP_CKSUM;
>
> if (pdata->hw_feat.rss) {
>@@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
> struct rte_pci_device *pci_dev;
> uint32_t reg, mac_lo, mac_hi;
> int ret;
>+ struct rte_eth_dev_info dev_info = { 0 };
>
> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>+ eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
>+
>+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
>+ eth_dev->data->scattered_rx = 1;
>+
>+ /* Scatter Rx handling */
>+ if (eth_dev->data->scattered_rx)
>+ eth_dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>+ else
>+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary
>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>index 96055c25b..57e2bbb34 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.c
>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>@@ -307,6 +307,152 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> return nb_rx;
> }
>
>+
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>+{
>+ PMD_INIT_FUNC_TRACE();
>+ uint16_t nb_rx = 0;
>+ struct axgbe_rx_queue *rxq = rx_queue;
>+ volatile union axgbe_rx_desc *desc;
>+
>+ uint64_t old_dirty = rxq->dirty;
>+ struct rte_mbuf *first_seg = NULL;
>+ struct rte_mbuf *mbuf, *tmbuf;
>+ unsigned int err;
>+ uint32_t error_status;
>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>+
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>+ while (nb_rx < nb_pkts) {
>+ bool eop = 0;
>+next_desc:
>+ if (unlikely(idx == rxq->nb_desc))
>+ idx = 0;
>+
>+ desc = &rxq->desc[idx];
>+
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>+ break;
>+
>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>+ if (unlikely(!tmbuf)) {
>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>+ " queue_id = %u\n",
>+ (unsigned int)rxq->port_id,
>+ (unsigned int)rxq->queue_id);
>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>+ break;
>+ }
>+
>+ pidx = idx + 1;
>+ if (unlikely(pidx == rxq->nb_desc))
>+ pidx = 0;
>+
>+ rte_prefetch0(rxq->sw_ring[pidx]);
>+ if ((pidx & 0x3) == 0) {
>+ rte_prefetch0(&rxq->desc[pidx]);
>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>+ }
>+
>+ mbuf = rxq->sw_ring[idx];
>+ /* Check for any errors and free mbuf*/
>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, ES);
>+ error_status = 0;
>+ if (unlikely(err)) {
>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>+ rxq->errors++;
>+ rte_pktmbuf_free(mbuf);
>+ goto err_set;
>+ }
>+ }
>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>+
>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, LD)) {
>+ eop = 0;
>+ pkt_len = rxq->buf_size;
>+ data_len = pkt_len;
>+ } else {
>+ eop = 1;
>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, PL);
>+ data_len = pkt_len - rxq->crc_len;
>+ }
>+
>+ if (first_seg != NULL) {
>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>+ rte_mempool_put(rxq->mb_pool,
>+ first_seg);
>+ } else {
>+ first_seg = mbuf;
>+ }
>+
>+ /* Get the RSS hash */
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>+
>+ /* Mbuf populate */
>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>+ mbuf->data_len = data_len;
>+
>+err_set:
>+ rxq->cur++;
>+ rxq->sw_ring[idx++] = tmbuf;
>+ desc->read.baddr =
>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>+ memset((void *)(&desc->read.desc2), 0, 8);
>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>+ rxq->dirty++;
>+
>+ if (!eop) {
>+ rte_pktmbuf_free(mbuf);
>+ goto next_desc;
>+ }
>+
>+ first_seg->pkt_len = pkt_len;
>+ rxq->bytes += pkt_len;
>+ mbuf->next = NULL;
>+
>+ first_seg->port = rxq->port_id;
>+ if (rxq->pdata->rx_csum_enable) {
>+ mbuf->ol_flags = 0;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>+ } else if (unlikely(error_status
>+ == AXGBE_L4_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>+ }
>+ }
>+
>+ rx_pkts[nb_rx++] = first_seg;
>+
>+ /* Setup receipt context for a new packet.*/
>+ first_seg = NULL;
>+ }
>+
>+ /* Save receive context.*/
>+ rxq->pkts += nb_rx;
>+
>+ if (rxq->dirty != old_dirty) {
>+ rte_wmb();
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>+ low32_value(rxq->ring_phys_addr +
>+ (idx * sizeof(union axgbe_rx_desc))));
>+ }
>+ return nb_rx;
>+}
>+
> /* Tx Apis */
> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
> {
>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>index a21537df9..f6796b09b 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.h
>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>--
>2.17.1
>
Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>From: Selwin Sebastian <selwin.sebastian@amd.com>
>
>Enable scattered rx support and add jumbo packet receive capability
>
>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>---
> doc/guides/nics/features/axgbe.ini | 1 +
> drivers/net/axgbe/axgbe_common.h | 2 +
> drivers/net/axgbe/axgbe_ethdev.c | 16 +++-
> drivers/net/axgbe/axgbe_rxtx.c | 145 +++++++++++++++++++++++++++++
> drivers/net/axgbe/axgbe_rxtx.h | 2 +
> 5 files changed, 165 insertions(+), 1 deletion(-)
>
>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>index ab4da559f..0becaa097 100644
>--- a/doc/guides/nics/features/axgbe.ini
>+++ b/doc/guides/nics/features/axgbe.ini
>@@ -7,6 +7,7 @@
> Speed capabilities = Y
> Link status = Y
> Jumbo frame = Y
>+Scattered Rx = Y
> Promiscuous mode = Y
> Allmulticast mode = Y
> RSS hash = Y
>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>index fdb037dd5..fbd46150c 100644
>--- a/drivers/net/axgbe/axgbe_common.h
>+++ b/drivers/net/axgbe/axgbe_common.h
>@@ -1135,6 +1135,8 @@
> #define RX_NORMAL_DESC3_PL_WIDTH 14
> #define RX_NORMAL_DESC3_RSV_INDEX 26
> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>+#define RX_NORMAL_DESC3_LD_INDEX 28
>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>
> #define RX_DESC3_L34T_IPV4_TCP 1
> #define RX_DESC3_L34T_IPV4_UDP 2
>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>index d0b6f091f..013c6330d 100644
>--- a/drivers/net/axgbe/axgbe_ethdev.c
>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>@@ -249,6 +249,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
> {
> struct axgbe_port *pdata = dev->data->dev_private;
> int ret;
>+ struct rte_eth_dev_data *dev_data = dev->data;
>+ uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
>
> PMD_INIT_FUNC_TRACE();
>
>@@ -279,6 +281,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>
> axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
> axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
>+
>+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
>+ max_pkt_len > pdata->rx_buf_size)
>+ dev_data->scattered_rx = 1;
>+
>+ /* Scatter Rx handling */
>+ if (dev_data->scattered_rx)
>+ dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>+ else
>+ dev->rx_pkt_burst = &axgbe_recv_pkts;
>+
> return 0;
> }
>
>@@ -789,6 +802,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>+ DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
>@@ -1020,7 +1035,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
> int ret;
>
> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary
>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>index 96055c25b..8f818eb89 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.c
>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>@@ -307,6 +307,151 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> return nb_rx;
> }
>
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>+{
>+ PMD_INIT_FUNC_TRACE();
>+ uint16_t nb_rx = 0;
>+ struct axgbe_rx_queue *rxq = rx_queue;
>+ volatile union axgbe_rx_desc *desc;
>+
>+ uint64_t old_dirty = rxq->dirty;
>+ struct rte_mbuf *first_seg = NULL;
>+ struct rte_mbuf *mbuf, *tmbuf;
>+ unsigned int err;
>+ uint32_t error_status;
>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>+
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>+ while (nb_rx < nb_pkts) {
>+ bool eop = 0;
>+next_desc:
>+ if (unlikely(idx == rxq->nb_desc))
>+ idx = 0;
>+
>+ desc = &rxq->desc[idx];
>+
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>+ break;
>+
>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>+ if (unlikely(!tmbuf)) {
>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>+ " queue_id = %u\n",
>+ (unsigned int)rxq->port_id,
>+ (unsigned int)rxq->queue_id);
>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>+ break;
>+ }
>+
>+ pidx = idx + 1;
>+ if (unlikely(pidx == rxq->nb_desc))
>+ pidx = 0;
>+
>+ rte_prefetch0(rxq->sw_ring[pidx]);
>+ if ((pidx & 0x3) == 0) {
>+ rte_prefetch0(&rxq->desc[pidx]);
>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>+ }
>+
>+ mbuf = rxq->sw_ring[idx];
>+ /* Check for any errors and free mbuf*/
>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, ES);
>+ error_status = 0;
>+ if (unlikely(err)) {
>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>+ rxq->errors++;
>+ rte_pktmbuf_free(mbuf);
>+ goto err_set;
>+ }
>+ }
>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>+
>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, LD)) {
>+ eop = 0;
>+ pkt_len = rxq->buf_size;
>+ data_len = pkt_len;
>+ } else {
>+ eop = 1;
>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, PL);
>+ data_len = pkt_len - rxq->crc_len;
>+ }
>+
>+ if (first_seg != NULL) {
>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>+ rte_mempool_put(rxq->mb_pool,
>+ first_seg);
>+ } else {
>+ first_seg = mbuf;
>+ }
>+
>+ /* Get the RSS hash */
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>+
>+ /* Mbuf populate */
>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>+ mbuf->data_len = data_len;
>+
>+err_set:
>+ rxq->cur++;
>+ rxq->sw_ring[idx++] = tmbuf;
>+ desc->read.baddr =
>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>+ memset((void *)(&desc->read.desc2), 0, 8);
>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>+ rxq->dirty++;
>+
>+ if (!eop) {
>+ rte_pktmbuf_free(mbuf);
>+ goto next_desc;
>+ }
>+
>+ first_seg->pkt_len = pkt_len;
>+ rxq->bytes += pkt_len;
>+ mbuf->next = NULL;
>+
>+ first_seg->port = rxq->port_id;
>+ if (rxq->pdata->rx_csum_enable) {
>+ mbuf->ol_flags = 0;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>+ } else if (unlikely(error_status
>+ == AXGBE_L4_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>+ }
>+ }
>+
>+ rx_pkts[nb_rx++] = first_seg;
>+
>+ /* Setup receipt context for a new packet.*/
>+ first_seg = NULL;
>+ }
>+
>+ /* Save receive context.*/
>+ rxq->pkts += nb_rx;
>+
>+ if (rxq->dirty != old_dirty) {
>+ rte_wmb();
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>+ low32_value(rxq->ring_phys_addr +
>+ (idx * sizeof(union axgbe_rx_desc))));
>+ }
>+ return nb_rx;
>+}
>+
> /* Tx Apis */
> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
> {
>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>index a21537df9..f6796b09b 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.h
>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>--
>2.17.1
>
Please scratch this. I just now acked the V3.
Regards,
Ravi
>
>
>-----Original Message-----
>From: Kumar, Ravi1
>Sent: Thursday, March 5, 2020 3:51 PM
>To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; 'dev@dpdk.org' <dev@dpdk.org>
>Subject: RE: [PATCH v1] net/axgbe: add support for Scattered Rx
>
>Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>
>>From: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>>Enable scattered rx support and add jumbo packet receive capability
>>
>>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>>---
>> doc/guides/nics/features/axgbe.ini | 1 +
>> drivers/net/axgbe/axgbe_common.h | 2 +
>> drivers/net/axgbe/axgbe_ethdev.c | 16 +++-
>> drivers/net/axgbe/axgbe_rxtx.c | 145 +++++++++++++++++++++++++++++
>> drivers/net/axgbe/axgbe_rxtx.h | 2 +
>> 5 files changed, 165 insertions(+), 1 deletion(-)
>>
>>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>>index ab4da559f..0becaa097 100644
>>--- a/doc/guides/nics/features/axgbe.ini
>>+++ b/doc/guides/nics/features/axgbe.ini
>>@@ -7,6 +7,7 @@
>> Speed capabilities = Y
>> Link status = Y
>> Jumbo frame = Y
>>+Scattered Rx = Y
>> Promiscuous mode = Y
>> Allmulticast mode = Y
>> RSS hash = Y
>>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>>index fdb037dd5..fbd46150c 100644
>>--- a/drivers/net/axgbe/axgbe_common.h
>>+++ b/drivers/net/axgbe/axgbe_common.h
>>@@ -1135,6 +1135,8 @@
>> #define RX_NORMAL_DESC3_PL_WIDTH 14
>> #define RX_NORMAL_DESC3_RSV_INDEX 26
>> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>>+#define RX_NORMAL_DESC3_LD_INDEX 28
>>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>>
>> #define RX_DESC3_L34T_IPV4_TCP 1
>> #define RX_DESC3_L34T_IPV4_UDP 2
>>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>>index d0b6f091f..013c6330d 100644
>>--- a/drivers/net/axgbe/axgbe_ethdev.c
>>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>>@@ -249,6 +249,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>> {
>> struct axgbe_port *pdata = dev->data->dev_private;
>> int ret;
>>+ struct rte_eth_dev_data *dev_data = dev->data;
>>+ uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
>>
>> PMD_INIT_FUNC_TRACE();
>>
>>@@ -279,6 +281,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>>
>> axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
>> axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
>>+
>>+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
>>+ max_pkt_len > pdata->rx_buf_size)
>>+ dev_data->scattered_rx = 1;
>>+
>>+ /* Scatter Rx handling */
>>+ if (dev_data->scattered_rx)
>>+ dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>>+ else
>>+ dev->rx_pkt_burst = &axgbe_recv_pkts;
>>+
>> return 0;
>> }
>>
>>@@ -789,6 +802,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> DEV_RX_OFFLOAD_IPV4_CKSUM |
>> DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM |
>>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>>+ DEV_RX_OFFLOAD_SCATTER |
>> DEV_RX_OFFLOAD_KEEP_CRC;
>>
>> dev_info->tx_offload_capa =
>>@@ -1020,7 +1035,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
>> int ret;
>>
>> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>>
>> /*
>> * For secondary processes, we don't initialise any further as primary
>>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>>index 96055c25b..8f818eb89 100644
>>--- a/drivers/net/axgbe/axgbe_rxtx.c
>>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>>@@ -307,6 +307,151 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> return nb_rx;
>> }
>>
>>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>>+{
>>+ PMD_INIT_FUNC_TRACE();
>>+ uint16_t nb_rx = 0;
>>+ struct axgbe_rx_queue *rxq = rx_queue;
>>+ volatile union axgbe_rx_desc *desc;
>>+
>>+ uint64_t old_dirty = rxq->dirty;
>>+ struct rte_mbuf *first_seg = NULL;
>>+ struct rte_mbuf *mbuf, *tmbuf;
>>+ unsigned int err;
>>+ uint32_t error_status;
>>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>>+
>>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>>+ while (nb_rx < nb_pkts) {
>>+ bool eop = 0;
>>+next_desc:
>>+ if (unlikely(idx == rxq->nb_desc))
>>+ idx = 0;
>>+
>>+ desc = &rxq->desc[idx];
>>+
>>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>>+ break;
>>+
>>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>>+ if (unlikely(!tmbuf)) {
>>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>>+ " queue_id = %u\n",
>>+ (unsigned int)rxq->port_id,
>>+ (unsigned int)rxq->queue_id);
>>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>>+ break;
>>+ }
>>+
>>+ pidx = idx + 1;
>>+ if (unlikely(pidx == rxq->nb_desc))
>>+ pidx = 0;
>>+
>>+ rte_prefetch0(rxq->sw_ring[pidx]);
>>+ if ((pidx & 0x3) == 0) {
>>+ rte_prefetch0(&rxq->desc[pidx]);
>>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>>+ }
>>+
>>+ mbuf = rxq->sw_ring[idx];
>>+ /* Check for any errors and free mbuf*/
>>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, ES);
>>+ error_status = 0;
>>+ if (unlikely(err)) {
>>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>>+ rxq->errors++;
>>+ rte_pktmbuf_free(mbuf);
>>+ goto err_set;
>>+ }
>>+ }
>>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>>+
>>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, LD)) {
>>+ eop = 0;
>>+ pkt_len = rxq->buf_size;
>>+ data_len = pkt_len;
>>+ } else {
>>+ eop = 1;
>>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, PL);
>>+ data_len = pkt_len - rxq->crc_len;
>>+ }
>>+
>>+ if (first_seg != NULL) {
>>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>>+ rte_mempool_put(rxq->mb_pool,
>>+ first_seg);
>>+ } else {
>>+ first_seg = mbuf;
>>+ }
>>+
>>+ /* Get the RSS hash */
>>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>>+
>>+ /* Mbuf populate */
>>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>>+ mbuf->data_len = data_len;
>>+
>>+err_set:
>>+ rxq->cur++;
>>+ rxq->sw_ring[idx++] = tmbuf;
>>+ desc->read.baddr =
>>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>>+ memset((void *)(&desc->read.desc2), 0, 8);
>>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>>+ rxq->dirty++;
>>+
>>+ if (!eop) {
>>+ rte_pktmbuf_free(mbuf);
>>+ goto next_desc;
>>+ }
>>+
>>+ first_seg->pkt_len = pkt_len;
>>+ rxq->bytes += pkt_len;
>>+ mbuf->next = NULL;
>>+
>>+ first_seg->port = rxq->port_id;
>>+ if (rxq->pdata->rx_csum_enable) {
>>+ mbuf->ol_flags = 0;
>>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>>+ } else if (unlikely(error_status
>>+ == AXGBE_L4_CSUM_ERR)) {
>>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>>+ }
>>+ }
>>+
>>+ rx_pkts[nb_rx++] = first_seg;
>>+
>>+ /* Setup receipt context for a new packet.*/
>>+ first_seg = NULL;
>>+ }
>>+
>>+ /* Save receive context.*/
>>+ rxq->pkts += nb_rx;
>>+
>>+ if (rxq->dirty != old_dirty) {
>>+ rte_wmb();
>>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>>+ low32_value(rxq->ring_phys_addr +
>>+ (idx * sizeof(union axgbe_rx_desc))));
>>+ }
>>+ return nb_rx;
>>+}
>>+
>> /* Tx Apis */
>> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
>> {
>>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>>index a21537df9..f6796b09b 100644
>>--- a/drivers/net/axgbe/axgbe_rxtx.h
>>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
>> struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>>--
>>2.17.1
>>
>
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Jumbo frame = Y
+Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
@@ -1135,6 +1135,8 @@
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
@@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_TSO |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
@@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
uint32_t reg, mac_lo, mac_hi;
int ret;
+ struct rte_eth_dev_info dev_info = { 0 };
eth_dev->dev_ops = &axgbe_eth_dev_ops;
- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+ eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+
+ /* Scatter Rx handling */
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+ else
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -307,6 +307,152 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *first_seg = NULL;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ bool eop = 0;
+next_desc:
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for any errors and free mbuf*/
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR)
+ && (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, LD)) {
+ eop = 0;
+ pkt_len = rxq->buf_size;
+ data_len = pkt_len;
+ } else {
+ eop = 1;
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, PL);
+ data_len = pkt_len - rxq->crc_len;
+ }
+
+ if (first_seg != NULL) {
+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+ rte_mempool_put(rxq->mb_pool,
+ first_seg);
+ } else {
+ first_seg = mbuf;
+ }
+
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+
+ /* Mbuf populate */
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_len = data_len;
+
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+
+ if (!eop) {
+ rte_pktmbuf_free(mbuf);
+ goto next_desc;
+ }
+
+ first_seg->pkt_len = pkt_len;
+ rxq->bytes += pkt_len;
+ mbuf->next = NULL;
+
+ first_seg->port = rxq->port_id;
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (unlikely(error_status
+ == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ rx_pkts[nb_rx++] = first_seg;
+
+ /* Setup receipt context for a new packet.*/
+ first_seg = NULL;
+ }
+
+ /* Save receive context.*/
+ rxq->pkts += nb_rx;
+
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+ return nb_rx;
+}
+
/* Tx Apis */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);