[v5,20/24] net/ngbe: support bulk and scatter Rx
Commit Message
Add a bulk allocation receive function, and support scattered Rx
depending on the Rx offload configuration.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/ngbe.rst | 1 +
drivers/net/ngbe/ngbe_ethdev.c | 15 +-
drivers/net/ngbe/ngbe_ethdev.h | 8 +
drivers/net/ngbe/ngbe_rxtx.c | 583 +++++++++++++++++++++++++++++++++
drivers/net/ngbe/ngbe_rxtx.h | 2 +
5 files changed, 607 insertions(+), 2 deletions(-)
Comments
On 6/2/21 12:41 PM, Jiawen Wu wrote:
> Add a bulk allocation receive function, and support scattered Rx
> depending on the Rx offload configuration.
The patch should advertise the Rx scatter offload here, not in an
earlier patch. The corresponding bits of that earlier patch should be
moved here as well.
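I.e. I'd expect the capability bit to show up in this very patch,
roughly like below (an untested sketch; that ngbe_get_rx_port_offloads()
is the right spot is my assumption):

	uint64_t
	ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
	{
		uint64_t offloads = 0;

		/* ... other capabilities kept as-is ... */
		offloads |= DEV_RX_OFFLOAD_SCATTER;

		return offloads;
	}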
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
> doc/guides/nics/ngbe.rst | 1 +
> drivers/net/ngbe/ngbe_ethdev.c | 15 +-
> drivers/net/ngbe/ngbe_ethdev.h | 8 +
> drivers/net/ngbe/ngbe_rxtx.c | 583 +++++++++++++++++++++++++++++++++
> drivers/net/ngbe/ngbe_rxtx.h | 2 +
> 5 files changed, 607 insertions(+), 2 deletions(-)
>
> diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
> index 04fa3e90a8..e999e0b580 100644
> --- a/doc/guides/nics/ngbe.rst
> +++ b/doc/guides/nics/ngbe.rst
> @@ -14,6 +14,7 @@ Features
> - Checksum offload
> - Jumbo frames
> - Link state information
> +- Scattered and gather for RX
>
> Prerequisites
> -------------
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 4dab920caa..260bca0e4f 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -112,8 +112,16 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
> eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
> eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
>
> - if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + /*
> + * For secondary processes, we don't initialise any further as primary
> + * has already done this work. Only check we don't need a different
> + * RX and TX function.
RX -> Rx, TX -> Tx
> + */
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> + ngbe_set_rx_function(eth_dev);
> +
> return 0;
> + }
>
> rte_eth_copy_pci_info(eth_dev, pci_dev);
>
> @@ -359,7 +367,10 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> const uint32_t *
> ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
> {
> - if (dev->rx_pkt_burst == ngbe_recv_pkts)
> + if (dev->rx_pkt_burst == ngbe_recv_pkts ||
> + dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
> + dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
> + dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
I don't understand why three flavours of Rx are added in a single
patch. They look like separate features with separate conditions for
their use.
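For the record, as far as I can see from ngbe_set_rx_function() below,
the selection boils down to:

	/*
	 * scattered_rx | rx_bulk_alloc_allowed | rx_pkt_burst
	 * -------------+-----------------------+-------------------------------
	 *     true     |         true          | ngbe_recv_pkts_sc_bulk_alloc
	 *     true     |         false         | ngbe_recv_pkts_sc_single_alloc
	 *     false    |         true          | ngbe_recv_pkts_bulk_alloc
	 *     false    |         false         | ngbe_recv_pkts
	 */

which suggests bulk allocation and scattered Rx could be introduced by
separate patches.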
> return ngbe_get_supported_ptypes();
>
> return NULL;
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index c0f8483eca..1e21db5e25 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -78,6 +78,14 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);
> uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>
> +uint16_t ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
> +
> +uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
> + struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> +uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
> + struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> +
> uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
>
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index 9462da5b7a..f633718237 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -321,6 +321,257 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
> return pkt_flags;
> }
>
> +/*
> + * LOOK_AHEAD defines how many desc statuses to check beyond the
> + * current descriptor.
> + * It must be a pound define for optimal performance.
> + * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
> + * function only works with LOOK_AHEAD=8.
> + */
> +#define LOOK_AHEAD 8
> +#if (LOOK_AHEAD != 8)
> +#error "PMD NGBE: LOOK_AHEAD must be 8\n"
> +#endif
> +static inline int
> +ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
> +{
> + volatile struct ngbe_rx_desc *rxdp;
> + struct ngbe_rx_entry *rxep;
> + struct rte_mbuf *mb;
> + uint16_t pkt_len;
> + uint64_t pkt_flags;
> + int nb_dd;
> + uint32_t s[LOOK_AHEAD];
> + uint32_t pkt_info[LOOK_AHEAD];
> + int i, j, nb_rx = 0;
> + uint32_t status;
> +
> + /* get references to current descriptor and S/W ring entry */
> + rxdp = &rxq->rx_ring[rxq->rx_tail];
> + rxep = &rxq->sw_ring[rxq->rx_tail];
> +
> + status = rxdp->qw1.lo.status;
> + /* check to make sure there is at least 1 packet to receive */
> + if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
> + return 0;
> +
> + /*
> + * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
> + * reference packets that are ready to be received.
> + */
> + for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
> + i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
> + /* Read desc statuses backwards to avoid race condition */
> + for (j = 0; j < LOOK_AHEAD; j++)
> + s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
> +
> + rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
> +
> + /* Compute how many status bits were set */
> + for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
> + (s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
> + ;
> +
> + for (j = 0; j < nb_dd; j++)
> + pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
> +
> + nb_rx += nb_dd;
> +
> + /* Translate descriptor info to mbuf format */
> + for (j = 0; j < nb_dd; ++j) {
> + mb = rxep[j].mbuf;
> + pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
> + rxq->crc_len;
> + mb->data_len = pkt_len;
> + mb->pkt_len = pkt_len;
> + mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
> +
> + /* convert descriptor fields to rte mbuf flags */
> + pkt_flags = rx_desc_status_to_pkt_flags(s[j],
> + rxq->vlan_flags);
> + pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
> + pkt_flags |=
> + ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
> + mb->ol_flags = pkt_flags;
> + mb->packet_type =
> + ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
> + rxq->pkt_type_mask);
> +
> + if (likely(pkt_flags & PKT_RX_RSS_HASH))
> + mb->hash.rss =
> + rte_le_to_cpu_32(rxdp[j].qw0.dw1);
> + }
> +
> + /* Move mbuf pointers from the S/W ring to the stage */
> + for (j = 0; j < LOOK_AHEAD; ++j)
> + rxq->rx_stage[i + j] = rxep[j].mbuf;
> +
> + /* stop if all requested packets could not be received */
> + if (nb_dd != LOOK_AHEAD)
> + break;
> + }
> +
> + /* clear software ring entries so we can cleanup correctly */
> + for (i = 0; i < nb_rx; ++i)
> + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
> +
> + return nb_rx;
> +}
> +
> +static inline int
> +ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
> +{
> + volatile struct ngbe_rx_desc *rxdp;
> + struct ngbe_rx_entry *rxep;
> + struct rte_mbuf *mb;
> + uint16_t alloc_idx;
> + __le64 dma_addr;
> + int diag, i;
> +
> + /* allocate buffers in bulk directly into the S/W ring */
> + alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
> + rxep = &rxq->sw_ring[alloc_idx];
> + diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
> + rxq->rx_free_thresh);
> + if (unlikely(diag != 0))
> + return -ENOMEM;
> +
> + rxdp = &rxq->rx_ring[alloc_idx];
> + for (i = 0; i < rxq->rx_free_thresh; ++i) {
> + /* populate the static rte mbuf fields */
> + mb = rxep[i].mbuf;
> + if (reset_mbuf)
> + mb->port = rxq->port_id;
> +
> + rte_mbuf_refcnt_set(mb, 1);
> + mb->data_off = RTE_PKTMBUF_HEADROOM;
> +
> + /* populate the descriptors */
> + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
> + NGBE_RXD_HDRADDR(&rxdp[i], 0);
> + NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
> + }
> +
> + /* update state of internal queue structure */
> + rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
> + if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
> + rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
> +
> + /* no errors */
> + return 0;
> +}
> +
> +static inline uint16_t
> +ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
> + int i;
> +
> + /* how many packets are ready to return? */
> + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
> +
> + /* copy mbuf pointers to the application's packet list */
> + for (i = 0; i < nb_pkts; ++i)
> + rx_pkts[i] = stage[i];
> +
> + /* update internal queue state */
> + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
> + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
> +
> + return nb_pkts;
> +}
> +
> +static inline uint16_t
> +ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
> + struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
> + uint16_t nb_rx = 0;
> +
> + /* Any previously recv'd pkts will be returned from the Rx stage */
> + if (rxq->rx_nb_avail)
> + return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
> +
> + /* Scan the H/W ring for packets to receive */
> + nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
> +
> + /* update internal queue state */
> + rxq->rx_next_avail = 0;
> + rxq->rx_nb_avail = nb_rx;
> + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
> +
> + /* if required, allocate new buffers to replenish descriptors */
> + if (rxq->rx_tail > rxq->rx_free_trigger) {
> + uint16_t cur_free_trigger = rxq->rx_free_trigger;
> +
> + if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
> + int i, j;
> +
> + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
> + "queue_id=%u", (uint16_t)rxq->port_id,
> + (uint16_t)rxq->queue_id);
> +
> + dev->data->rx_mbuf_alloc_failed +=
> + rxq->rx_free_thresh;
> +
> + /*
> + * Need to rewind any previous receives if we cannot
> + * allocate new buffers to replenish the old ones.
> + */
> + rxq->rx_nb_avail = 0;
> + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
> + for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
> + rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
> +
> + return 0;
> + }
> +
> + /* update tail pointer */
> + rte_wmb();
> + ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
> + }
> +
> + if (rxq->rx_tail >= rxq->nb_rx_desc)
> + rxq->rx_tail = 0;
> +
> + /* received any packets this loop? */
> + if (rxq->rx_nb_avail)
> + return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
> +
> + return 0;
> +}
> +
> +/* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
> +uint16_t
> +ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + uint16_t nb_rx;
> +
> + if (unlikely(nb_pkts == 0))
> + return 0;
> +
> + if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
> + return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
> +
> + /* request is relatively large, chunk it up */
> + nb_rx = 0;
> + while (nb_pkts) {
> + uint16_t ret, n;
> +
> + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
> + ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
> + nb_rx = (uint16_t)(nb_rx + ret);
> + nb_pkts = (uint16_t)(nb_pkts - ret);
> + if (ret < n)
> + break;
> + }
> +
> + return nb_rx;
> +}
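A small aside for readers: the chunking means a large burst is served in
RTE_PMD_NGBE_RX_MAX_BURST pieces and stops early on the first short
chunk. From the application side that is transparent, e.g. (untested;
port_id and queue_id are hypothetical):

	struct rte_mbuf *pkts[64];
	uint16_t nb;

	/* If 64 > RTE_PMD_NGBE_RX_MAX_BURST, this is split internally
	 * into several ngbe_rx_recv_pkts() calls; a short chunk ends
	 * the burst.
	 */
	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 64);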
> +
> uint16_t
> ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts)
> @@ -501,6 +752,288 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> return nb_rx;
> }
>
> +/**
> + * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
> + *
> + * Fill the following info in the HEAD buffer of the Rx cluster:
> + * - RX port identifier
> + * - hardware offload data, if any:
> + * - RSS flag & hash
> + * - IP checksum flag
> + * - VLAN TCI, if any
> + * - error flags
> + * @head HEAD of the packet cluster
> + * @desc HW descriptor to get data from
> + * @rxq Pointer to the Rx queue
> + */
> +static inline void
> +ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
> + struct ngbe_rx_queue *rxq, uint32_t staterr)
> +{
> + uint32_t pkt_info;
> + uint64_t pkt_flags;
> +
> + head->port = rxq->port_id;
> +
> + /* The vlan_tci field is only valid when PKT_RX_VLAN is
> + * set in the pkt_flags field.
> + */
> + head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
> + pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
> + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
> + pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
> + pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
> + head->ol_flags = pkt_flags;
> + head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
> + rxq->pkt_type_mask);
> +
> + if (likely(pkt_flags & PKT_RX_RSS_HASH))
> + head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
> +}
> +
> +/**
> + * ngbe_recv_pkts_sc - receive handler for scatter case.
> + *
> + * @rx_queue Rx queue handle
> + * @rx_pkts table of received packets
> + * @nb_pkts size of rx_pkts table
> + * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
> + *
> + * Returns the number of received packets/clusters (according to the "bulk
> + * receive" interface).
> + */
> +static inline uint16_t
> +ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
> + bool bulk_alloc)
> +{
> + struct ngbe_rx_queue *rxq = rx_queue;
> + struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
> + volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
> + struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
> + struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
> + uint16_t rx_id = rxq->rx_tail;
> + uint16_t nb_rx = 0;
> + uint16_t nb_hold = rxq->nb_rx_hold;
> + uint16_t prev_id = rxq->rx_tail;
> +
> + while (nb_rx < nb_pkts) {
> + bool eop;
> + struct ngbe_rx_entry *rxe;
> + struct ngbe_scattered_rx_entry *sc_entry;
> + struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
> + struct ngbe_rx_entry *next_rxe = NULL;
> + struct rte_mbuf *first_seg;
> + struct rte_mbuf *rxm;
> + struct rte_mbuf *nmb = NULL;
> + struct ngbe_rx_desc rxd;
> + uint16_t data_len;
> + uint16_t next_id;
> + volatile struct ngbe_rx_desc *rxdp;
> + uint32_t staterr;
> +
> +next_desc:
> + rxdp = &rx_ring[rx_id];
> + staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
> +
> + if (!(staterr & NGBE_RXD_STAT_DD))
> + break;
> +
> + rxd = *rxdp;
> +
> + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
> + "staterr=0x%x data_len=%u",
> + rxq->port_id, rxq->queue_id, rx_id, staterr,
> + rte_le_to_cpu_16(rxd.qw1.hi.len));
> +
> + if (!bulk_alloc) {
> + nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
> + if (nmb == NULL) {
> + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
> + "port_id=%u queue_id=%u",
> + rxq->port_id, rxq->queue_id);
> +
> + dev->data->rx_mbuf_alloc_failed++;
> + break;
> + }
> + } else if (nb_hold > rxq->rx_free_thresh) {
> + uint16_t next_rdt = rxq->rx_free_trigger;
> +
> + if (!ngbe_rx_alloc_bufs(rxq, false)) {
> + rte_wmb();
> + ngbe_set32_relaxed(rxq->rdt_reg_addr,
> + next_rdt);
> + nb_hold -= rxq->rx_free_thresh;
> + } else {
> + PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
> + "port_id=%u queue_id=%u",
> + rxq->port_id, rxq->queue_id);
> +
> + dev->data->rx_mbuf_alloc_failed++;
> + break;
> + }
> + }
> +
> + nb_hold++;
> + rxe = &sw_ring[rx_id];
> + eop = staterr & NGBE_RXD_STAT_EOP;
> +
> + next_id = rx_id + 1;
> + if (next_id == rxq->nb_rx_desc)
> + next_id = 0;
> +
> + /* Prefetch next mbuf while processing current one. */
> + rte_ngbe_prefetch(sw_ring[next_id].mbuf);
> +
> + /*
> + * When next RX descriptor is on a cache-line boundary,
> + * prefetch the next 4 RX descriptors and the next 4 pointers
> + * to mbufs.
> + */
> + if ((next_id & 0x3) == 0) {
> + rte_ngbe_prefetch(&rx_ring[next_id]);
> + rte_ngbe_prefetch(&sw_ring[next_id]);
> + }
> +
> + rxm = rxe->mbuf;
> +
> + if (!bulk_alloc) {
> + __le64 dma =
> + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
> + /*
> + * Update RX descriptor with the physical address of the
> + * new data buffer of the new allocated mbuf.
> + */
> + rxe->mbuf = nmb;
> +
> + rxm->data_off = RTE_PKTMBUF_HEADROOM;
> + NGBE_RXD_HDRADDR(rxdp, 0);
> + NGBE_RXD_PKTADDR(rxdp, dma);
> + } else {
> + rxe->mbuf = NULL;
> + }
> +
> + /*
> + * Set data length & data buffer address of mbuf.
> + */
> + data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
> + rxm->data_len = data_len;
> +
> + if (!eop) {
> + uint16_t nextp_id;
> +
> + nextp_id = next_id;
> + next_sc_entry = &sw_sc_ring[nextp_id];
> + next_rxe = &sw_ring[nextp_id];
> + rte_ngbe_prefetch(next_rxe);
> + }
> +
> + sc_entry = &sw_sc_ring[rx_id];
> + first_seg = sc_entry->fbuf;
> + sc_entry->fbuf = NULL;
> +
> + /*
> + * If this is the first buffer of the received packet,
> + * set the pointer to the first mbuf of the packet and
> + * initialize its context.
> + * Otherwise, update the total length and the number of segments
> + * of the current scattered packet, and update the pointer to
> + * the last mbuf of the current packet.
> + */
> + if (first_seg == NULL) {
> + first_seg = rxm;
> + first_seg->pkt_len = data_len;
> + first_seg->nb_segs = 1;
> + } else {
> + first_seg->pkt_len += data_len;
> + first_seg->nb_segs++;
> + }
> +
> + prev_id = rx_id;
> + rx_id = next_id;
> +
> + /*
> + * If this is not the last buffer of the received packet, update
> + * the pointer to the first mbuf at the NEXTP entry in the
> + * sw_sc_ring and continue to parse the RX ring.
RX -> Rx
> + */
> + if (!eop && next_rxe) {
> + rxm->next = next_rxe->mbuf;
> + next_sc_entry->fbuf = first_seg;
> + goto next_desc;
> + }
> +
> + /* Initialize the first mbuf of the returned packet */
> + ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
> +
> + /* Deal with the case, when HW CRC srip is disabled. */
> + first_seg->pkt_len -= rxq->crc_len;
> + if (unlikely(rxm->data_len <= rxq->crc_len)) {
> + struct rte_mbuf *lp;
> +
> + for (lp = first_seg; lp->next != rxm; lp = lp->next)
> + ;
> +
> + first_seg->nb_segs--;
> + lp->data_len -= rxq->crc_len - rxm->data_len;
> + lp->next = NULL;
> + rte_pktmbuf_free_seg(rxm);
> + } else {
> + rxm->data_len -= rxq->crc_len;
> + }
> +
> + /* Prefetch data of first segment, if configured to do so. */
> + rte_packet_prefetch((char *)first_seg->buf_addr +
> + first_seg->data_off);
> +
> + /*
> + * Store the mbuf address into the next entry of the array
> + * of returned packets.
> + */
> + rx_pkts[nb_rx++] = first_seg;
> + }
> +
> + /*
> + * Record index of the next RX descriptor to probe.
> + */
> + rxq->rx_tail = rx_id;
> +
> + /*
> + * If the number of free RX descriptors is greater than the RX free
RX -> Rx twice
> + * threshold of the queue, advance the Receive Descriptor Tail (RDT)
> + * register.
> + * Update the RDT with the value of the last processed RX descriptor
> + * minus 1, to guarantee that the RDT register is never equal to the
> + * RDH register, which creates a "full" ring situation from the
> + * hardware point of view...
> + */
> + if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
> + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
> + "nb_hold=%u nb_rx=%u",
> + rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
> +
> + rte_wmb();
> + ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
> + nb_hold = 0;
> + }
> +
> + rxq->nb_rx_hold = nb_hold;
> + return nb_rx;
> +}
> +
> +uint16_t
> +ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
> +}
> +
> +uint16_t
> +ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
> +}
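To illustrate what the scattered handlers hand back: each returned entry
is the HEAD of an mbuf chain, so an application walks m->next and a
single rte_pktmbuf_free() releases the whole chain (sketch; handle(),
port_id and queue_id are hypothetical):

	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++) {
		struct rte_mbuf *seg;

		/* pkt_len covers the whole chain, data_len one segment */
		for (seg = pkts[i]; seg != NULL; seg = seg->next)
			handle(rte_pktmbuf_mtod(seg, void *), seg->data_len);

		rte_pktmbuf_free(pkts[i]); /* frees every segment */
	}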
> +
> /*********************************************************************
> *
> * Queue management functions
> @@ -1064,6 +1597,54 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> return 0;
> }
>
> +void __rte_cold
> +ngbe_set_rx_function(struct rte_eth_dev *dev)
> +{
> + struct ngbe_adapter *adapter = NGBE_DEV_ADAPTER(dev);
> +
> + if (dev->data->scattered_rx) {
> + /*
> + * Set the scattered callback: there are bulk and
> + * single allocation versions.
> + */
> + if (adapter->rx_bulk_alloc_allowed) {
> + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
> + "allocation callback (port=%d).",
> + dev->data->port_id);
> + dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
> + } else {
> + PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
> + "single allocation) "
> + "Scattered Rx callback "
> + "(port=%d).",
> + dev->data->port_id);
> +
> + dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
> + }
> + /*
> + * Below we set "simple" callbacks according to port/queues parameters.
> + * If parameters allow we are going to choose between the following
> + * callbacks:
> + * - Bulk Allocation
> + * - Single buffer allocation (the simplest one)
> + */
> + } else if (adapter->rx_bulk_alloc_allowed) {
> + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> + "satisfied. Rx Burst Bulk Alloc function "
> + "will be used on port=%d.",
> + dev->data->port_id);
> +
> + dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
> + } else {
> + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
> + "satisfied, or Scattered Rx is requested "
> + "(port=%d).",
> + dev->data->port_id);
> +
> + dev->rx_pkt_burst = ngbe_recv_pkts;
> + }
> +}
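And the scattered branch here is reached when the application requests
scatter at configure time, e.g. (usual ethdev flow, untested; the PMD is
expected to set dev->data->scattered_rx from this before
ngbe_set_rx_function() runs):

	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	/* Request the Rx scatter offload this patch should advertise */
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
	rte_eth_dev_configure(port_id, 1, 1, &port_conf);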
> +
> /*
> * Initializes Receive Unit.
> */
> @@ -1211,6 +1792,8 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
> wr32(hw, NGBE_SECRXCTL, rdrxctl);
> }
>
> + ngbe_set_rx_function(dev);
> +
> return 0;
> }
>
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index d6b9127cb4..4b8596b24a 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -298,6 +298,8 @@ struct ngbe_txq_ops {
> void (*reset)(struct ngbe_tx_queue *txq);
> };
>
> +void ngbe_set_rx_function(struct rte_eth_dev *dev);
> +
> uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
> uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
> uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
>