[11/12] net/bnxt: handle multiple packets per loop in vector PMD

Message ID 20200909155717.29099-1-lance.richardson@broadcom.com
State Accepted
Delegated to: Ajit Khaparde
Series: net/bnxt: vector PMD improvements

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Lance Richardson Sept. 9, 2020, 3:57 p.m. UTC
Process four receive descriptors per inner loop in vector mode
burst receive functions.

Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
---
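As an illustration of the new loop structure, here is a scalar model
(hypothetical names throughout: struct cmpl, recv_burst() and
valid_target are stand-ins, and one completion entry is used per packet
where the hardware actually consumes two). Each iteration gathers the
valid bit from four consecutive completions; completions become valid
in ring order, so a popcount of the packed bits gives the number of
packets finished in that group:

#include <stdint.h>
#include <stdio.h>

#define DESCS_PER_LOOP 4	/* stands in for RTE_BNXT_DESCS_PER_LOOP */

/* Hypothetical completion descriptor: bit 0 of flags is the valid bit,
 * toggled by hardware on each pass around the ring. */
struct cmpl {
	uint32_t flags;
};

static int
recv_burst(const struct cmpl *ring, unsigned int nb_cmpl,
	   uint32_t valid_target, unsigned int *pkts, int max_pkts)
{
	int nb_rx = 0;
	unsigned int cons = 0;

	while (cons + DESCS_PER_LOOP <= nb_cmpl &&
	       nb_rx + DESCS_PER_LOOP <= max_pkts) {
		uint64_t valid = 0;
		unsigned int i, num_valid;

		/* Pack one valid bit per descriptor into a scalar. */
		for (i = 0; i < DESCS_PER_LOOP; i++)
			valid |= (uint64_t)
				((ring[cons + i].flags & 1) == valid_target) << i;

		num_valid = (unsigned int)__builtin_popcountll(valid);

		for (i = 0; i < num_valid; i++)
			pkts[nb_rx++] = cons + i;

		cons += DESCS_PER_LOOP;
		if (num_valid < DESCS_PER_LOOP)
			break;	/* hit the first not-yet-valid descriptor */
	}
	return nb_rx;
}

int main(void)
{
	/* Six completed descriptors followed by two not yet done. */
	const struct cmpl ring[8] = {
		{1}, {1}, {1}, {1}, {1}, {1}, {0}, {0}
	};
	unsigned int pkts[8];

	printf("received %d packets\n", recv_burst(ring, 8, 1, pkts, 8));
	return 0;
}

The SSE path below uses this popcount approach on packed 16-bit fields;
the NEON path derives the same count from the index of the highest
non-zero field.
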
 drivers/net/bnxt/bnxt_ethdev.c          |   2 +-
 drivers/net/bnxt/bnxt_rxq.c             |   3 +-
 drivers/net/bnxt/bnxt_rxq.h             |   1 +
 drivers/net/bnxt/bnxt_rxr.c             |  15 +-
 drivers/net/bnxt/bnxt_rxtx_vec_common.h |   2 +
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c   | 365 +++++++++++++++++-------
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c    | 316 ++++++++++++++------
 7 files changed, 508 insertions(+), 196 deletions(-)
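
The fake_mbuf additions exist because the vector paths now load mbuf
pointers four at a time: slots past the ring end (and slots left
unfilled when bulk allocation fails in bnxt_rxq_rearm()) are pointed at
a per-queue dummy mbuf, so the wide loads never see a NULL pointer, and
teardown must treat that sentinel as an empty slot. A rough sketch of
the idea, with invented names (struct rxq, sentinel, EXTRA_SLOTS and
RING_SIZE are illustrative only):

#include <stddef.h>

#define EXTRA_SLOTS 4	/* one per-loop group of pointer loads */
#define RING_SIZE 64

struct mbuf { int pad; };

struct rxq {
	struct mbuf sentinel;	/* plays the role of fake_mbuf */
	struct mbuf *ring[RING_SIZE + EXTRA_SLOTS];
	size_t ring_size;
};

/* Point the slots past the ring end at the sentinel so that loads of
 * four consecutive mbuf pointers are always safe to issue. */
static void rxq_init_sentinels(struct rxq *q)
{
	size_t i;

	for (i = q->ring_size; i < q->ring_size + EXTRA_SLOTS; i++)
		q->ring[i] = &q->sentinel;
}

/* Teardown must distinguish real buffers from the sentinel, as the
 * bnxt_rx_queue_release_mbufs() hunk does. */
static void rxq_release(struct rxq *q, void (*free_mbuf)(struct mbuf *))
{
	size_t i;

	for (i = 0; i < q->ring_size + EXTRA_SLOTS; i++) {
		if (q->ring[i] != NULL && q->ring[i] != &q->sentinel)
			free_mbuf(q->ring[i]);
		q->ring[i] = NULL;
	}
}

static void free_stub(struct mbuf *m) { (void)m; }

int main(void)
{
	static struct rxq q = { .ring_size = RING_SIZE };

	rxq_init_sentinels(&q);
	rxq_release(&q, free_stub);
	return 0;
}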

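One more worked example: in the NEON receive loop below, after the
narrowing step, 'valid' is a 64-bit value holding four 16-bit fields
that are each 0x0001 or 0x0000, and the descriptor count falls out of
the position of the highest non-zero field. A scalar model with assumed
values (the zero case is guarded explicitly here because
__builtin_clzl(0) is undefined behavior in C):

#include <stdint.h>
#include <stdio.h>

static unsigned int num_valid(uint64_t valid, uint64_t desc_valid_mask)
{
	valid &= desc_valid_mask;
	if (valid == 0)
		return 0;
	/* Index of the highest non-zero 16-bit field, plus one. */
	return (unsigned int)(sizeof(uint64_t) / sizeof(uint16_t)) -
	       (unsigned int)__builtin_clzl(valid) / 16;
}

int main(void)
{
	/* Descriptors 0..2 valid, descriptor 3 not yet completed. */
	uint64_t valid = 0x0000000100010001ULL;

	printf("%u\n", num_valid(valid, ~0UL));	/* prints 3 */
	return 0;
}
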
Patch

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 27eba431b8..b658a44303 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -2872,7 +2872,7 @@  bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
 			return RTE_ETH_RX_DESC_DONE;
 	}
 	rx_buf = rxr->rx_buf_ring[cons];
-	if (rx_buf == NULL)
+	if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf)
 		return RTE_ETH_RX_DESC_UNAVAIL;
 
 
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 4ef3b5cb5c..57ba9a1570 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -212,7 +212,8 @@  void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 		for (i = 0;
 		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
 			if (sw_ring[i]) {
-				rte_pktmbuf_free_seg(sw_ring[i]);
+				if (sw_ring[i] != &rxq->fake_mbuf)
+					rte_pktmbuf_free_seg(sw_ring[i]);
 				sw_ring[i] = NULL;
 			}
 		}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index d5ce3b6d58..96c6e06a52 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -39,6 +39,7 @@  struct bnxt_rx_queue {
 	uint32_t			rx_buf_size;
 	struct bnxt_rx_ring_info	*rx_ring;
 	struct bnxt_cp_ring_info	*cp_ring;
+	struct rte_mbuf			fake_mbuf;
 	rte_atomic64_t		rx_mbuf_alloc_fail;
 	const struct rte_memzone *mz;
 };
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 33bd006530..89a964a49b 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -20,6 +20,7 @@ 
 #ifdef RTE_LIBRTE_IEEE1588
 #include "bnxt_hwrm.h"
 #endif
+#include "bnxt_rxtx_vec_common.h"
 
 #include <bnxt_tf_common.h>
 #include <ulp_mark_mgr.h>
@@ -931,7 +932,7 @@  uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			struct rte_mbuf **rx_buf = &rxr->rx_buf_ring[i];
 
 			/* Buffer already allocated for this index. */
-			if (*rx_buf != NULL)
+			if (*rx_buf != NULL && *rx_buf != &rxq->fake_mbuf)
 				continue;
 
 			/* This slot is empty. Alloc buffer for Rx */
@@ -1025,7 +1026,11 @@  int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 	ring->ring_mask = ring->ring_size - 1;
 	ring->bd = (void *)rxr->rx_desc_ring;
 	ring->bd_dma = rxr->rx_desc_mapping;
-	ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
+
+	/* Allocate extra rx ring entries for vector rx. */
+	ring->vmem_size = sizeof(struct rte_mbuf *) *
+				(ring->ring_size + RTE_BNXT_DESCS_PER_LOOP);
+
 	ring->vmem = (void **)&rxr->rx_buf_ring;
 	ring->fw_ring_id = INVALID_HW_RING_ID;
 
@@ -1136,6 +1141,12 @@  int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 		prod = RING_NEXT(rxr->rx_ring_struct, prod);
 	}
 
+	/* Initialize dummy mbuf pointers for vector mode rx. */
+	for (i = ring->ring_size;
+	     i < ring->ring_size + RTE_BNXT_DESCS_PER_LOOP; i++) {
+		rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
+	}
+
 	ring = rxr->ag_ring_struct;
 	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
 	bnxt_init_rxbds(ring, type, size);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 819b8290e4..8c10fdfa10 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -75,6 +75,8 @@  bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
 		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
 
+		for (i = 0; i < nb; i++)
+			rx_bufs[i] = &rxq->fake_mbuf;
 		return;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 24f9fc3c39..e7fe9325ab 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -22,52 +22,151 @@ 
  * RX Ring handling
  */
 
-static uint32_t
-bnxt_parse_pkt_type(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
-{
-	uint32_t flags_type, flags2;
-	uint8_t index;
-
-	flags_type = vgetq_lane_u32(mm_rxcmp, 0);
-	flags2 = (uint16_t)vgetq_lane_u32(mm_rxcmp1, 0);
-
-	/*
-	 * Index format:
-	 *     bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
-	 *     bit 1: RX_CMPL_FLAGS2_IP_TYPE
-	 *     bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
-	 *     bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
-	 */
-	index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
-		((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
-			   RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
-		((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
+#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags)		       \
+{									       \
+	uint32_t tmp, of;						       \
+									       \
+	of = vgetq_lane_u32((rss_flags), (pi)) |			       \
+		   bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))];	       \
+									       \
+	tmp = vgetq_lane_u32((errors), (pi));				       \
+	if (tmp)							       \
+		of |= bnxt_ol_flags_err_table[tmp];			       \
+	(ol_flags) = of;						       \
+}
 
-	return bnxt_ptype_table[index];
+#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret)      \
+{									       \
+	uint32_t ptype;							       \
+	uint16_t vlan_tci;						       \
+	uint32x4_t r;							       \
+									       \
+	/* Set mbuf pkt_len, data_len, and rss_hash fields. */		       \
+	r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp),       \
+					      (shuf_msk)));		       \
+									       \
+	/* Set packet type. */						       \
+	ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))];      \
+	r = vsetq_lane_u32(ptype, r, 0);				       \
+									       \
+	/* Set vlan_tci. */						       \
+	vlan_tci = vgetq_lane_u32((rxcmp1), 1);				       \
+	r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci,		       \
+				vreinterpretq_u16_u32(r), 5));		       \
+	(ret) = r;							       \
 }
 
-static uint32_t
-bnxt_set_ol_flags(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
+static void
+descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
+	       uint64x2_t mb_init, struct rte_mbuf **mbuf)
 {
-	uint16_t flags_type, errors, flags;
+	const uint8x16_t shuf_msk = {
+		0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
+		2, 3, 0xFF, 0xFF,          /* pkt_len */
+		2, 3,                      /* data_len */
+		0xFF, 0xFF,                /* vlan_tci (zeroes) */
+		12, 13, 14, 15             /* rss hash */
+	};
+	const uint32x4_t flags_type_mask = {
+		RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+		RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+		RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+		RX_PKT_CMPL_FLAGS_ITYPE_MASK
+	};
+	const uint32x4_t flags2_mask1 = {
+		RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+			RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+		RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+			RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+		RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+			RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+		RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+			RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
+	};
+	const uint32x4_t flags2_mask2 = {
+		RX_PKT_CMPL_FLAGS2_IP_TYPE,
+		RX_PKT_CMPL_FLAGS2_IP_TYPE,
+		RX_PKT_CMPL_FLAGS2_IP_TYPE,
+		RX_PKT_CMPL_FLAGS2_IP_TYPE
+	};
+	const uint32x4_t rss_mask = {
+		RX_PKT_CMPL_FLAGS_RSS_VALID,
+		RX_PKT_CMPL_FLAGS_RSS_VALID,
+		RX_PKT_CMPL_FLAGS_RSS_VALID,
+		RX_PKT_CMPL_FLAGS_RSS_VALID
+	};
+	const uint32x4_t flags2_index_mask = {
+		0x1F, 0x1F, 0x1F, 0x1F
+	};
+	const uint32x4_t flags2_error_mask = {
+		0xF, 0xF, 0xF, 0xF
+	};
+	uint32x4_t flags_type, flags2, index, errors, rss_flags;
+	uint32x4_t tmp, ptype_idx;
+	uint64x2_t t0, t1;
 	uint32_t ol_flags;
 
-	/* Extract rxcmp1->flags2. */
-	flags = vgetq_lane_u32(mm_rxcmp1, 0) & 0x1F;
-	/* Extract rxcmp->flags_type. */
-	flags_type = vgetq_lane_u32(mm_rxcmp, 0);
-	/* Extract rxcmp1->errors_v2. */
-	errors = (vgetq_lane_u32(mm_rxcmp1, 2) >> 4) & flags & 0xF;
-
-	ol_flags = bnxt_ol_flags_table[flags & ~errors];
-
-	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
-		ol_flags |= PKT_RX_RSS_HASH;
-
-	if (errors)
-		ol_flags |= bnxt_ol_flags_err_table[errors];
-
-	return ol_flags;
+	/* Compute packet type table indexes for four packets */
+	t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
+	t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));
+
+	flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+							vget_low_u64(t1)));
+	ptype_idx =
+		vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);
+
+	t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
+	t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
+
+	flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+						    vget_low_u64(t1)));
+
+	ptype_idx = vorrq_u32(ptype_idx,
+			vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
+	ptype_idx = vorrq_u32(ptype_idx,
+			vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));
+
+	/* Extract RSS valid flags for four packets. */
+	rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);
+
+	flags2 = vandq_u32(flags2, flags2_index_mask);
+
+	/* Extract errors_v2 fields for four packets. */
+	t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
+	t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
+
+	errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+						    vget_low_u64(t1)));
+
+	/* Compute ol_flags and checksum error indexes for four packets. */
+	errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
+	errors = vandq_u32(errors, flags2);
+
+	index = vbicq_u32(flags2, errors);
+
+	/* Update mbuf rearm_data for four packets. */
+	GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
+	vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
+		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+	GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
+	vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
+		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+	GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
+	vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
+		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+	GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
+	vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
+		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+
+	/* Update mbuf rx_descriptor_fields1 for four packets. */
+	GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
+	vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
+	GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
+	vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
+	GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
+	vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
+	GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
+	vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
 }
 
 uint16_t
@@ -77,17 +176,23 @@  bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct bnxt_rx_queue *rxq = rx_queue;
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
+	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
+	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+	uint64_t valid, desc_valid_mask = ~0UL;
+	const uint32x4_t info3_v_mask = {
+		CMPL_BASE_V, CMPL_BASE_V,
+		CMPL_BASE_V, CMPL_BASE_V
+	};
 	uint32_t raw_cons = cpr->cp_raw_cons;
-	uint32_t cons;
+	uint32_t cons, mbcons;
 	int nb_rx_pkts = 0;
-	struct rx_pkt_cmpl *rxcmp;
-	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
-	const uint8x16_t shuf_msk = {
-		0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
-		2, 3, 0xFF, 0xFF,          /* pkt_len */
-		2, 3,                      /* data_len */
-		0xFF, 0xFF,                /* vlan_tci (zeroes) */
-		12, 13, 14, 15             /* rss hash */
+	const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
+	const uint32x4_t valid_target = {
+		!!(raw_cons & cp_ring_size),
+		!!(raw_cons & cp_ring_size),
+		!!(raw_cons & cp_ring_size),
+		!!(raw_cons & cp_ring_size)
 	};
 	int i;
 
@@ -101,72 +206,130 @@  bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
 	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
 
-	/* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP. */
-	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
-	if (!nb_pkts)
-		return 0;
+	cons = raw_cons & (cp_ring_size - 1);
+	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
 
-	/* Handle RX burst request */
-	for (i = 0; i < nb_pkts; i++) {
-		uint32x4_t mm_rxcmp, mm_rxcmp1;
-		struct rx_pkt_cmpl_hi *rxcmp1;
-		uint32x4_t pkt_mb, rearm;
-		uint32_t ptype, ol_flags;
-		struct rte_mbuf *mbuf;
-		uint16_t vlan_tci;
-		uint16x8_t tmp16;
-		uint8x16_t tmp;
+	/* Prefetch first four descriptor pairs. */
+	rte_prefetch0(&cp_desc_ring[cons]);
+	rte_prefetch0(&cp_desc_ring[cons + 4]);
 
-		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+	/* Ensure that we do not go past the ends of the rings. */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
+					   (cp_ring_size - cons) / 2));
+	/*
+	 * If we are at the end of the ring, ensure that descriptors after the
+	 * last valid entry are not treated as valid. Otherwise, force the
+	 * maximum number of packets to receive to be a multiple of the per-
+	 * loop count.
+	 */
+	if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
+		desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
+	else
+		nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
 
-		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
-		rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cons + 1];
+	/* Handle RX burst request */
+	for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
+				  cons += RTE_BNXT_DESCS_PER_LOOP * 2,
+				  mbcons += RTE_BNXT_DESCS_PER_LOOP) {
+		uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
+		uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
+		uint32x4_t info3_v;
+		uint64x2_t t0, t1;
+		uint32_t num_valid;
+
+		/* Copy four mbuf pointers to output array. */
+		t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
+#ifdef RTE_ARCH_ARM64
+		t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
+#endif
+		vst1q_u64((void *)&rx_pkts[i], t0);
+#ifdef RTE_ARCH_ARM64
+		vst1q_u64((void *)&rx_pkts[i + 2], t1);
+#endif
+
+		/* Prefetch four descriptor pairs for next iteration. */
+		if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+			rte_prefetch0(&cp_desc_ring[cons + 8]);
+			rte_prefetch0(&cp_desc_ring[cons + 12]);
+		}
 
-		if (!CMP_VALID(rxcmp1, raw_cons + 1, cpr->cp_ring_struct))
+		/*
+		 * Load the four current descriptors into NEON registers in
+		 * reverse order to ensure consistent state.
+		 */
+		rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
+		rte_cio_rmb();
+		rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
+
+		rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
+		rte_cio_rmb();
+		rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
+
+		t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
+
+		rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
+		rte_cio_rmb();
+		rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
+
+		rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
+		rte_cio_rmb();
+		rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
+
+		t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
+
+		/* Isolate descriptor valid flags. */
+		info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+							     vget_low_u64(t1)));
+		info3_v = vandq_u32(info3_v, info3_v_mask);
+		info3_v = veorq_u32(info3_v, valid_target);
+
+		/*
+		 * Pack the 128-bit array of valid descriptor flags into a
+		 * 64-bit value containing one 16-bit field per descriptor,
+		 * from which the number of valid descriptors is derived.
+		 */
+		valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
+				      0);
+		/*
+		 * At this point, 'valid' is a 64-bit value containing four
+		 * 16-bit fields, each of which is either 0x0001 or 0x0000.
+		 * Compute number of valid descriptors from the index of
+		 * the highest non-zero field.
+		 */
+		num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
+				(__builtin_clzl(valid & desc_valid_mask) / 16);
+
+		switch (num_valid) {
+		case 4:
+			rxr->rx_buf_ring[mbcons + 3] = NULL;
+			/* FALLTHROUGH */
+		case 3:
+			rxr->rx_buf_ring[mbcons + 2] = NULL;
+			/* FALLTHROUGH */
+		case 2:
+			rxr->rx_buf_ring[mbcons + 1] = NULL;
+			/* FALLTHROUGH */
+		case 1:
+			rxr->rx_buf_ring[mbcons + 0] = NULL;
 			break;
+		case 0:
+			goto out;
+		}
 
-		mm_rxcmp = vld1q_u32((uint32_t *)rxcmp);
-		mm_rxcmp1 = vld1q_u32((uint32_t *)rxcmp);
-		raw_cons += 2;
-		cons = rxcmp->opaque;
-
-		mbuf = rxr->rx_buf_ring[cons];
-		rte_prefetch0(mbuf);
-		rxr->rx_buf_ring[cons] = NULL;
-
-		/* Set fields from mbuf initializer and ol_flags. */
-		ol_flags = bnxt_set_ol_flags(mm_rxcmp, mm_rxcmp1);
-		rearm = vsetq_lane_u32(ol_flags,
-				       vreinterpretq_u32_u64(mbuf_init), 2);
-		vst1q_u32((uint32_t *)&mbuf->rearm_data, rearm);
-
-		/* Set mbuf pkt_len, data_len, and rss_hash fields. */
-		tmp = vqtbl1q_u8(vreinterpretq_u8_u32(mm_rxcmp), shuf_msk);
-		pkt_mb = vreinterpretq_u32_u8(tmp);
-
-		/* Set packet type. */
-		ptype = bnxt_parse_pkt_type(mm_rxcmp, mm_rxcmp1);
-		pkt_mb = vsetq_lane_u32(ptype, pkt_mb, 0);
-
-		/* Set vlan_tci. */
-		vlan_tci = vgetq_lane_u32(mm_rxcmp1, 1);
-		tmp16 = vsetq_lane_u16(vlan_tci,
-				       vreinterpretq_u16_u32(pkt_mb),
-				       5);
-		pkt_mb = vreinterpretq_u32_u16(tmp16);
-
-		/* Store descriptor fields. */
-		vst1q_u32((uint32_t *)&mbuf->rx_descriptor_fields1, pkt_mb);
+		descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]);
+		nb_rx_pkts += num_valid;
 
-		rx_pkts[nb_rx_pkts++] = mbuf;
+		if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
+			break;
 	}
 
+out:
 	if (nb_rx_pkts) {
 		rxr->rx_prod =
 			RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
 
 		rxq->rxrearm_nb += nb_rx_pkts;
-		cpr->cp_raw_cons = raw_cons;
+		cpr->cp_raw_cons += 2 * nb_rx_pkts;
 		cpr->valid =
 			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
 		bnxt_db_cq(cpr);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index 7e87555408..362992ceb2 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -1,5 +1,5 @@ 
-// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright(c) 2019 Broadcom All rights reserved. */
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2019-2020 Broadcom All rights reserved. */
 
 #include <inttypes.h>
 #include <stdbool.h>
@@ -8,11 +8,7 @@ 
 #include <rte_byteorder.h>
 #include <rte_malloc.h>
 #include <rte_memory.h>
-#if defined(RTE_ARCH_X86)
-#include <tmmintrin.h>
-#else
-#error "bnxt vector pmd: unsupported target."
-#endif
+#include <rte_vect.h>
 
 #include "bnxt.h"
 #include "bnxt_cpr.h"
@@ -26,52 +22,135 @@ 
  * RX Ring handling
  */
 
-static __m128i
-bnxt_parse_pkt_type(__m128i mm_rxcmp, __m128i mm_rxcmp1)
-{
-	uint32_t flags_type, flags2;
-	uint8_t index;
-
-	flags_type = _mm_extract_epi16(mm_rxcmp, 0);
-	flags2 = _mm_extract_epi32(mm_rxcmp1, 0);
-
-	/*
-	 * Index format:
-	 *     bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
-	 *     bit 1: RX_CMPL_FLAGS2_IP_TYPE
-	 *     bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
-	 *     bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
-	 */
-	index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
-		((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
-			   RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
-		((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
+#define GET_OL_FLAGS(rss_flags, ol_index, errors, pi, ol_flags)		       \
+{									       \
+	uint32_t tmp, of;						       \
+									       \
+	of = _mm_extract_epi32((rss_flags), (pi)) |			       \
+		bnxt_ol_flags_table[_mm_extract_epi32((ol_index), (pi))];      \
+									       \
+	tmp = _mm_extract_epi32((errors), (pi));			       \
+	if (tmp)							       \
+		of |= bnxt_ol_flags_err_table[tmp];			       \
+	(ol_flags) = of;						       \
+}
 
-	return _mm_set_epi32(0, 0, 0, bnxt_ptype_table[index]);
+#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pi, ret)	       \
+{									       \
+	uint32_t ptype;							       \
+	__m128i r;							       \
+									       \
+	/* Set mbuf pkt_len, data_len, and rss_hash fields. */		       \
+	r = _mm_shuffle_epi8((rxcmp), (shuf_msk));			       \
+									       \
+	/* Set packet type. */						       \
+	ptype = bnxt_ptype_table[_mm_extract_epi32((ptype_idx), (pi))];	       \
+	r = _mm_blend_epi16(r, _mm_set_epi32(0, 0, 0, ptype), 0x3);	       \
+									       \
+	/* Set vlan_tci. */						       \
+	r = _mm_blend_epi16(r, _mm_slli_si128((rxcmp1), 6), 0x20);	       \
+	(ret) = r;							       \
 }
 
-static __m128i
-bnxt_set_ol_flags(__m128i mm_rxcmp, __m128i mm_rxcmp1)
+static inline void
+descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
+	       __m128i mbuf_init, struct rte_mbuf **mbuf)
 {
-	uint16_t flags_type, errors, flags;
+	const __m128i shuf_msk =
+		_mm_set_epi8(15, 14, 13, 12,          /* rss */
+			     0xFF, 0xFF,              /* vlan_tci (zeroes) */
+			     3, 2,                    /* data_len */
+			     0xFF, 0xFF, 3, 2,        /* pkt_len */
+			     0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
+	const __m128i flags_type_mask =
+		_mm_set_epi32(RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+			      RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+			      RX_PKT_CMPL_FLAGS_ITYPE_MASK,
+			      RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+	const __m128i flags2_mask1 =
+		_mm_set_epi32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+				RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+			      RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+				RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+			      RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+				RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
+			      RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+				RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC);
+	const __m128i flags2_mask2 =
+		_mm_set_epi32(RX_PKT_CMPL_FLAGS2_IP_TYPE,
+			      RX_PKT_CMPL_FLAGS2_IP_TYPE,
+			      RX_PKT_CMPL_FLAGS2_IP_TYPE,
+			      RX_PKT_CMPL_FLAGS2_IP_TYPE);
+	const __m128i rss_mask =
+		_mm_set_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID,
+			      RX_PKT_CMPL_FLAGS_RSS_VALID,
+			      RX_PKT_CMPL_FLAGS_RSS_VALID,
+			      RX_PKT_CMPL_FLAGS_RSS_VALID);
+	__m128i t0, t1, flags_type, flags2, index, errors, rss_flags;
+	__m128i ptype_idx;
 	uint32_t ol_flags;
 
-	/* Extract rxcmp1->flags2. */
-	flags = _mm_extract_epi32(mm_rxcmp1, 0) & 0x1F;
-	/* Extract rxcmp->flags_type. */
-	flags_type = _mm_extract_epi16(mm_rxcmp, 0);
-	/* Extract rxcmp1->errors_v2. */
-	errors = (_mm_extract_epi16(mm_rxcmp1, 4) >> 4) & flags & 0xF;
+	/* Compute packet type table indexes for four packets */
+	t0 = _mm_unpacklo_epi32(mm_rxcmp[0], mm_rxcmp[1]);
+	t1 = _mm_unpacklo_epi32(mm_rxcmp[2], mm_rxcmp[3]);
+	flags_type = _mm_unpacklo_epi64(t0, t1);
+	ptype_idx =
+		_mm_srli_epi32(_mm_and_si128(flags_type, flags_type_mask), 9);
 
-	ol_flags = bnxt_ol_flags_table[flags & ~errors];
+	t0 = _mm_unpacklo_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
+	t1 = _mm_unpacklo_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);
+	flags2 = _mm_unpacklo_epi64(t0, t1);
 
-	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
-		ol_flags |= PKT_RX_RSS_HASH;
+	ptype_idx = _mm_or_si128(ptype_idx,
+			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask1), 2));
+	ptype_idx = _mm_or_si128(ptype_idx,
+			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask2), 7));
 
-	if (errors)
-		ol_flags |= bnxt_ol_flags_err_table[errors];
+	/* Extract RSS valid flags for four packets. */
+	rss_flags = _mm_srli_epi32(_mm_and_si128(flags_type, rss_mask), 9);
 
-	return _mm_set_epi64x(ol_flags, 0);
+	/* Extract errors_v2 fields for four packets. */
+	t0 = _mm_unpackhi_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
+	t1 = _mm_unpackhi_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);
+
+	/* Compute ol_flags and checksum error indexes for four packets. */
+	flags2 = _mm_and_si128(flags2, _mm_set_epi32(0x1F, 0x1F, 0x1F, 0x1F));
+
+	errors = _mm_srli_epi32(_mm_unpacklo_epi64(t0, t1), 4);
+	errors = _mm_and_si128(errors, _mm_set_epi32(0xF, 0xF, 0xF, 0xF));
+	errors = _mm_and_si128(errors, flags2);
+
+	index = _mm_andnot_si128(errors, flags2);
+
+	/* Update mbuf rearm_data for four packets. */
+	GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
+	_mm_store_si128((void *)&mbuf[0]->rearm_data,
+			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));
+
+	GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
+	_mm_store_si128((void *)&mbuf[1]->rearm_data,
+			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));
+
+	GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
+	_mm_store_si128((void *)&mbuf[2]->rearm_data,
+			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));
+
+	GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
+	_mm_store_si128((void *)&mbuf[3]->rearm_data,
+			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));
+
+	/* Update mbuf rx_descriptor_fields1 for four packets. */
+	GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, t0);
+	_mm_store_si128((void *)&mbuf[0]->rx_descriptor_fields1, t0);
+
+	GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, t0);
+	_mm_store_si128((void *)&mbuf[1]->rx_descriptor_fields1, t0);
+
+	GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, t0);
+	_mm_store_si128((void *)&mbuf[2]->rx_descriptor_fields1, t0);
+
+	GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, t0);
+	_mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
 }
 
 uint16_t
@@ -79,19 +158,23 @@  bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		   uint16_t nb_pkts)
 {
 	struct bnxt_rx_queue *rxq = rx_queue;
+	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
+	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
+	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+	uint64_t valid, desc_valid_mask = ~0UL;
+	const __m128i info3_v_mask = _mm_set_epi32(CMPL_BASE_V, CMPL_BASE_V,
+						   CMPL_BASE_V, CMPL_BASE_V);
 	uint32_t raw_cons = cpr->cp_raw_cons;
-	uint32_t cons;
+	uint32_t cons, mbcons;
 	int nb_rx_pkts = 0;
-	struct rx_pkt_cmpl *rxcmp;
-	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
-	const __m128i shuf_msk =
-		_mm_set_epi8(15, 14, 13, 12,          /* rss */
-			     0xFF, 0xFF,              /* vlan_tci (zeroes) */
-			     3, 2,                    /* data_len */
-			     0xFF, 0xFF, 3, 2,        /* pkt_len */
-			     0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
+	const __m128i valid_target =
+		_mm_set_epi32(!!(raw_cons & cp_ring_size),
+			      !!(raw_cons & cp_ring_size),
+			      !!(raw_cons & cp_ring_size),
+			      !!(raw_cons & cp_ring_size));
 	int i;
 
 	/* If Rx Q was stopped return */
@@ -104,69 +187,120 @@  bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
 	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
 
+	cons = raw_cons & (cp_ring_size - 1);
+	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
+
+	/* Prefetch first four descriptor pairs. */
+	rte_prefetch0(&cp_desc_ring[cons]);
+	rte_prefetch0(&cp_desc_ring[cons + 4]);
+
+	/* Ensure that we do not go past the ends of the rings. */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
+					   (cp_ring_size - cons) / 2));
 	/*
-	 * Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP.
-	 * nb_pkts < RTE_BNXT_DESCS_PER_LOOP, just return no packet
+	 * If we are at the end of the ring, ensure that descriptors after the
+	 * last valid entry are not treated as valid. Otherwise, force the
+	 * maximum number of packets to receive to be a multiple of the per-
+	 * loop count.
 	 */
-	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
-	if (!nb_pkts)
-		return 0;
+	if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
+		desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
+	else
+		nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
 
 	/* Handle RX burst request */
-	for (i = 0; i < nb_pkts; i++) {
-		struct rx_pkt_cmpl_hi *rxcmp1;
-		struct rte_mbuf *mbuf;
-		__m128i mm_rxcmp, mm_rxcmp1, pkt_mb, ptype, rearm;
-
-		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+	for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
+				  cons += RTE_BNXT_DESCS_PER_LOOP * 2,
+				  mbcons += RTE_BNXT_DESCS_PER_LOOP) {
+		__m128i rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
+		__m128i rxcmp[RTE_BNXT_DESCS_PER_LOOP];
+		__m128i tmp0, tmp1, info3_v;
+		uint32_t num_valid;
+
+		/* Copy four mbuf pointers to output array. */
+		tmp0 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons]);
+#ifdef RTE_ARCH_X86_64
+		tmp1 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons + 2]);
+#endif
+		_mm_storeu_si128((void *)&rx_pkts[i], tmp0);
+#ifdef RTE_ARCH_X86_64
+		_mm_storeu_si128((void *)&rx_pkts[i + 2], tmp1);
+#endif
 
-		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
-		rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cons + 1];
+		/* Prefetch four descriptor pairs for next iteration. */
+		if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+			rte_prefetch0(&cp_desc_ring[cons + 8]);
+			rte_prefetch0(&cp_desc_ring[cons + 12]);
+		}
 
-		if (!CMP_VALID(rxcmp1, raw_cons + 1, cpr->cp_ring_struct))
-			break;
+		/*
+		 * Load the four current descriptors into SSE registers in
+		 * reverse order to ensure consistent state.
+		 */
+		rxcmp1[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 7]);
+		rte_compiler_barrier();
+		rxcmp[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 6]);
 
-		mm_rxcmp = _mm_load_si128((__m128i *)rxcmp);
-		mm_rxcmp1 = _mm_load_si128((__m128i *)rxcmp1);
+		rxcmp1[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 5]);
+		rte_compiler_barrier();
+		rxcmp[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 4]);
 
-		raw_cons += 2;
-		cons = rxcmp->opaque;
+		tmp1 = _mm_unpackhi_epi32(rxcmp1[2], rxcmp1[3]);
 
-		mbuf = rxr->rx_buf_ring[cons];
-		rxr->rx_buf_ring[cons] = NULL;
+		rxcmp1[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 3]);
+		rte_compiler_barrier();
+		rxcmp[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 2]);
 
-		/* Set fields from mbuf initializer and ol_flags. */
-		rearm = _mm_or_si128(mbuf_init,
-				     bnxt_set_ol_flags(mm_rxcmp, mm_rxcmp1));
-		_mm_store_si128((__m128i *)&mbuf->rearm_data, rearm);
+		rxcmp1[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 1]);
+		rte_compiler_barrier();
+		rxcmp[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 0]);
 
-		/* Set mbuf pkt_len, data_len, and rss_hash fields. */
-		pkt_mb = _mm_shuffle_epi8(mm_rxcmp, shuf_msk);
+		tmp0 = _mm_unpackhi_epi32(rxcmp1[0], rxcmp1[1]);
 
-		/* Set packet type. */
-		ptype = bnxt_parse_pkt_type(mm_rxcmp, mm_rxcmp1);
-		pkt_mb = _mm_blend_epi16(pkt_mb, ptype, 0x3);
+		/* Isolate descriptor valid flags. */
+		info3_v = _mm_and_si128(_mm_unpacklo_epi64(tmp0, tmp1),
+					info3_v_mask);
+		info3_v = _mm_xor_si128(info3_v, valid_target);
 
 		/*
-		 * Shift vlan_tci from completion metadata field left six
-		 * bytes and blend into mbuf->rx_descriptor_fields1 to set
-		 * mbuf->vlan_tci.
+		 * Pack the 128-bit array of valid descriptor flags into 64
+		 * bits and count the number of set bits in order to determine
+		 * the number of valid descriptors.
 		 */
-		pkt_mb = _mm_blend_epi16(pkt_mb,
-					 _mm_slli_si128(mm_rxcmp1, 6), 0x20);
+		valid = _mm_cvtsi128_si64(_mm_packs_epi32(info3_v, info3_v));
+		num_valid = __builtin_popcountll(valid & desc_valid_mask);
+
+		switch (num_valid) {
+		case 4:
+			rxr->rx_buf_ring[mbcons + 3] = NULL;
+			/* FALLTHROUGH */
+		case 3:
+			rxr->rx_buf_ring[mbcons + 2] = NULL;
+			/* FALLTHROUGH */
+		case 2:
+			rxr->rx_buf_ring[mbcons + 1] = NULL;
+			/* FALLTHROUGH */
+		case 1:
+			rxr->rx_buf_ring[mbcons + 0] = NULL;
+			break;
+		case 0:
+			goto out;
+		}
 
-		/* Store descriptor fields. */
-		_mm_storeu_si128((void *)&mbuf->rx_descriptor_fields1, pkt_mb);
+		descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts]);
+		nb_rx_pkts += num_valid;
 
-		rx_pkts[nb_rx_pkts++] = mbuf;
+		if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
+			break;
 	}
 
+out:
 	if (nb_rx_pkts) {
 		rxr->rx_prod =
 			RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
 
 		rxq->rxrearm_nb += nb_rx_pkts;
-		cpr->cp_raw_cons = raw_cons;
+		cpr->cp_raw_cons += 2 * nb_rx_pkts;
 		cpr->valid =
 			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
 		bnxt_db_cq(cpr);