[v3,2/5] net/ixgbe: fix vector rx burst for ixgbe

Message ID 20200909063636.60205-3-jia.guo@intel.com (mailing list archive)
State Changes Requested, archived
Delegated to: Qi Zhang
Series fix vector rx burst for PMDs

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Guo, Jia Sept. 9, 2020, 6:36 a.m. UTC
  Remove the burst size limitation in the vector Rx path, since it should
retrieve as many received packets as possible. Also make the scattered
receive path use a wrapper function to maximize the burst size. In
addition, do some code cleanup in the vector Rx path.

Signed-off-by: Jeff Guo <jia.guo@intel.com>
---
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 77 +++++++++++++------------
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c  | 61 +++++++++++---------
 2 files changed, 76 insertions(+), 62 deletions(-)
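
For context, a minimal usage sketch of what the change enables (the
EXAMPLE_BURST value, poll_rx_queue() helper and port/queue setup are
illustrative assumptions, not part of the patch): with the
RTE_IXGBE_MAX_RX_BURST clamp removed from the raw routine and a splitting
loop added in the scattered path, a single rte_eth_rx_burst() call asking
for more than 32 packets can now be filled up to the full request instead
of being capped at 32 per call.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical burst size larger than RTE_IXGBE_MAX_RX_BURST (32). */
#define EXAMPLE_BURST 128

static uint16_t
poll_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	uint16_t i, nb_rx;

	/* When the vector scattered Rx path is selected for this port,
	 * rte_eth_rx_burst() dispatches to ixgbe_recv_scattered_pkts_vec(),
	 * which now loops over 32-packet chunks internally.
	 */
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, EXAMPLE_BURST);

	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);	/* placeholder processing */

	return nb_rx;
}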
  

Comments

Feifei Wang Sept. 9, 2020, 9:54 a.m. UTC | #1
Hi, Jeff

> From: dev <dev-bounces@dpdk.org> On Behalf Of Jeff Guo
> Sent: Wednesday, September 9, 2020 2:37 PM
> To: qiming.yang@intel.com; beilei.xing@intel.com; wei.zhao1@intel.com;
> qi.z.zhang@intel.com; jingjing.wu@intel.com
> Cc: bruce.richardson@intel.com; dev@dpdk.org; jia.guo@intel.com;
> helin.zhang@intel.com; mb@smartsharesystems.com; ferruh.yigit@intel.com;
> haiyue.wang@intel.com; stephen@networkplumber.org; barbette@kth.se
> Subject: [dpdk-dev] [PATCH v3 2/5] net/ixgbe: fix vector rx burst for ixgbe
> 
> Remove the burst size limitation in the vector Rx path, since it should retrieve
> as many received packets as possible. Also make the scattered receive path use
> a wrapper function to maximize the burst size. In addition, do some code cleanup
> in the vector Rx path.
> 
> Signed-off-by: Jeff Guo <jia.guo@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 77 +++++++++++++------------
This patch has passed testing on the aarch64 platform with the NEON path.
Tested-by: Feifei Wang <Feifei.wang2@arm.com>
> drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c  | 61 +++++++++++---------
>  2 files changed, 76 insertions(+), 62 deletions(-)
> 
> [...]
  

Patch

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index aa27ee177..7692c5d59 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -130,17 +130,6 @@  desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
 	rx_pkts[3]->ol_flags = vol.e[3];
 }
 
-/*
- * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
- *
- * Notice:
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- * - don't support ol_flags for rss and csum err
- */
-
 #define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
 #define IXGBE_UINT8_BIT			(CHAR_BIT * sizeof(uint8_t))
 
@@ -206,6 +195,13 @@  desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask,
 				vgetq_lane_u32(tunnel_check, 3));
 }
 
+/**
+ * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
 static inline uint16_t
 _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		   uint16_t nb_pkts, uint8_t *split_packet)
@@ -226,9 +222,6 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
 				 rxq->crc_len, 0, 0, 0};
 
-	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
 
@@ -382,16 +375,6 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	return nb_pkts_recd;
 }
 
-/*
- * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
- *
- * Notice:
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- * - don't support ol_flags for rss and csum err
- */
 uint16_t
 ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -399,23 +382,19 @@  ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-/*
- * vPMD receive routine that reassembles scattered packets
- *
- * Notice:
- * - don't support ol_flags for rss and csum err
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
  */
-uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
+static uint16_t
+ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
 {
 	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
 
+	/* split_flags only can support max of RTE_IXGBE_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
 	/* get some new buffers */
 	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
 			split_flags);
@@ -443,6 +422,32 @@  ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       RTE_IXGBE_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_IXGBE_MAX_RX_BURST)
+			return retval;
+	}
+
+	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       nb_pkts);
+}
+
 static inline void
 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 517ca3166..cf54ff128 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -302,13 +302,11 @@  desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
 		get_packet_type(3, pkt_info, etqf_check, tunnel_check);
 }
 
-/*
+/**
  * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
  *
  * Notice:
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 static inline uint16_t
@@ -344,9 +342,6 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	__m128i mbuf_init;
 	uint8_t vlan_flags;
 
-	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
 
@@ -556,15 +551,6 @@  _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	return nb_pkts_recd;
 }
 
-/*
- * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
- *
- * Notice:
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- */
 uint16_t
 ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -572,22 +558,19 @@  ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-/*
- * vPMD receive routine that reassembles scattered packets
- *
- * Notice:
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
  */
-uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
+static uint16_t
+ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
 {
 	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
 
+	/* split_flags only can support max of RTE_IXGBE_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
 	/* get some new buffers */
 	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
 			split_flags);
@@ -615,6 +598,32 @@  ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       RTE_IXGBE_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_IXGBE_MAX_RX_BURST)
+			return retval;
+	}
+
+	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       nb_pkts);
+}
+
 static inline void
 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)