[RFC,1/3] net/virtio: move AVX based Rx and Tx code to separate file

Message ID 20200911120906.45995-2-joyce.kong@arm.com
State Superseded
Delegated to: Maxime Coquelin
Series
  • Vectorize packed ring RX path with NEON

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Joyce Kong Sept. 11, 2020, 12:09 p.m. UTC
Split out the AVX instruction based virtio packed ring Rx and Tx
implementation into a separate file, so that the common helper code
can be shared with other architecture-specific implementations, such
as the NEON path introduced later in this series.

Signed-off-by: Phil Yang <phil.yang@arm.com>
---
 drivers/net/virtio/meson.build              |   1 +
 drivers/net/virtio/virtio_rxtx_packed.c     |  37 +++
 drivers/net/virtio/virtio_rxtx_packed.h     | 284 ++++++++++++++++++++
 drivers/net/virtio/virtio_rxtx_packed_avx.c | 264 +-----------------
 4 files changed, 323 insertions(+), 263 deletions(-)
 create mode 100644 drivers/net/virtio/virtio_rxtx_packed.c
 create mode 100644 drivers/net/virtio/virtio_rxtx_packed.h

Patch

diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
index 3fd6051f4..e1851b0a6 100644
--- a/drivers/net/virtio/meson.build
+++ b/drivers/net/virtio/meson.build
@@ -5,6 +5,7 @@  sources += files('virtio_ethdev.c',
 	'virtio_pci.c',
 	'virtio_rxtx.c',
 	'virtio_rxtx_simple.c',
+	'virtio_rxtx_packed.c',
 	'virtqueue.c')
 deps += ['kvargs', 'bus_pci']
 
diff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c
new file mode 100644
index 000000000..e614e19fc
--- /dev/null
+++ b/drivers/net/virtio/virtio_rxtx_packed.c
@@ -0,0 +1,37 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_net.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtio_rxtx_packed.h"
+#include "virtqueue.h"
+
+/* Stub for linkage when an arch-specific implementation is not available */
+__rte_weak uint16_t
+virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
+			struct rte_mbuf **tx_pkts __rte_unused,
+			uint16_t nb_pkts __rte_unused)
+{
+	rte_panic("Wrong weak function linked by linker\n");
+	return 0;
+}
+
+/* Stub for linkage when an arch-specific implementation is not available */
+__rte_weak uint16_t
+virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
+			    struct rte_mbuf **rx_pkts __rte_unused,
+			    uint16_t nb_pkts __rte_unused)
+{
+	rte_panic("Wrong weak function linked by linker\n");
+	return 0;
+}
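
For context, not part of the patch itself: __rte_weak expands to GCC's __attribute__((weak)), so the stubs above exist purely to satisfy the linker on targets where no vectorized object is built (virtio_rxtx_packed_avx.c is compiled only on x86 builds with the required AVX support). When the AVX object is present, its strong definitions of the same symbols silently take precedence. A minimal two-file sketch of the mechanism, using hypothetical names:

	/* generic.c: weak fallback, used only when no strong
	 * definition of burst_recv is linked in.
	 */
	#include <stdint.h>

	__attribute__((weak)) uint16_t
	burst_recv(void *q, uint16_t nb)
	{
		(void)q;
		(void)nb;
		return 0;	/* never runs once a strong override is linked */
	}

	/* arch_avx.c: strong definition; the linker prefers it over
	 * the weak stub, with no #ifdef needed at the call site.
	 */
	#include <stdint.h>

	uint16_t
	burst_recv(void *q, uint16_t nb)
	{
		(void)q;
		return nb;
	}

Linking both objects resolves burst_recv to the strong version; dropping arch_avx.o from the link falls back to the stub, which in this patch deliberately panics to flag a misconfigured build.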
diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
new file mode 100644
index 000000000..b2447843b
--- /dev/null
+++ b/drivers/net/virtio/virtio_rxtx_packed.h
@@ -0,0 +1,284 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#ifndef _VIRTIO_RXTX_PACKED_H_
+#define _VIRTIO_RXTX_PACKED_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_net.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+#define BYTE_SIZE 8
+/* flag bits offset within the higher 64 bits of the packed ring desc */
+#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
+	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
+
+#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
+	FLAGS_BITS_OFFSET)
+
+/* reference count offset in mbuf rearm data */
+#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
+	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
+/* segment number offset in mbuf rearm data */
+#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
+	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
+
+/* default rearm data */
+#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
+	1ULL << REFCNT_BITS_OFFSET)
+
+/* id bits offset within the higher 64 bits of the packed ring desc */
+#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
+	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
+
+/* net hdr short size mask */
+#define NET_HDR_MASK 0x3F
+
+#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
+	sizeof(struct vring_packed_desc))
+#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
+
+#ifdef VIRTIO_GCC_UNROLL_PRAGMA
+#define virtio_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VIRTIO_CLANG_UNROLL_PRAGMA
+#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VIRTIO_ICC_UNROLL_PRAGMA
+#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifndef virtio_for_each_try_unroll
+#define virtio_for_each_try_unroll(iter, val, num) \
+	for (iter = val; iter < num; iter++)
+#endif
+
+static inline void
+virtio_update_batch_stats(struct virtnet_stats *stats,
+			  uint16_t pkt_len1,
+			  uint16_t pkt_len2,
+			  uint16_t pkt_len3,
+			  uint16_t pkt_len4)
+{
+	stats->bytes += pkt_len1;
+	stats->bytes += pkt_len2;
+	stats->bytes += pkt_len3;
+	stats->bytes += pkt_len4;
+}
+
+static inline int
+virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
+				    struct rte_mbuf *txm)
+{
+	struct virtqueue *vq = txvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	uint16_t hdr_size = hw->vtnet_hdr_size;
+	uint16_t slots, can_push;
+	int16_t need;
+
+	/* How many main ring entries are needed for this Tx?
+	 * any_layout => number of segments
+	 * default    => number of segments + 1
+	 */
+	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
+		   RTE_MBUF_DIRECT(txm) &&
+		   txm->nb_segs == 1 &&
+		   rte_pktmbuf_headroom(txm) >= hdr_size;
+
+	slots = txm->nb_segs + !can_push;
+	need = slots - vq->vq_free_cnt;
+
+	/* A positive value means more free vring descriptors are needed */
+	if (unlikely(need > 0)) {
+		virtio_xmit_cleanup_inorder_packed(vq, need);
+		need = slots - vq->vq_free_cnt;
+		if (unlikely(need > 0)) {
+			PMD_TX_LOG(ERR,
+				   "No free tx descriptors to transmit");
+			return -1;
+		}
+	}
+
+	/* Enqueue Packet buffers */
+	virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);
+
+	txvq->stats.bytes += txm->pkt_len;
+	return 0;
+}
+
+/* Optionally fill offload information into the mbuf */
+static inline int
+virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
+{
+	struct rte_net_hdr_lens hdr_lens;
+	uint32_t hdrlen, ptype;
+	int l4_supported = 0;
+
+	/* nothing to do */
+	if (hdr->flags == 0)
+		return 0;
+
+	/* GSO is not supported in the vectorized path, skip the check */
+	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+
+	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+	m->packet_type = ptype;
+	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
+	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
+	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
+		l4_supported = 1;
+
+	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
+		if (hdr->csum_start <= hdrlen && l4_supported) {
+			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+		} else {
+			/* Unknown proto or tunnel, do sw cksum. We can assume
+			 * the cksum field is in the first segment since the
+			 * buffers we provided to the host are large enough.
+			 * In case of SCTP, this will be wrong since it's a CRC
+			 * but there's nothing we can do.
+			 */
+			uint16_t csum = 0, off;
+
+			rte_raw_cksum_mbuf(m, hdr->csum_start,
+				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
+				&csum);
+			if (likely(csum != 0xffff))
+				csum = ~csum;
+			off = hdr->csum_offset + hdr->csum_start;
+			if (rte_pktmbuf_data_len(m) >= off + 1)
+				*rte_pktmbuf_mtod_offset(m, uint16_t *,
+					off) = csum;
+		}
+	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
+		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+	}
+
+	return 0;
+}
+
+static inline uint16_t
+virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
+				    struct rte_mbuf **rx_pkts)
+{
+	uint16_t used_idx, id;
+	uint32_t len;
+	struct virtqueue *vq = rxvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	uint32_t hdr_size = hw->vtnet_hdr_size;
+	struct virtio_net_hdr *hdr;
+	struct vring_packed_desc *desc;
+	struct rte_mbuf *cookie;
+
+	desc = vq->vq_packed.ring.desc;
+	used_idx = vq->vq_used_cons_idx;
+	if (!desc_is_used(&desc[used_idx], vq))
+		return -1;
+
+	len = desc[used_idx].len;
+	id = desc[used_idx].id;
+	cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+	if (unlikely(cookie == NULL)) {
+		PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+				vq->vq_used_cons_idx);
+		return -1;
+	}
+	rte_prefetch0(cookie);
+	rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+
+	cookie->data_off = RTE_PKTMBUF_HEADROOM;
+	cookie->ol_flags = 0;
+	cookie->pkt_len = (uint32_t)(len - hdr_size);
+	cookie->data_len = (uint32_t)(len - hdr_size);
+
+	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+					RTE_PKTMBUF_HEADROOM - hdr_size);
+	if (hw->has_rx_offload)
+		virtio_vec_rx_offload(cookie, hdr);
+
+	*rx_pkts = cookie;
+
+	rxvq->stats.bytes += cookie->pkt_len;
+
+	vq->vq_free_cnt++;
+	vq->vq_used_cons_idx++;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->vq_packed.used_wrap_counter ^= 1;
+	}
+
+	return 0;
+}
+
+static inline void
+virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
+			      struct rte_mbuf **cookie,
+			      uint16_t num)
+{
+	struct virtqueue *vq = rxvq->vq;
+	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+	uint16_t flags = vq->vq_packed.cached_flags;
+	struct virtio_hw *hw = vq->hw;
+	struct vq_desc_extra *dxp;
+	uint16_t idx, i;
+	uint16_t batch_num, total_num = 0;
+	uint16_t head_idx = vq->vq_avail_idx;
+	uint16_t head_flag = vq->vq_packed.cached_flags;
+	uint64_t addr;
+
+	do {
+		idx = vq->vq_avail_idx;
+
+		batch_num = PACKED_BATCH_SIZE;
+		if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
+			batch_num = vq->vq_nentries - idx;
+		if (unlikely((total_num + batch_num) > num))
+			batch_num = num - total_num;
+
+		virtio_for_each_try_unroll(i, 0, batch_num) {
+			dxp = &vq->vq_descx[idx + i];
+			dxp->cookie = (void *)cookie[total_num + i];
+
+			addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
+				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+			start_dp[idx + i].addr = addr;
+			start_dp[idx + i].len = cookie[total_num + i]->buf_len
+				- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+			if (total_num || i) {
+				virtqueue_store_flags_packed(&start_dp[idx + i],
+						flags, hw->weak_barriers);
+			}
+		}
+
+		vq->vq_avail_idx += batch_num;
+		if (vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+			flags = vq->vq_packed.cached_flags;
+		}
+		total_num += batch_num;
+	} while (total_num < num);
+
+	virtqueue_store_flags_packed(&start_dp[head_idx], head_flag,
+				hw->weak_barriers);
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+}
+
+#endif /* _VIRTIO_RXTX_PACKED_H_ */
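
As a sanity check on the constants in this header (a worked example, not part of the patch): the virtio spec fixes struct vring_packed_desc at 16 bytes, with addr at offset 0 (8 B), len at 8 (4 B), id at 12 (2 B) and flags at 14 (2 B). On a 64-byte cache line target this yields PACKED_BATCH_SIZE = 64 / 16 = 4, matching the unroll factor of 4 requested by the pragmas above (a 128-byte cache line, as on some Arm platforms, yields 8). Likewise FLAGS_BITS_OFFSET = (14 - 8) * 8 = 48 and ID_BITS_OFFSET = (12 - 8) * 8 = 32, i.e. flags and id sit in bits 48..63 and 32..47 of the descriptor's upper 64 bits, which is what PACKED_FLAGS_MASK selects. Illustrative C11 checks under those assumptions:

	#include <assert.h>	/* static_assert (C11) */

	/* 64 B cache line / 16 B descriptor => batches of 4 */
	static_assert(PACKED_BATCH_SIZE == 4, "batch of four descriptors");
	static_assert(FLAGS_BITS_OFFSET == 48, "flags in bits 48..63");
	static_assert(ID_BITS_OFFSET == 32, "id in bits 32..47");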
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
index 6a8214725..c8fbb8f2c 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
@@ -13,71 +13,9 @@ 
 #include "virtio_logs.h"
 #include "virtio_ethdev.h"
 #include "virtio_pci.h"
+#include "virtio_rxtx_packed.h"
 #include "virtqueue.h"
 
-#define BYTE_SIZE 8
-/* flag bits offset in packed ring desc higher 64bits */
-#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
-	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
-
-#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
-	FLAGS_BITS_OFFSET)
-
-/* reference count offset in mbuf rearm data */
-#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
-	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
-/* segment number offset in mbuf rearm data */
-#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
-	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
-
-/* default rearm data */
-#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
-	1ULL << REFCNT_BITS_OFFSET)
-
-/* id bits offset in packed ring desc higher 64bits */
-#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
-	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
-
-/* net hdr short size mask */
-#define NET_HDR_MASK 0x3F
-
-#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
-	sizeof(struct vring_packed_desc))
-#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
-
-#ifdef VIRTIO_GCC_UNROLL_PRAGMA
-#define virtio_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
-	for (iter = val; iter < size; iter++)
-#endif
-
-#ifdef VIRTIO_CLANG_UNROLL_PRAGMA
-#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
-	for (iter = val; iter < size; iter++)
-#endif
-
-#ifdef VIRTIO_ICC_UNROLL_PRAGMA
-#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
-	for (iter = val; iter < size; iter++)
-#endif
-
-#ifndef virtio_for_each_try_unroll
-#define virtio_for_each_try_unroll(iter, val, num) \
-	for (iter = val; iter < num; iter++)
-#endif
-
-static inline void
-virtio_update_batch_stats(struct virtnet_stats *stats,
-			  uint16_t pkt_len1,
-			  uint16_t pkt_len2,
-			  uint16_t pkt_len3,
-			  uint16_t pkt_len4)
-{
-	stats->bytes += pkt_len1;
-	stats->bytes += pkt_len2;
-	stats->bytes += pkt_len3;
-	stats->bytes += pkt_len4;
-}
-
 static inline int
 virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 				   struct rte_mbuf **tx_pkts)
@@ -200,46 +138,6 @@  virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 	return 0;
 }
 
-static inline int
-virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
-				    struct rte_mbuf *txm)
-{
-	struct virtqueue *vq = txvq->vq;
-	struct virtio_hw *hw = vq->hw;
-	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t slots, can_push;
-	int16_t need;
-
-	/* How many main ring entries are needed to this Tx?
-	 * any_layout => number of segments
-	 * default    => number of segments + 1
-	 */
-	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
-		   RTE_MBUF_DIRECT(txm) &&
-		   txm->nb_segs == 1 &&
-		   rte_pktmbuf_headroom(txm) >= hdr_size;
-
-	slots = txm->nb_segs + !can_push;
-	need = slots - vq->vq_free_cnt;
-
-	/* Positive value indicates it need free vring descriptors */
-	if (unlikely(need > 0)) {
-		virtio_xmit_cleanup_inorder_packed(vq, need);
-		need = slots - vq->vq_free_cnt;
-		if (unlikely(need > 0)) {
-			PMD_TX_LOG(ERR,
-				   "No free tx descriptors to transmit");
-			return -1;
-		}
-	}
-
-	/* Enqueue Packet buffers */
-	virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);
-
-	txvq->stats.bytes += txm->pkt_len;
-	return 0;
-}
-
 uint16_t
 virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
@@ -293,58 +191,6 @@  virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-/* Optionally fill offload information in structure */
-static inline int
-virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
-{
-	struct rte_net_hdr_lens hdr_lens;
-	uint32_t hdrlen, ptype;
-	int l4_supported = 0;
-
-	/* nothing to do */
-	if (hdr->flags == 0)
-		return 0;
-
-	/* GSO not support in vec path, skip check */
-	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
-
-	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
-	m->packet_type = ptype;
-	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
-	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
-	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
-		l4_supported = 1;
-
-	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
-		if (hdr->csum_start <= hdrlen && l4_supported) {
-			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
-		} else {
-			/* Unknown proto or tunnel, do sw cksum. We can assume
-			 * the cksum field is in the first segment since the
-			 * buffers we provided to the host are large enough.
-			 * In case of SCTP, this will be wrong since it's a CRC
-			 * but there's nothing we can do.
-			 */
-			uint16_t csum = 0, off;
-
-			rte_raw_cksum_mbuf(m, hdr->csum_start,
-				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
-				&csum);
-			if (likely(csum != 0xffff))
-				csum = ~csum;
-			off = hdr->csum_offset + hdr->csum_start;
-			if (rte_pktmbuf_data_len(m) >= off + 1)
-				*rte_pktmbuf_mtod_offset(m, uint16_t *,
-					off) = csum;
-		}
-	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
-		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-	}
-
-	return 0;
-}
-
 static inline uint16_t
 virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
 				   struct rte_mbuf **rx_pkts)
@@ -445,114 +291,6 @@  virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
 	return 0;
 }
 
-static uint16_t
-virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
-				    struct rte_mbuf **rx_pkts)
-{
-	uint16_t used_idx, id;
-	uint32_t len;
-	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw = vq->hw;
-	uint32_t hdr_size = hw->vtnet_hdr_size;
-	struct virtio_net_hdr *hdr;
-	struct vring_packed_desc *desc;
-	struct rte_mbuf *cookie;
-
-	desc = vq->vq_packed.ring.desc;
-	used_idx = vq->vq_used_cons_idx;
-	if (!desc_is_used(&desc[used_idx], vq))
-		return -1;
-
-	len = desc[used_idx].len;
-	id = desc[used_idx].id;
-	cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
-	if (unlikely(cookie == NULL)) {
-		PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
-				vq->vq_used_cons_idx);
-		return -1;
-	}
-	rte_prefetch0(cookie);
-	rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
-
-	cookie->data_off = RTE_PKTMBUF_HEADROOM;
-	cookie->ol_flags = 0;
-	cookie->pkt_len = (uint32_t)(len - hdr_size);
-	cookie->data_len = (uint32_t)(len - hdr_size);
-
-	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
-					RTE_PKTMBUF_HEADROOM - hdr_size);
-	if (hw->has_rx_offload)
-		virtio_vec_rx_offload(cookie, hdr);
-
-	*rx_pkts = cookie;
-
-	rxvq->stats.bytes += cookie->pkt_len;
-
-	vq->vq_free_cnt++;
-	vq->vq_used_cons_idx++;
-	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
-		vq->vq_used_cons_idx -= vq->vq_nentries;
-		vq->vq_packed.used_wrap_counter ^= 1;
-	}
-
-	return 0;
-}
-
-static inline void
-virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
-			      struct rte_mbuf **cookie,
-			      uint16_t num)
-{
-	struct virtqueue *vq = rxvq->vq;
-	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-	uint16_t flags = vq->vq_packed.cached_flags;
-	struct virtio_hw *hw = vq->hw;
-	struct vq_desc_extra *dxp;
-	uint16_t idx, i;
-	uint16_t batch_num, total_num = 0;
-	uint16_t head_idx = vq->vq_avail_idx;
-	uint16_t head_flag = vq->vq_packed.cached_flags;
-	uint64_t addr;
-
-	do {
-		idx = vq->vq_avail_idx;
-
-		batch_num = PACKED_BATCH_SIZE;
-		if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
-			batch_num = vq->vq_nentries - idx;
-		if (unlikely((total_num + batch_num) > num))
-			batch_num = num - total_num;
-
-		virtio_for_each_try_unroll(i, 0, batch_num) {
-			dxp = &vq->vq_descx[idx + i];
-			dxp->cookie = (void *)cookie[total_num + i];
-
-			addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
-				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-			start_dp[idx + i].addr = addr;
-			start_dp[idx + i].len = cookie[total_num + i]->buf_len
-				- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
-			if (total_num || i) {
-				virtqueue_store_flags_packed(&start_dp[idx + i],
-						flags, hw->weak_barriers);
-			}
-		}
-
-		vq->vq_avail_idx += batch_num;
-		if (vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-			flags = vq->vq_packed.cached_flags;
-		}
-		total_num += batch_num;
-	} while (total_num < num);
-
-	virtqueue_store_flags_packed(&start_dp[head_idx], head_flag,
-				hw->weak_barriers);
-	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
-}
-
 uint16_t
 virtio_recv_pkts_packed_vec(void *rx_queue,
 			    struct rte_mbuf **rx_pkts,