get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
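
These methods can be exercised with any HTTP client. A minimal read-only sketch in Python (the third-party requests library is assumed; the URL and field names are taken from the response shown below):

    import requests

    # Fetch the patch object shown below from the public DPDK Patchwork instance.
    resp = requests.get("http://patches.dpdk.org/api/patches/77384/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])               # "[RFC,1/3] net/virtio: move AVX based Rx and Tx code ..."
    print(patch["state"])              # "superseded"
    print(patch["submitter"]["name"])  # "Joyce Kong"

    # The raw patch in mbox format is linked from the object itself.
    mbox = requests.get(patch["mbox"]).text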

GET /api/patches/77384/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77384,
    "url": "http://patches.dpdk.org/api/patches/77384/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200911120906.45995-2-joyce.kong@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200911120906.45995-2-joyce.kong@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200911120906.45995-2-joyce.kong@arm.com",
    "date": "2020-09-11T12:09:04",
    "name": "[RFC,1/3] net/virtio: move AVX based Rx and Tx code to separate file",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "eeebc5a1bbb369c318c896d97b813d6c46a5f775",
    "submitter": {
        "id": 970,
        "url": "http://patches.dpdk.org/api/people/970/?format=api",
        "name": "Joyce Kong",
        "email": "joyce.kong@arm.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200911120906.45995-2-joyce.kong@arm.com/mbox/",
    "series": [
        {
            "id": 12147,
            "url": "http://patches.dpdk.org/api/series/12147/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12147",
            "date": "2020-09-11T12:09:03",
            "name": "Vectorize packed ring RX path with NEON",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12147/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77384/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/77384/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1027BA04B5;\n\tFri, 11 Sep 2020 14:09:24 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A41FA1C0D7;\n\tFri, 11 Sep 2020 14:09:21 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by dpdk.org (Postfix) with ESMTP id 807111C0D0\n for <dev@dpdk.org>; Fri, 11 Sep 2020 14:09:20 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id E8E4911B3;\n Fri, 11 Sep 2020 05:09:19 -0700 (PDT)",
            "from net-arm-thunderx2-03.shanghai.arm.com\n (net-arm-thunderx2-03.shanghai.arm.com [10.169.210.123])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 2E1133F68F;\n Fri, 11 Sep 2020 05:09:16 -0700 (PDT)"
        ],
        "From": "Joyce Kong <joyce.kong@arm.com>",
        "To": "maxime.coquelin@redhat.com",
        "Cc": "jerinj@marvell.com, dev@dpdk.org, nd@arm.com,\n honnappa.nagarahalli@arm.com,\n ruifeng.wang@arm.com, phil.yang@arm.com",
        "Date": "Fri, 11 Sep 2020 20:09:04 +0800",
        "Message-Id": "<20200911120906.45995-2-joyce.kong@arm.com>",
        "X-Mailer": "git-send-email 2.28.0",
        "In-Reply-To": "<20200911120906.45995-1-joyce.kong@arm.com>",
        "References": "<20200911120906.45995-1-joyce.kong@arm.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [RFC 1/3] net/virtio: move AVX based Rx and Tx code to\n\tseparate file",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Split out AVX instruction based virtio packed ring Rx and Tx\nimplementation to a separate file.\n\nSigned-off-by: Phil Yang <phil.yang@arm.com>\n---\n drivers/net/virtio/meson.build              |   1 +\n drivers/net/virtio/virtio_rxtx_packed.c     |  37 +++\n drivers/net/virtio/virtio_rxtx_packed.h     | 284 ++++++++++++++++++++\n drivers/net/virtio/virtio_rxtx_packed_avx.c | 264 +-----------------\n 4 files changed, 323 insertions(+), 263 deletions(-)\n create mode 100644 drivers/net/virtio/virtio_rxtx_packed.c\n create mode 100644 drivers/net/virtio/virtio_rxtx_packed.h",
    "diff": "diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build\nindex 3fd6051f4..e1851b0a6 100644\n--- a/drivers/net/virtio/meson.build\n+++ b/drivers/net/virtio/meson.build\n@@ -5,6 +5,7 @@ sources += files('virtio_ethdev.c',\n \t'virtio_pci.c',\n \t'virtio_rxtx.c',\n \t'virtio_rxtx_simple.c',\n+\t'virtio_rxtx_packed.c',\n \t'virtqueue.c')\n deps += ['kvargs', 'bus_pci']\n \ndiff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c\nnew file mode 100644\nindex 000000000..e614e19fc\n--- /dev/null\n+++ b/drivers/net/virtio/virtio_rxtx_packed.c\n@@ -0,0 +1,37 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <errno.h>\n+\n+#include <rte_net.h>\n+\n+#include \"virtio_logs.h\"\n+#include \"virtio_ethdev.h\"\n+#include \"virtio_pci.h\"\n+#include \"virtio_rxtx_packed.h\"\n+#include \"virtqueue.h\"\n+\n+/* Stub for linkage when arch specific implementation is not available */\n+__rte_weak uint16_t\n+virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,\n+\t\t\tstruct rte_mbuf **tx_pkts __rte_unused,\n+\t\t\tuint16_t nb_pkts __rte_unused)\n+{\n+\trte_panic(\"Wrong weak function linked by linker\\n\");\n+\treturn 0;\n+}\n+\n+/* Stub for linkage when arch specific implementation is not available */\n+__rte_weak uint16_t\n+virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,\n+\t\t\t    struct rte_mbuf **rx_pkts __rte_unused,\n+\t\t\t    uint16_t nb_pkts __rte_unused)\n+{\n+\trte_panic(\"Wrong weak function linked by linker\\n\");\n+\treturn 0;\n+}\ndiff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h\nnew file mode 100644\nindex 000000000..b2447843b\n--- /dev/null\n+++ b/drivers/net/virtio/virtio_rxtx_packed.h\n@@ -0,0 +1,284 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#ifndef _VIRTIO_RXTX_PACKED_H_\n+#define _VIRTIO_RXTX_PACKED_H_\n+\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <errno.h>\n+\n+#include <rte_net.h>\n+\n+#include \"virtio_logs.h\"\n+#include \"virtio_ethdev.h\"\n+#include \"virtio_pci.h\"\n+#include \"virtqueue.h\"\n+\n+#define BYTE_SIZE 8\n+/* flag bits offset in packed ring desc higher 64bits */\n+#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \\\n+\toffsetof(struct vring_packed_desc, len)) * BYTE_SIZE)\n+\n+#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \\\n+\tFLAGS_BITS_OFFSET)\n+\n+/* reference count offset in mbuf rearm data */\n+#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \\\n+\toffsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)\n+/* segment number offset in mbuf rearm data */\n+#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \\\n+\toffsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)\n+\n+/* default rearm data */\n+#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \\\n+\t1ULL << REFCNT_BITS_OFFSET)\n+\n+/* id bits offset in packed ring desc higher 64bits */\n+#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \\\n+\toffsetof(struct vring_packed_desc, len)) * BYTE_SIZE)\n+\n+/* net hdr short size mask */\n+#define NET_HDR_MASK 0x3F\n+\n+#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \\\n+\tsizeof(struct vring_packed_desc))\n+#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)\n+\n+#ifdef 
VIRTIO_GCC_UNROLL_PRAGMA\n+#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"GCC unroll 4\") \\\n+\tfor (iter = val; iter < size; iter++)\n+#endif\n+\n+#ifdef VIRTIO_CLANG_UNROLL_PRAGMA\n+#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"unroll 4\") \\\n+\tfor (iter = val; iter < size; iter++)\n+#endif\n+\n+#ifdef VIRTIO_ICC_UNROLL_PRAGMA\n+#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"unroll (4)\") \\\n+\tfor (iter = val; iter < size; iter++)\n+#endif\n+\n+#ifndef virtio_for_each_try_unroll\n+#define virtio_for_each_try_unroll(iter, val, num) \\\n+\tfor (iter = val; iter < num; iter++)\n+#endif\n+\n+static inline void\n+virtio_update_batch_stats(struct virtnet_stats *stats,\n+\t\t\t  uint16_t pkt_len1,\n+\t\t\t  uint16_t pkt_len2,\n+\t\t\t  uint16_t pkt_len3,\n+\t\t\t  uint16_t pkt_len4)\n+{\n+\tstats->bytes += pkt_len1;\n+\tstats->bytes += pkt_len2;\n+\tstats->bytes += pkt_len3;\n+\tstats->bytes += pkt_len4;\n+}\n+\n+static inline int\n+virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,\n+\t\t\t\t    struct rte_mbuf *txm)\n+{\n+\tstruct virtqueue *vq = txvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tuint16_t hdr_size = hw->vtnet_hdr_size;\n+\tuint16_t slots, can_push;\n+\tint16_t need;\n+\n+\t/* How many main ring entries are needed to this Tx?\n+\t * any_layout => number of segments\n+\t * default    => number of segments + 1\n+\t */\n+\tcan_push = rte_mbuf_refcnt_read(txm) == 1 &&\n+\t\t   RTE_MBUF_DIRECT(txm) &&\n+\t\t   txm->nb_segs == 1 &&\n+\t\t   rte_pktmbuf_headroom(txm) >= hdr_size;\n+\n+\tslots = txm->nb_segs + !can_push;\n+\tneed = slots - vq->vq_free_cnt;\n+\n+\t/* Positive value indicates it need free vring descriptors */\n+\tif (unlikely(need > 0)) {\n+\t\tvirtio_xmit_cleanup_inorder_packed(vq, need);\n+\t\tneed = slots - vq->vq_free_cnt;\n+\t\tif (unlikely(need > 0)) {\n+\t\t\tPMD_TX_LOG(ERR,\n+\t\t\t\t   \"No free tx descriptors to transmit\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* Enqueue Packet buffers */\n+\tvirtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);\n+\n+\ttxvq->stats.bytes += txm->pkt_len;\n+\treturn 0;\n+}\n+\n+/* Optionally fill offload information in structure */\n+static inline int\n+virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)\n+{\n+\tstruct rte_net_hdr_lens hdr_lens;\n+\tuint32_t hdrlen, ptype;\n+\tint l4_supported = 0;\n+\n+\t/* nothing to do */\n+\tif (hdr->flags == 0)\n+\t\treturn 0;\n+\n+\t/* GSO not support in vec path, skip check */\n+\tm->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;\n+\n+\tptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);\n+\tm->packet_type = ptype;\n+\tif ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||\n+\t    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||\n+\t    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)\n+\t\tl4_supported = 1;\n+\n+\tif (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {\n+\t\thdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;\n+\t\tif (hdr->csum_start <= hdrlen && l4_supported) {\n+\t\t\tm->ol_flags |= PKT_RX_L4_CKSUM_NONE;\n+\t\t} else {\n+\t\t\t/* Unknown proto or tunnel, do sw cksum. 
We can assume\n+\t\t\t * the cksum field is in the first segment since the\n+\t\t\t * buffers we provided to the host are large enough.\n+\t\t\t * In case of SCTP, this will be wrong since it's a CRC\n+\t\t\t * but there's nothing we can do.\n+\t\t\t */\n+\t\t\tuint16_t csum = 0, off;\n+\n+\t\t\trte_raw_cksum_mbuf(m, hdr->csum_start,\n+\t\t\t\trte_pktmbuf_pkt_len(m) - hdr->csum_start,\n+\t\t\t\t&csum);\n+\t\t\tif (likely(csum != 0xffff))\n+\t\t\t\tcsum = ~csum;\n+\t\t\toff = hdr->csum_offset + hdr->csum_start;\n+\t\t\tif (rte_pktmbuf_data_len(m) >= off + 1)\n+\t\t\t\t*rte_pktmbuf_mtod_offset(m, uint16_t *,\n+\t\t\t\t\toff) = csum;\n+\t\t}\n+\t} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {\n+\t\tm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,\n+\t\t\t\t    struct rte_mbuf **rx_pkts)\n+{\n+\tuint16_t used_idx, id;\n+\tuint32_t len;\n+\tstruct virtqueue *vq = rxvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tuint32_t hdr_size = hw->vtnet_hdr_size;\n+\tstruct virtio_net_hdr *hdr;\n+\tstruct vring_packed_desc *desc;\n+\tstruct rte_mbuf *cookie;\n+\n+\tdesc = vq->vq_packed.ring.desc;\n+\tused_idx = vq->vq_used_cons_idx;\n+\tif (!desc_is_used(&desc[used_idx], vq))\n+\t\treturn -1;\n+\n+\tlen = desc[used_idx].len;\n+\tid = desc[used_idx].id;\n+\tcookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;\n+\tif (unlikely(cookie == NULL)) {\n+\t\tPMD_DRV_LOG(ERR, \"vring descriptor with no mbuf cookie at %u\",\n+\t\t\t\tvq->vq_used_cons_idx);\n+\t\treturn -1;\n+\t}\n+\trte_prefetch0(cookie);\n+\trte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));\n+\n+\tcookie->data_off = RTE_PKTMBUF_HEADROOM;\n+\tcookie->ol_flags = 0;\n+\tcookie->pkt_len = (uint32_t)(len - hdr_size);\n+\tcookie->data_len = (uint32_t)(len - hdr_size);\n+\n+\thdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +\n+\t\t\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size);\n+\tif (hw->has_rx_offload)\n+\t\tvirtio_vec_rx_offload(cookie, hdr);\n+\n+\t*rx_pkts = cookie;\n+\n+\trxvq->stats.bytes += cookie->pkt_len;\n+\n+\tvq->vq_free_cnt++;\n+\tvq->vq_used_cons_idx++;\n+\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n+\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n+\t\tvq->vq_packed.used_wrap_counter ^= 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,\n+\t\t\t      struct rte_mbuf **cookie,\n+\t\t\t      uint16_t num)\n+{\n+\tstruct virtqueue *vq = rxvq->vq;\n+\tstruct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;\n+\tuint16_t flags = vq->vq_packed.cached_flags;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tstruct vq_desc_extra *dxp;\n+\tuint16_t idx, i;\n+\tuint16_t batch_num, total_num = 0;\n+\tuint16_t head_idx = vq->vq_avail_idx;\n+\tuint16_t head_flag = vq->vq_packed.cached_flags;\n+\tuint64_t addr;\n+\n+\tdo {\n+\t\tidx = vq->vq_avail_idx;\n+\n+\t\tbatch_num = PACKED_BATCH_SIZE;\n+\t\tif (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))\n+\t\t\tbatch_num = vq->vq_nentries - idx;\n+\t\tif (unlikely((total_num + batch_num) > num))\n+\t\t\tbatch_num = num - total_num;\n+\n+\t\tvirtio_for_each_try_unroll(i, 0, batch_num) {\n+\t\t\tdxp = &vq->vq_descx[idx + i];\n+\t\t\tdxp->cookie = (void *)cookie[total_num + i];\n+\n+\t\t\taddr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +\n+\t\t\t\tRTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;\n+\t\t\tstart_dp[idx + i].addr = addr;\n+\t\t\tstart_dp[idx + i].len = cookie[total_num + i]->buf_len\n+\t\t\t\t- 
RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;\n+\t\t\tif (total_num || i) {\n+\t\t\t\tvirtqueue_store_flags_packed(&start_dp[idx + i],\n+\t\t\t\t\t\tflags, hw->weak_barriers);\n+\t\t\t}\n+\t\t}\n+\n+\t\tvq->vq_avail_idx += batch_num;\n+\t\tif (vq->vq_avail_idx >= vq->vq_nentries) {\n+\t\t\tvq->vq_avail_idx -= vq->vq_nentries;\n+\t\t\tvq->vq_packed.cached_flags ^=\n+\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n+\t\t\tflags = vq->vq_packed.cached_flags;\n+\t\t}\n+\t\ttotal_num += batch_num;\n+\t} while (total_num < num);\n+\n+\tvirtqueue_store_flags_packed(&start_dp[head_idx], head_flag,\n+\t\t\t\thw->weak_barriers);\n+\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);\n+}\n+\n+#endif /* _VIRTIO_RXTX_PACKED_H_ */\ndiff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c\nindex 6a8214725..c8fbb8f2c 100644\n--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c\n+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c\n@@ -13,71 +13,9 @@\n #include \"virtio_logs.h\"\n #include \"virtio_ethdev.h\"\n #include \"virtio_pci.h\"\n+#include \"virtio_rxtx_packed.h\"\n #include \"virtqueue.h\"\n \n-#define BYTE_SIZE 8\n-/* flag bits offset in packed ring desc higher 64bits */\n-#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \\\n-\toffsetof(struct vring_packed_desc, len)) * BYTE_SIZE)\n-\n-#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \\\n-\tFLAGS_BITS_OFFSET)\n-\n-/* reference count offset in mbuf rearm data */\n-#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \\\n-\toffsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)\n-/* segment number offset in mbuf rearm data */\n-#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \\\n-\toffsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)\n-\n-/* default rearm data */\n-#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \\\n-\t1ULL << REFCNT_BITS_OFFSET)\n-\n-/* id bits offset in packed ring desc higher 64bits */\n-#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \\\n-\toffsetof(struct vring_packed_desc, len)) * BYTE_SIZE)\n-\n-/* net hdr short size mask */\n-#define NET_HDR_MASK 0x3F\n-\n-#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \\\n-\tsizeof(struct vring_packed_desc))\n-#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)\n-\n-#ifdef VIRTIO_GCC_UNROLL_PRAGMA\n-#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"GCC unroll 4\") \\\n-\tfor (iter = val; iter < size; iter++)\n-#endif\n-\n-#ifdef VIRTIO_CLANG_UNROLL_PRAGMA\n-#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"unroll 4\") \\\n-\tfor (iter = val; iter < size; iter++)\n-#endif\n-\n-#ifdef VIRTIO_ICC_UNROLL_PRAGMA\n-#define virtio_for_each_try_unroll(iter, val, size) _Pragma(\"unroll (4)\") \\\n-\tfor (iter = val; iter < size; iter++)\n-#endif\n-\n-#ifndef virtio_for_each_try_unroll\n-#define virtio_for_each_try_unroll(iter, val, num) \\\n-\tfor (iter = val; iter < num; iter++)\n-#endif\n-\n-static inline void\n-virtio_update_batch_stats(struct virtnet_stats *stats,\n-\t\t\t  uint16_t pkt_len1,\n-\t\t\t  uint16_t pkt_len2,\n-\t\t\t  uint16_t pkt_len3,\n-\t\t\t  uint16_t pkt_len4)\n-{\n-\tstats->bytes += pkt_len1;\n-\tstats->bytes += pkt_len2;\n-\tstats->bytes += pkt_len3;\n-\tstats->bytes += pkt_len4;\n-}\n-\n static inline int\n virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,\n \t\t\t\t   struct rte_mbuf **tx_pkts)\n@@ -200,46 +138,6 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,\n \treturn 0;\n }\n \n-static inline 
int\n-virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,\n-\t\t\t\t    struct rte_mbuf *txm)\n-{\n-\tstruct virtqueue *vq = txvq->vq;\n-\tstruct virtio_hw *hw = vq->hw;\n-\tuint16_t hdr_size = hw->vtnet_hdr_size;\n-\tuint16_t slots, can_push;\n-\tint16_t need;\n-\n-\t/* How many main ring entries are needed to this Tx?\n-\t * any_layout => number of segments\n-\t * default    => number of segments + 1\n-\t */\n-\tcan_push = rte_mbuf_refcnt_read(txm) == 1 &&\n-\t\t   RTE_MBUF_DIRECT(txm) &&\n-\t\t   txm->nb_segs == 1 &&\n-\t\t   rte_pktmbuf_headroom(txm) >= hdr_size;\n-\n-\tslots = txm->nb_segs + !can_push;\n-\tneed = slots - vq->vq_free_cnt;\n-\n-\t/* Positive value indicates it need free vring descriptors */\n-\tif (unlikely(need > 0)) {\n-\t\tvirtio_xmit_cleanup_inorder_packed(vq, need);\n-\t\tneed = slots - vq->vq_free_cnt;\n-\t\tif (unlikely(need > 0)) {\n-\t\t\tPMD_TX_LOG(ERR,\n-\t\t\t\t   \"No free tx descriptors to transmit\");\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\t/* Enqueue Packet buffers */\n-\tvirtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);\n-\n-\ttxvq->stats.bytes += txm->pkt_len;\n-\treturn 0;\n-}\n-\n uint16_t\n virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tuint16_t nb_pkts)\n@@ -293,58 +191,6 @@ virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \treturn nb_tx;\n }\n \n-/* Optionally fill offload information in structure */\n-static inline int\n-virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)\n-{\n-\tstruct rte_net_hdr_lens hdr_lens;\n-\tuint32_t hdrlen, ptype;\n-\tint l4_supported = 0;\n-\n-\t/* nothing to do */\n-\tif (hdr->flags == 0)\n-\t\treturn 0;\n-\n-\t/* GSO not support in vec path, skip check */\n-\tm->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;\n-\n-\tptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);\n-\tm->packet_type = ptype;\n-\tif ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||\n-\t    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||\n-\t    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)\n-\t\tl4_supported = 1;\n-\n-\tif (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {\n-\t\thdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;\n-\t\tif (hdr->csum_start <= hdrlen && l4_supported) {\n-\t\t\tm->ol_flags |= PKT_RX_L4_CKSUM_NONE;\n-\t\t} else {\n-\t\t\t/* Unknown proto or tunnel, do sw cksum. 
We can assume\n-\t\t\t * the cksum field is in the first segment since the\n-\t\t\t * buffers we provided to the host are large enough.\n-\t\t\t * In case of SCTP, this will be wrong since it's a CRC\n-\t\t\t * but there's nothing we can do.\n-\t\t\t */\n-\t\t\tuint16_t csum = 0, off;\n-\n-\t\t\trte_raw_cksum_mbuf(m, hdr->csum_start,\n-\t\t\t\trte_pktmbuf_pkt_len(m) - hdr->csum_start,\n-\t\t\t\t&csum);\n-\t\t\tif (likely(csum != 0xffff))\n-\t\t\t\tcsum = ~csum;\n-\t\t\toff = hdr->csum_offset + hdr->csum_start;\n-\t\t\tif (rte_pktmbuf_data_len(m) >= off + 1)\n-\t\t\t\t*rte_pktmbuf_mtod_offset(m, uint16_t *,\n-\t\t\t\t\toff) = csum;\n-\t\t}\n-\t} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {\n-\t\tm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n-\t}\n-\n-\treturn 0;\n-}\n-\n static inline uint16_t\n virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,\n \t\t\t\t   struct rte_mbuf **rx_pkts)\n@@ -445,114 +291,6 @@ virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,\n \treturn 0;\n }\n \n-static uint16_t\n-virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,\n-\t\t\t\t    struct rte_mbuf **rx_pkts)\n-{\n-\tuint16_t used_idx, id;\n-\tuint32_t len;\n-\tstruct virtqueue *vq = rxvq->vq;\n-\tstruct virtio_hw *hw = vq->hw;\n-\tuint32_t hdr_size = hw->vtnet_hdr_size;\n-\tstruct virtio_net_hdr *hdr;\n-\tstruct vring_packed_desc *desc;\n-\tstruct rte_mbuf *cookie;\n-\n-\tdesc = vq->vq_packed.ring.desc;\n-\tused_idx = vq->vq_used_cons_idx;\n-\tif (!desc_is_used(&desc[used_idx], vq))\n-\t\treturn -1;\n-\n-\tlen = desc[used_idx].len;\n-\tid = desc[used_idx].id;\n-\tcookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;\n-\tif (unlikely(cookie == NULL)) {\n-\t\tPMD_DRV_LOG(ERR, \"vring descriptor with no mbuf cookie at %u\",\n-\t\t\t\tvq->vq_used_cons_idx);\n-\t\treturn -1;\n-\t}\n-\trte_prefetch0(cookie);\n-\trte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));\n-\n-\tcookie->data_off = RTE_PKTMBUF_HEADROOM;\n-\tcookie->ol_flags = 0;\n-\tcookie->pkt_len = (uint32_t)(len - hdr_size);\n-\tcookie->data_len = (uint32_t)(len - hdr_size);\n-\n-\thdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +\n-\t\t\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size);\n-\tif (hw->has_rx_offload)\n-\t\tvirtio_vec_rx_offload(cookie, hdr);\n-\n-\t*rx_pkts = cookie;\n-\n-\trxvq->stats.bytes += cookie->pkt_len;\n-\n-\tvq->vq_free_cnt++;\n-\tvq->vq_used_cons_idx++;\n-\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n-\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n-\t\tvq->vq_packed.used_wrap_counter ^= 1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static inline void\n-virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,\n-\t\t\t      struct rte_mbuf **cookie,\n-\t\t\t      uint16_t num)\n-{\n-\tstruct virtqueue *vq = rxvq->vq;\n-\tstruct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;\n-\tuint16_t flags = vq->vq_packed.cached_flags;\n-\tstruct virtio_hw *hw = vq->hw;\n-\tstruct vq_desc_extra *dxp;\n-\tuint16_t idx, i;\n-\tuint16_t batch_num, total_num = 0;\n-\tuint16_t head_idx = vq->vq_avail_idx;\n-\tuint16_t head_flag = vq->vq_packed.cached_flags;\n-\tuint64_t addr;\n-\n-\tdo {\n-\t\tidx = vq->vq_avail_idx;\n-\n-\t\tbatch_num = PACKED_BATCH_SIZE;\n-\t\tif (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))\n-\t\t\tbatch_num = vq->vq_nentries - idx;\n-\t\tif (unlikely((total_num + batch_num) > num))\n-\t\t\tbatch_num = num - total_num;\n-\n-\t\tvirtio_for_each_try_unroll(i, 0, batch_num) {\n-\t\t\tdxp = &vq->vq_descx[idx + i];\n-\t\t\tdxp->cookie = (void *)cookie[total_num + i];\n-\n-\t\t\taddr = 
VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +\n-\t\t\t\tRTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;\n-\t\t\tstart_dp[idx + i].addr = addr;\n-\t\t\tstart_dp[idx + i].len = cookie[total_num + i]->buf_len\n-\t\t\t\t- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;\n-\t\t\tif (total_num || i) {\n-\t\t\t\tvirtqueue_store_flags_packed(&start_dp[idx + i],\n-\t\t\t\t\t\tflags, hw->weak_barriers);\n-\t\t\t}\n-\t\t}\n-\n-\t\tvq->vq_avail_idx += batch_num;\n-\t\tif (vq->vq_avail_idx >= vq->vq_nentries) {\n-\t\t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\t\tvq->vq_packed.cached_flags ^=\n-\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n-\t\t\tflags = vq->vq_packed.cached_flags;\n-\t\t}\n-\t\ttotal_num += batch_num;\n-\t} while (total_num < num);\n-\n-\tvirtqueue_store_flags_packed(&start_dp[head_idx], head_flag,\n-\t\t\t\thw->weak_barriers);\n-\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);\n-}\n-\n uint16_t\n virtio_recv_pkts_packed_vec(void *rx_queue,\n \t\t\t    struct rte_mbuf **rx_pkts,\n",
    "prefixes": [
        "RFC",
        "1/3"
    ]
}
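
As the patch and put methods above indicate, the same endpoint also accepts updates. A hedged sketch of a state change in Python, assuming a maintainer API token (Patchwork's REST API uses token authentication) and assuming that fields such as state, archived and delegate are writable for maintainers:

    import requests

    # Hypothetical token; real tokens are issued per user by the Patchwork instance.
    TOKEN = "your-api-token"

    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/77384/",
        headers={"Authorization": "Token " + TOKEN},
        json={"state": "accepted", "archived": False},  # assumed writable fields
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT request to the same URL behaves similarly but replaces the writable fields as a whole, whereas PATCH updates only the fields supplied.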