get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.

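As a reference, here is a minimal sketch of retrieving this patch through the REST API, assuming Python 3 with the requests library installed; the transcript below shows the equivalent raw request and response.

import requests

# Fetch the same patch shown in the transcript below (public read access, no auth).
resp = requests.get("https://patches.dpdk.org/api/patches/69321/", timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])    # "[v10,5/9] net/virtio: reuse packed ring functions"
print(patch["state"])   # "superseded"
print(patch["mbox"])    # raw mbox URL, suitable for feeding to `git am`
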
GET /api/patches/69321/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 69321,
    "url": "https://patches.dpdk.org/api/patches/69321/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200426021943.43158-6-yong.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200426021943.43158-6-yong.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200426021943.43158-6-yong.liu@intel.com",
    "date": "2020-04-26T02:19:39",
    "name": "[v10,5/9] net/virtio: reuse packed ring functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7424fef2a062a459205de970845624dbd694ef8c",
    "submitter": {
        "id": 17,
        "url": "https://patches.dpdk.org/api/people/17/?format=api",
        "name": "Marvin Liu",
        "email": "yong.liu@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200426021943.43158-6-yong.liu@intel.com/mbox/",
    "series": [
        {
            "id": 9638,
            "url": "https://patches.dpdk.org/api/series/9638/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=9638",
            "date": "2020-04-26T02:19:35",
            "name": "[v10,1/9] net/virtio: add Rx free threshold setting",
            "version": 10,
            "mbox": "https://patches.dpdk.org/series/9638/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/69321/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/69321/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E9C8BA00C5;\n\tSun, 26 Apr 2020 04:20:37 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 673011C0D1;\n\tSun, 26 Apr 2020 04:20:08 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n by dpdk.org (Postfix) with ESMTP id 60CC01C038\n for <dev@dpdk.org>; Sun, 26 Apr 2020 04:19:56 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 25 Apr 2020 19:19:55 -0700",
            "from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.56])\n by orsmga005.jf.intel.com with ESMTP; 25 Apr 2020 19:19:54 -0700"
        ],
        "IronPort-SDR": [
            "\n 6StlhJy9WRR4RofVnaUAijcx6Vx06dDwNVVRf4DcnW/HeScDR7VBp7t9ra9GqFsMP+hu8wIYvc\n XDgrF4BbcuHQ==",
            "\n mdNhhdI7NmIGMF6Myb+EPe/XZQ4jgJv6lfpUGkRhXm+9viBayjbuwvt21p8J7+jphtTa+7eyWl\n qEMzmFsEUJPA=="
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.73,318,1583222400\"; d=\"scan'208\";a=\"431277937\"",
        "From": "Marvin Liu <yong.liu@intel.com>",
        "To": "maxime.coquelin@redhat.com, xiaolong.ye@intel.com, zhihong.wang@intel.com",
        "Cc": "dev@dpdk.org,\n\tMarvin Liu <yong.liu@intel.com>",
        "Date": "Sun, 26 Apr 2020 10:19:39 +0800",
        "Message-Id": "<20200426021943.43158-6-yong.liu@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200426021943.43158-1-yong.liu@intel.com>",
        "References": "<20200313174230.74661-1-yong.liu@intel.com>\n <20200426021943.43158-1-yong.liu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v10 5/9] net/virtio: reuse packed ring functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Move offload, xmit cleanup and packed xmit enqueue function to header\nfile. These functions will be reused by packed ring vectorized path.\n\nSigned-off-by: Marvin Liu <yong.liu@intel.com>",
    "diff": "diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex 84f4cf946..a549991aa 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -89,23 +89,6 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)\n \tdp->next = VQ_RING_DESC_CHAIN_END;\n }\n \n-static void\n-vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)\n-{\n-\tstruct vq_desc_extra *dxp;\n-\n-\tdxp = &vq->vq_descx[id];\n-\tvq->vq_free_cnt += dxp->ndescs;\n-\n-\tif (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)\n-\t\tvq->vq_desc_head_idx = id;\n-\telse\n-\t\tvq->vq_descx[vq->vq_desc_tail_idx].next = id;\n-\n-\tvq->vq_desc_tail_idx = id;\n-\tdxp->next = VQ_RING_DESC_CHAIN_END;\n-}\n-\n void\n virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)\n {\n@@ -264,130 +247,6 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,\n \treturn i;\n }\n \n-#ifndef DEFAULT_TX_FREE_THRESH\n-#define DEFAULT_TX_FREE_THRESH 32\n-#endif\n-\n-static void\n-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)\n-{\n-\tuint16_t used_idx, id, curr_id, free_cnt = 0;\n-\tuint16_t size = vq->vq_nentries;\n-\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc;\n-\tstruct vq_desc_extra *dxp;\n-\n-\tused_idx = vq->vq_used_cons_idx;\n-\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n-\t * and wait for used desc in virtqueue.\n-\t */\n-\twhile (num > 0 && desc_is_used(&desc[used_idx], vq)) {\n-\t\tid = desc[used_idx].id;\n-\t\tdo {\n-\t\t\tcurr_id = used_idx;\n-\t\t\tdxp = &vq->vq_descx[used_idx];\n-\t\t\tused_idx += dxp->ndescs;\n-\t\t\tfree_cnt += dxp->ndescs;\n-\t\t\tnum -= dxp->ndescs;\n-\t\t\tif (used_idx >= size) {\n-\t\t\t\tused_idx -= size;\n-\t\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n-\t\t\t}\n-\t\t\tif (dxp->cookie != NULL) {\n-\t\t\t\trte_pktmbuf_free(dxp->cookie);\n-\t\t\t\tdxp->cookie = NULL;\n-\t\t\t}\n-\t\t} while (curr_id != id);\n-\t}\n-\tvq->vq_used_cons_idx = used_idx;\n-\tvq->vq_free_cnt += free_cnt;\n-}\n-\n-static void\n-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)\n-{\n-\tuint16_t used_idx, id;\n-\tuint16_t size = vq->vq_nentries;\n-\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc;\n-\tstruct vq_desc_extra *dxp;\n-\n-\tused_idx = vq->vq_used_cons_idx;\n-\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n-\t * and wait for used desc in virtqueue.\n-\t */\n-\twhile (num-- && desc_is_used(&desc[used_idx], vq)) {\n-\t\tid = desc[used_idx].id;\n-\t\tdxp = &vq->vq_descx[id];\n-\t\tvq->vq_used_cons_idx += dxp->ndescs;\n-\t\tif (vq->vq_used_cons_idx >= size) {\n-\t\t\tvq->vq_used_cons_idx -= size;\n-\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n-\t\t}\n-\t\tvq_ring_free_id_packed(vq, id);\n-\t\tif (dxp->cookie != NULL) {\n-\t\t\trte_pktmbuf_free(dxp->cookie);\n-\t\t\tdxp->cookie = NULL;\n-\t\t}\n-\t\tused_idx = vq->vq_used_cons_idx;\n-\t}\n-}\n-\n-/* Cleanup from completed transmits. 
*/\n-static inline void\n-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)\n-{\n-\tif (in_order)\n-\t\tvirtio_xmit_cleanup_inorder_packed(vq, num);\n-\telse\n-\t\tvirtio_xmit_cleanup_normal_packed(vq, num);\n-}\n-\n-static void\n-virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)\n-{\n-\tuint16_t i, used_idx, desc_idx;\n-\tfor (i = 0; i < num; i++) {\n-\t\tstruct vring_used_elem *uep;\n-\t\tstruct vq_desc_extra *dxp;\n-\n-\t\tused_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n-\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n-\n-\t\tdesc_idx = (uint16_t) uep->id;\n-\t\tdxp = &vq->vq_descx[desc_idx];\n-\t\tvq->vq_used_cons_idx++;\n-\t\tvq_ring_free_chain(vq, desc_idx);\n-\n-\t\tif (dxp->cookie != NULL) {\n-\t\t\trte_pktmbuf_free(dxp->cookie);\n-\t\t\tdxp->cookie = NULL;\n-\t\t}\n-\t}\n-}\n-\n-/* Cleanup from completed inorder transmits. */\n-static __rte_always_inline void\n-virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)\n-{\n-\tuint16_t i, idx = vq->vq_used_cons_idx;\n-\tint16_t free_cnt = 0;\n-\tstruct vq_desc_extra *dxp = NULL;\n-\n-\tif (unlikely(num == 0))\n-\t\treturn;\n-\n-\tfor (i = 0; i < num; i++) {\n-\t\tdxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];\n-\t\tfree_cnt += dxp->ndescs;\n-\t\tif (dxp->cookie != NULL) {\n-\t\t\trte_pktmbuf_free(dxp->cookie);\n-\t\t\tdxp->cookie = NULL;\n-\t\t}\n-\t}\n-\n-\tvq->vq_free_cnt += free_cnt;\n-\tvq->vq_used_cons_idx = idx;\n-}\n-\n static inline int\n virtqueue_enqueue_refill_inorder(struct virtqueue *vq,\n \t\t\tstruct rte_mbuf **cookies,\n@@ -562,68 +421,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)\n }\n \n \n-/* avoid write operation when necessary, to lessen cache issues */\n-#define ASSIGN_UNLESS_EQUAL(var, val) do {\t\\\n-\tif ((var) != (val))\t\t\t\\\n-\t\t(var) = (val);\t\t\t\\\n-} while (0)\n-\n-#define virtqueue_clear_net_hdr(_hdr) do {\t\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);\t\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);\t\\\n-\tASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);\t\\\n-} while (0)\n-\n-static inline void\n-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,\n-\t\t\tstruct rte_mbuf *cookie,\n-\t\t\tbool offload)\n-{\n-\tif (offload) {\n-\t\tif (cookie->ol_flags & PKT_TX_TCP_SEG)\n-\t\t\tcookie->ol_flags |= PKT_TX_TCP_CKSUM;\n-\n-\t\tswitch (cookie->ol_flags & PKT_TX_L4_MASK) {\n-\t\tcase PKT_TX_UDP_CKSUM:\n-\t\t\thdr->csum_start = cookie->l2_len + cookie->l3_len;\n-\t\t\thdr->csum_offset = offsetof(struct rte_udp_hdr,\n-\t\t\t\tdgram_cksum);\n-\t\t\thdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;\n-\t\t\tbreak;\n-\n-\t\tcase PKT_TX_TCP_CKSUM:\n-\t\t\thdr->csum_start = cookie->l2_len + cookie->l3_len;\n-\t\t\thdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);\n-\t\t\thdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;\n-\t\t\tbreak;\n-\n-\t\tdefault:\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->flags, 0);\n-\t\t\tbreak;\n-\t\t}\n \n-\t\t/* TCP Segmentation Offload */\n-\t\tif (cookie->ol_flags & PKT_TX_TCP_SEG) {\n-\t\t\thdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?\n-\t\t\t\tVIRTIO_NET_HDR_GSO_TCPV6 :\n-\t\t\t\tVIRTIO_NET_HDR_GSO_TCPV4;\n-\t\t\thdr->gso_size = cookie->tso_segsz;\n-\t\t\thdr->hdr_len =\n-\t\t\t\tcookie->l2_len +\n-\t\t\t\tcookie->l3_len +\n-\t\t\t\tcookie->l4_len;\n-\t\t} else 
{\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);\n-\t\t\tASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);\n-\t\t}\n-\t}\n-}\n \n static inline void\n virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,\n@@ -725,102 +523,6 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,\n \tvirtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);\n }\n \n-static inline void\n-virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n-\t\t\t      uint16_t needed, int can_push, int in_order)\n-{\n-\tstruct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;\n-\tstruct vq_desc_extra *dxp;\n-\tstruct virtqueue *vq = txvq->vq;\n-\tstruct vring_packed_desc *start_dp, *head_dp;\n-\tuint16_t idx, id, head_idx, head_flags;\n-\tint16_t head_size = vq->hw->vtnet_hdr_size;\n-\tstruct virtio_net_hdr *hdr;\n-\tuint16_t prev;\n-\tbool prepend_header = false;\n-\n-\tid = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;\n-\n-\tdxp = &vq->vq_descx[id];\n-\tdxp->ndescs = needed;\n-\tdxp->cookie = cookie;\n-\n-\thead_idx = vq->vq_avail_idx;\n-\tidx = head_idx;\n-\tprev = head_idx;\n-\tstart_dp = vq->vq_packed.ring.desc;\n-\n-\thead_dp = &vq->vq_packed.ring.desc[idx];\n-\thead_flags = cookie->next ? VRING_DESC_F_NEXT : 0;\n-\thead_flags |= vq->vq_packed.cached_flags;\n-\n-\tif (can_push) {\n-\t\t/* prepend cannot fail, checked by caller */\n-\t\thdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,\n-\t\t\t\t\t      -head_size);\n-\t\tprepend_header = true;\n-\n-\t\t/* if offload disabled, it is not zeroed below, do it now */\n-\t\tif (!vq->hw->has_tx_offload)\n-\t\t\tvirtqueue_clear_net_hdr(hdr);\n-\t} else {\n-\t\t/* setup first tx ring slot to point to header\n-\t\t * stored in reserved region.\n-\t\t */\n-\t\tstart_dp[idx].addr  = txvq->virtio_net_hdr_mem +\n-\t\t\tRTE_PTR_DIFF(&txr[idx].tx_hdr, txr);\n-\t\tstart_dp[idx].len   = vq->hw->vtnet_hdr_size;\n-\t\thdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;\n-\t\tidx++;\n-\t\tif (idx >= vq->vq_nentries) {\n-\t\t\tidx -= vq->vq_nentries;\n-\t\t\tvq->vq_packed.cached_flags ^=\n-\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n-\t\t}\n-\t}\n-\n-\tvirtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);\n-\n-\tdo {\n-\t\tuint16_t flags;\n-\n-\t\tstart_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);\n-\t\tstart_dp[idx].len  = cookie->data_len;\n-\t\tif (prepend_header) {\n-\t\t\tstart_dp[idx].addr -= head_size;\n-\t\t\tstart_dp[idx].len += head_size;\n-\t\t\tprepend_header = false;\n-\t\t}\n-\n-\t\tif (likely(idx != head_idx)) {\n-\t\t\tflags = cookie->next ? 
VRING_DESC_F_NEXT : 0;\n-\t\t\tflags |= vq->vq_packed.cached_flags;\n-\t\t\tstart_dp[idx].flags = flags;\n-\t\t}\n-\t\tprev = idx;\n-\t\tidx++;\n-\t\tif (idx >= vq->vq_nentries) {\n-\t\t\tidx -= vq->vq_nentries;\n-\t\t\tvq->vq_packed.cached_flags ^=\n-\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n-\t\t}\n-\t} while ((cookie = cookie->next) != NULL);\n-\n-\tstart_dp[prev].id = id;\n-\n-\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);\n-\tvq->vq_avail_idx = idx;\n-\n-\tif (!in_order) {\n-\t\tvq->vq_desc_head_idx = dxp->next;\n-\t\tif (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n-\t\t\tvq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;\n-\t}\n-\n-\tvirtqueue_store_flags_packed(head_dp, head_flags,\n-\t\t\t\t     vq->hw->weak_barriers);\n-}\n-\n static inline void\n virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t\t\tuint16_t needed, int use_indirect, int can_push,\n@@ -1246,7 +948,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)\n \treturn 0;\n }\n \n-#define VIRTIO_MBUF_BURST_SZ 64\n #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))\n uint16_t\n virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 6301c56b2..ca1c10499 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -10,6 +10,7 @@\n #include <rte_atomic.h>\n #include <rte_memory.h>\n #include <rte_mempool.h>\n+#include <rte_net.h>\n \n #include \"virtio_pci.h\"\n #include \"virtio_ring.h\"\n@@ -18,8 +19,10 @@\n \n struct rte_mbuf;\n \n+#define DEFAULT_TX_FREE_THRESH 32\n #define DEFAULT_RX_FREE_THRESH 32\n \n+#define VIRTIO_MBUF_BURST_SZ 64\n /*\n  * Per virtio_ring.h in Linux.\n  *     For virtio_pci on SMP, we don't need to order with respect to MMIO\n@@ -560,4 +563,303 @@ virtqueue_notify(struct virtqueue *vq)\n #define VIRTQUEUE_DUMP(vq) do { } while (0)\n #endif\n \n+/* avoid write operation when necessary, to lessen cache issues */\n+#define ASSIGN_UNLESS_EQUAL(var, val) do {\t\\\n+\ttypeof(var) var_ = (var);\t\t\\\n+\ttypeof(val) val_ = (val);\t\t\\\n+\tif ((var_) != (val_))\t\t\t\\\n+\t\t(var_) = (val_);\t\t\\\n+} while (0)\n+\n+#define virtqueue_clear_net_hdr(hdr) do {\t\t\\\n+\ttypeof(hdr) hdr_ = (hdr);\t\t\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);\t\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);\t\\\n+\tASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);\t\\\n+} while (0)\n+\n+static inline void\n+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,\n+\t\t\tstruct rte_mbuf *cookie,\n+\t\t\tbool offload)\n+{\n+\tif (offload) {\n+\t\tif (cookie->ol_flags & PKT_TX_TCP_SEG)\n+\t\t\tcookie->ol_flags |= PKT_TX_TCP_CKSUM;\n+\n+\t\tswitch (cookie->ol_flags & PKT_TX_L4_MASK) {\n+\t\tcase PKT_TX_UDP_CKSUM:\n+\t\t\thdr->csum_start = cookie->l2_len + cookie->l3_len;\n+\t\t\thdr->csum_offset = offsetof(struct rte_udp_hdr,\n+\t\t\t\tdgram_cksum);\n+\t\t\thdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;\n+\t\t\tbreak;\n+\n+\t\tcase PKT_TX_TCP_CKSUM:\n+\t\t\thdr->csum_start = cookie->l2_len + cookie->l3_len;\n+\t\t\thdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);\n+\t\t\thdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_offset, 
0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->flags, 0);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* TCP Segmentation Offload */\n+\t\tif (cookie->ol_flags & PKT_TX_TCP_SEG) {\n+\t\t\thdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?\n+\t\t\t\tVIRTIO_NET_HDR_GSO_TCPV6 :\n+\t\t\t\tVIRTIO_NET_HDR_GSO_TCPV4;\n+\t\t\thdr->gso_size = cookie->tso_segsz;\n+\t\t\thdr->hdr_len =\n+\t\t\t\tcookie->l2_len +\n+\t\t\t\tcookie->l3_len +\n+\t\t\t\tcookie->l4_len;\n+\t\t} else {\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);\n+\t\t}\n+\t}\n+}\n+\n+static inline void\n+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n+\t\t\t      uint16_t needed, int can_push, int in_order)\n+{\n+\tstruct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;\n+\tstruct vq_desc_extra *dxp;\n+\tstruct virtqueue *vq = txvq->vq;\n+\tstruct vring_packed_desc *start_dp, *head_dp;\n+\tuint16_t idx, id, head_idx, head_flags;\n+\tint16_t head_size = vq->hw->vtnet_hdr_size;\n+\tstruct virtio_net_hdr *hdr;\n+\tuint16_t prev;\n+\tbool prepend_header = false;\n+\n+\tid = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;\n+\n+\tdxp = &vq->vq_descx[id];\n+\tdxp->ndescs = needed;\n+\tdxp->cookie = cookie;\n+\n+\thead_idx = vq->vq_avail_idx;\n+\tidx = head_idx;\n+\tprev = head_idx;\n+\tstart_dp = vq->vq_packed.ring.desc;\n+\n+\thead_dp = &vq->vq_packed.ring.desc[idx];\n+\thead_flags = cookie->next ? VRING_DESC_F_NEXT : 0;\n+\thead_flags |= vq->vq_packed.cached_flags;\n+\n+\tif (can_push) {\n+\t\t/* prepend cannot fail, checked by caller */\n+\t\thdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,\n+\t\t\t\t\t      -head_size);\n+\t\tprepend_header = true;\n+\n+\t\t/* if offload disabled, it is not zeroed below, do it now */\n+\t\tif (!vq->hw->has_tx_offload)\n+\t\t\tvirtqueue_clear_net_hdr(hdr);\n+\t} else {\n+\t\t/* setup first tx ring slot to point to header\n+\t\t * stored in reserved region.\n+\t\t */\n+\t\tstart_dp[idx].addr  = txvq->virtio_net_hdr_mem +\n+\t\t\tRTE_PTR_DIFF(&txr[idx].tx_hdr, txr);\n+\t\tstart_dp[idx].len   = vq->hw->vtnet_hdr_size;\n+\t\thdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;\n+\t\tidx++;\n+\t\tif (idx >= vq->vq_nentries) {\n+\t\t\tidx -= vq->vq_nentries;\n+\t\t\tvq->vq_packed.cached_flags ^=\n+\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n+\t\t}\n+\t}\n+\n+\tvirtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);\n+\n+\tdo {\n+\t\tuint16_t flags;\n+\n+\t\tstart_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);\n+\t\tstart_dp[idx].len  = cookie->data_len;\n+\t\tif (prepend_header) {\n+\t\t\tstart_dp[idx].addr -= head_size;\n+\t\t\tstart_dp[idx].len += head_size;\n+\t\t\tprepend_header = false;\n+\t\t}\n+\n+\t\tif (likely(idx != head_idx)) {\n+\t\t\tflags = cookie->next ? 
VRING_DESC_F_NEXT : 0;\n+\t\t\tflags |= vq->vq_packed.cached_flags;\n+\t\t\tstart_dp[idx].flags = flags;\n+\t\t}\n+\t\tprev = idx;\n+\t\tidx++;\n+\t\tif (idx >= vq->vq_nentries) {\n+\t\t\tidx -= vq->vq_nentries;\n+\t\t\tvq->vq_packed.cached_flags ^=\n+\t\t\t\tVRING_PACKED_DESC_F_AVAIL_USED;\n+\t\t}\n+\t} while ((cookie = cookie->next) != NULL);\n+\n+\tstart_dp[prev].id = id;\n+\n+\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);\n+\tvq->vq_avail_idx = idx;\n+\n+\tif (!in_order) {\n+\t\tvq->vq_desc_head_idx = dxp->next;\n+\t\tif (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n+\t\t\tvq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;\n+\t}\n+\n+\tvirtqueue_store_flags_packed(head_dp, head_flags,\n+\t\t\t\t     vq->hw->weak_barriers);\n+}\n+\n+static void\n+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)\n+{\n+\tstruct vq_desc_extra *dxp;\n+\n+\tdxp = &vq->vq_descx[id];\n+\tvq->vq_free_cnt += dxp->ndescs;\n+\n+\tif (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)\n+\t\tvq->vq_desc_head_idx = id;\n+\telse\n+\t\tvq->vq_descx[vq->vq_desc_tail_idx].next = id;\n+\n+\tvq->vq_desc_tail_idx = id;\n+\tdxp->next = VQ_RING_DESC_CHAIN_END;\n+}\n+\n+static void\n+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)\n+{\n+\tuint16_t used_idx, id, curr_id, free_cnt = 0;\n+\tuint16_t size = vq->vq_nentries;\n+\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc;\n+\tstruct vq_desc_extra *dxp;\n+\n+\tused_idx = vq->vq_used_cons_idx;\n+\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n+\t * and wait for used desc in virtqueue.\n+\t */\n+\twhile (num > 0 && desc_is_used(&desc[used_idx], vq)) {\n+\t\tid = desc[used_idx].id;\n+\t\tdo {\n+\t\t\tcurr_id = used_idx;\n+\t\t\tdxp = &vq->vq_descx[used_idx];\n+\t\t\tused_idx += dxp->ndescs;\n+\t\t\tfree_cnt += dxp->ndescs;\n+\t\t\tnum -= dxp->ndescs;\n+\t\t\tif (used_idx >= size) {\n+\t\t\t\tused_idx -= size;\n+\t\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n+\t\t\t}\n+\t\t\tif (dxp->cookie != NULL) {\n+\t\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\t\tdxp->cookie = NULL;\n+\t\t\t}\n+\t\t} while (curr_id != id);\n+\t}\n+\tvq->vq_used_cons_idx = used_idx;\n+\tvq->vq_free_cnt += free_cnt;\n+}\n+\n+static void\n+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)\n+{\n+\tuint16_t used_idx, id;\n+\tuint16_t size = vq->vq_nentries;\n+\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc;\n+\tstruct vq_desc_extra *dxp;\n+\n+\tused_idx = vq->vq_used_cons_idx;\n+\t/* desc_is_used has a load-acquire or rte_cio_rmb inside\n+\t * and wait for used desc in virtqueue.\n+\t */\n+\twhile (num-- && desc_is_used(&desc[used_idx], vq)) {\n+\t\tid = desc[used_idx].id;\n+\t\tdxp = &vq->vq_descx[id];\n+\t\tvq->vq_used_cons_idx += dxp->ndescs;\n+\t\tif (vq->vq_used_cons_idx >= size) {\n+\t\t\tvq->vq_used_cons_idx -= size;\n+\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n+\t\t}\n+\t\tvq_ring_free_id_packed(vq, id);\n+\t\tif (dxp->cookie != NULL) {\n+\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\tdxp->cookie = NULL;\n+\t\t}\n+\t\tused_idx = vq->vq_used_cons_idx;\n+\t}\n+}\n+\n+/* Cleanup from completed transmits. 
*/\n+static inline void\n+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)\n+{\n+\tif (in_order)\n+\t\tvirtio_xmit_cleanup_inorder_packed(vq, num);\n+\telse\n+\t\tvirtio_xmit_cleanup_normal_packed(vq, num);\n+}\n+\n+static inline void\n+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)\n+{\n+\tuint16_t i, used_idx, desc_idx;\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct vring_used_elem *uep;\n+\t\tstruct vq_desc_extra *dxp;\n+\n+\t\tused_idx = (uint16_t)(vq->vq_used_cons_idx &\n+\t\t\t\t(vq->vq_nentries - 1));\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n+\n+\t\tdesc_idx = (uint16_t)uep->id;\n+\t\tdxp = &vq->vq_descx[desc_idx];\n+\t\tvq->vq_used_cons_idx++;\n+\t\tvq_ring_free_chain(vq, desc_idx);\n+\n+\t\tif (dxp->cookie != NULL) {\n+\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\tdxp->cookie = NULL;\n+\t\t}\n+\t}\n+}\n+\n+/* Cleanup from completed inorder transmits. */\n+static __rte_always_inline void\n+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)\n+{\n+\tuint16_t i, idx = vq->vq_used_cons_idx;\n+\tint16_t free_cnt = 0;\n+\tstruct vq_desc_extra *dxp = NULL;\n+\n+\tif (unlikely(num == 0))\n+\t\treturn;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tdxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];\n+\t\tfree_cnt += dxp->ndescs;\n+\t\tif (dxp->cookie != NULL) {\n+\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\tdxp->cookie = NULL;\n+\t\t}\n+\t}\n+\n+\tvq->vq_free_cnt += free_cnt;\n+\tvq->vq_used_cons_idx = idx;\n+}\n #endif /* _VIRTQUEUE_H_ */\n",
    "prefixes": [
        "v10",
        "5/9"
    ]
}
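
The Allow header above also lists PUT and PATCH. A hedged sketch of a partial update follows, assuming you hold a Patchwork API token with maintainer rights on the project; the token value and the chosen state are placeholders, and only the fields you supply are modified.

import requests

# Hypothetical token; updating a patch requires maintainer rights on the project.
TOKEN = "0123456789abcdef"  # placeholder, not a real credential

resp = requests.patch(
    "https://patches.dpdk.org/api/patches/69321/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded"},  # PATCH changes only the supplied fields
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])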