get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update: all writable fields must be supplied).
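
The page below only demonstrates GET; here is a minimal sketch of driving the update endpoints from Python with the requests library. It assumes a Patchwork API token whose user has maintainer rights on the project; the token string and the field values in the payload are placeholders, not taken from this page.

import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/85769/"
# Placeholder token; write access requires maintainer rights on the project.
HEADERS = {"Authorization": "Token 0123456789abcdef"}

# PATCH is a partial update: only the fields present in the payload change.
resp = requests.patch(PATCH_URL, headers=HEADERS,
                      json={"state": "superseded", "archived": True})
resp.raise_for_status()
print(resp.json()["state"])  # echo the state the server recorded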

GET /api/patches/85769/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 85769,
    "url": "http://patches.dpdk.org/api/patches/85769/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201228071610.105028-3-Cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201228071610.105028-3-Cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201228071610.105028-3-Cheng1.jiang@intel.com",
    "date": "2020-12-28T07:16:10",
    "name": "[v5,2/2] examples/vhost: refactor vhost data path",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8c4c594b4ba6400874d194d23b5e1b988f049833",
    "submitter": {
        "id": 1530,
        "url": "http://patches.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201228071610.105028-3-Cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 14479,
            "url": "http://patches.dpdk.org/api/series/14479/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=14479",
            "date": "2020-12-28T07:16:08",
            "name": "examples/vhost: sample code refactor",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/14479/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/85769/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/85769/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 16DE8A09FF;\n\tMon, 28 Dec 2020 08:28:46 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 05601CA16;\n\tMon, 28 Dec 2020 08:28:21 +0100 (CET)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n by dpdk.org (Postfix) with ESMTP id 4B6942C2D\n for <dev@dpdk.org>; Mon, 28 Dec 2020 08:28:19 +0100 (CET)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 27 Dec 2020 23:28:17 -0800",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.119.112])\n by orsmga006.jf.intel.com with ESMTP; 27 Dec 2020 23:28:10 -0800"
        ],
        "IronPort-SDR": [
            "\n YfuoPQKQeGGep899j3oRgNpeVLGKRJzBEqanMmTsxa3s6en1gz5DbPmYrwf7sNP0yTlZ+r2m1C\n jWfXS5bixKXA==",
            "\n 5OngMtxBgn3Wck0qV2J4d3AgFTAo816iJza9K9iYxDieWBiyxWiWcCi8Z1lyjHs1dBSlk6xxCE\n CQmnOHJbhHAQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9847\"; a=\"194832409\"",
            "E=Sophos;i=\"5.78,454,1599548400\"; d=\"scan'208\";a=\"194832409\"",
            "E=Sophos;i=\"5.78,454,1599548400\"; d=\"scan'208\";a=\"347502468\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <Cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "dev@dpdk.org, Jiayu.Hu@intel.com, YvonneX.Yang@intel.com,\n Cheng Jiang <Cheng1.jiang@intel.com>",
        "Date": "Mon, 28 Dec 2020 07:16:10 +0000",
        "Message-Id": "<20201228071610.105028-3-Cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201228071610.105028-1-Cheng1.jiang@intel.com>",
        "References": "<20201218113327.70528-1-Cheng1.jiang@intel.com>\n <20201228071610.105028-1-Cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v5 2/2] examples/vhost: refactor vhost data path",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Change the vm2vm data path to batch enqueue for better performance.\nSupport latest async vhost API, refactor vhost async data path,\nreplace rte_atomicNN_xxx to atomic_XXX and clean some codes.\n\nSigned-off-by: Cheng Jiang <Cheng1.jiang@intel.com>\n---\n examples/vhost/main.c | 214 ++++++++++++++++++++++++++++++++----------\n examples/vhost/main.h |   7 +-\n 2 files changed, 166 insertions(+), 55 deletions(-)",
    "diff": "diff --git a/examples/vhost/main.c b/examples/vhost/main.c\nindex 8d8c3038b..45976c93c 100644\n--- a/examples/vhost/main.c\n+++ b/examples/vhost/main.c\n@@ -179,9 +179,22 @@ struct mbuf_table {\n \tstruct rte_mbuf *m_table[MAX_PKT_BURST];\n };\n \n+struct vhost_bufftable {\n+\tuint32_t len;\n+\tuint64_t pre_tsc;\n+\tstruct rte_mbuf *m_table[MAX_PKT_BURST];\n+};\n+\n /* TX queue for each data core. */\n struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];\n \n+/*\n+ * Vhost TX buffer for each data core.\n+ * Every data core maintains a TX buffer for every vhost device,\n+ * which is used for batch pkts enqueue for higher performance.\n+ */\n+struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];\n+\n #define MBUF_TABLE_DRAIN_TSC\t((rte_get_tsc_hz() + US_PER_S - 1) \\\n \t\t\t\t / US_PER_S * BURST_TX_DRAIN_US)\n #define VLAN_HLEN       4\n@@ -804,39 +817,112 @@ unlink_vmdq(struct vhost_dev *vdev)\n \t}\n }\n \n+static inline void\n+free_pkts(struct rte_mbuf **pkts, uint16_t n)\n+{\n+\twhile (n--)\n+\t\trte_pktmbuf_free(pkts[n]);\n+}\n+\n static __rte_always_inline void\n-virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,\n+complete_async_pkts(struct vhost_dev *vdev)\n+{\n+\tstruct rte_mbuf *p_cpl[MAX_PKT_BURST];\n+\tuint16_t complete_count;\n+\n+\tcomplete_count = rte_vhost_poll_enqueue_completed(vdev->vid,\n+\t\t\t\t\tVIRTIO_RXQ, p_cpl, MAX_PKT_BURST);\n+\tif (complete_count) {\n+\t\tatomic_fetch_sub(&vdev->nr_async_pkts, complete_count);\n+\t\tfree_pkts(p_cpl, complete_count);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,\n \t    struct rte_mbuf *m)\n {\n \tuint16_t ret;\n-\tstruct rte_mbuf *m_cpl[1];\n \n \tif (builtin_net_driver) {\n \t\tret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);\n-\t} else if (async_vhost_driver) {\n-\t\tret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,\n-\t\t\t\t\t\t&m, 1);\n-\n-\t\tif (likely(ret))\n-\t\t\tdst_vdev->nr_async_pkts++;\n-\n-\t\twhile (likely(dst_vdev->nr_async_pkts)) {\n-\t\t\tif (rte_vhost_poll_enqueue_completed(dst_vdev->vid,\n-\t\t\t\t\tVIRTIO_RXQ, m_cpl, 1))\n-\t\t\t\tdst_vdev->nr_async_pkts--;\n-\t\t}\n \t} else {\n \t\tret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);\n \t}\n \n \tif (enable_stats) {\n-\t\trte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);\n-\t\trte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);\n+\t\tatomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1);\n+\t\tatomic_fetch_add(&dst_vdev->stats.rx_atomic, ret);\n \t\tsrc_vdev->stats.tx_total++;\n \t\tsrc_vdev->stats.tx += ret;\n \t}\n }\n \n+static __rte_always_inline void\n+drain_vhost(struct vhost_dev *vdev)\n+{\n+\tuint16_t ret;\n+\tuint64_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;\n+\tuint16_t nr_xmit = vhost_txbuff[buff_idx]->len;\n+\tstruct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;\n+\n+\tif (builtin_net_driver) {\n+\t\tret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);\n+\t} else if (async_vhost_driver) {\n+\t\tuint32_t cpu_cpl_nr = 0;\n+\t\tuint16_t enqueue_fail = 0;\n+\t\tstruct rte_mbuf *m_cpu_cpl[nr_xmit];\n+\n+\t\tcomplete_async_pkts(vdev);\n+\t\tret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,\n+\t\t\t\t\tm, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);\n+\t\tatomic_fetch_add(&vdev->nr_async_pkts, ret - cpu_cpl_nr);\n+\n+\t\tif (cpu_cpl_nr)\n+\t\t\tfree_pkts(m_cpu_cpl, cpu_cpl_nr);\n+\n+\t\tenqueue_fail = nr_xmit - ret;\n+\t\tif (enqueue_fail)\n+\t\t\tfree_pkts(&m[ret], nr_xmit - 
ret);\n+\t} else {\n+\t\tret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,\n+\t\t\t\t\t\tm, nr_xmit);\n+\t}\n+\n+\tif (enable_stats) {\n+\t\tatomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit);\n+\t\tatomic_fetch_add(&vdev->stats.rx_atomic, ret);\n+\t}\n+\n+\tif (!async_vhost_driver)\n+\t\tfree_pkts(m, nr_xmit);\n+}\n+\n+static __rte_always_inline void\n+drain_vhost_table(void)\n+{\n+\tuint16_t lcore_id = rte_lcore_id();\n+\tstruct vhost_bufftable *vhost_txq;\n+\tstruct vhost_dev *vdev;\n+\tuint64_t cur_tsc;\n+\n+\tTAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {\n+\t\tvhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE\n+\t\t\t\t\t\t+ vdev->vid];\n+\n+\t\tcur_tsc = rte_rdtsc();\n+\t\tif (unlikely(cur_tsc - vhost_txq->pre_tsc\n+\t\t\t\t> MBUF_TABLE_DRAIN_TSC)) {\n+\t\t\tRTE_LOG_DP(DEBUG, VHOST_DATA,\n+\t\t\t\t\"Vhost TX queue drained after timeout with burst size %u\\n\",\n+\t\t\t\tvhost_txq->len);\n+\t\t\tdrain_vhost(vdev);\n+\t\t\tvhost_txq->len = 0;\n+\t\t\tvhost_txq->pre_tsc = cur_tsc;\n+\t\t}\n+\t}\n+}\n+\n /*\n  * Check if the packet destination MAC address is for a local device. If so then put\n  * the packet on that devices RX queue. If not then return.\n@@ -846,7 +932,8 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)\n {\n \tstruct rte_ether_hdr *pkt_hdr;\n \tstruct vhost_dev *dst_vdev;\n-\n+\tstruct vhost_bufftable *vhost_txq;\n+\tuint16_t lcore_id = rte_lcore_id();\n \tpkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);\n \n \tdst_vdev = find_vhost_dev(&pkt_hdr->d_addr);\n@@ -869,7 +956,19 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)\n \t\treturn 0;\n \t}\n \n-\tvirtio_xmit(dst_vdev, vdev, m);\n+\tvhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];\n+\tvhost_txq->m_table[vhost_txq->len++] = m;\n+\n+\tif (enable_stats) {\n+\t\tvdev->stats.tx_total++;\n+\t\tvdev->stats.tx++;\n+\t}\n+\n+\tif (unlikely(vhost_txq->len == MAX_PKT_BURST)) {\n+\t\tdrain_vhost(dst_vdev);\n+\t\tvhost_txq->len = 0;\n+\t\tvhost_txq->pre_tsc = rte_rdtsc();\n+\t}\n \treturn 0;\n }\n \n@@ -940,13 +1039,6 @@ static void virtio_tx_offload(struct rte_mbuf *m)\n \ttcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);\n }\n \n-static inline void\n-free_pkts(struct rte_mbuf **pkts, uint16_t n)\n-{\n-\twhile (n--)\n-\t\trte_pktmbuf_free(pkts[n]);\n-}\n-\n static __rte_always_inline void\n do_drain_mbuf_table(struct mbuf_table *tx_q)\n {\n@@ -979,16 +1071,14 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)\n \n \t\tTAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {\n \t\t\tif (vdev2 != vdev)\n-\t\t\t\tvirtio_xmit(vdev2, vdev, m);\n+\t\t\t\tsync_virtio_xmit(vdev2, vdev, m);\n \t\t}\n \t\tgoto queue2nic;\n \t}\n \n \t/*check if destination is local VM*/\n-\tif ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {\n-\t\trte_pktmbuf_free(m);\n+\tif ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))\n \t\treturn;\n-\t}\n \n \tif (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {\n \t\tif (unlikely(find_local_dest(vdev, m, &offset,\n@@ -1073,19 +1163,6 @@ drain_mbuf_table(struct mbuf_table *tx_q)\n \t}\n }\n \n-static __rte_always_inline void\n-complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)\n-{\n-\tstruct rte_mbuf *p_cpl[MAX_PKT_BURST];\n-\tuint16_t complete_count;\n-\n-\tcomplete_count = rte_vhost_poll_enqueue_completed(vdev->vid,\n-\t\t\t\t\t\tqid, p_cpl, MAX_PKT_BURST);\n-\tvdev->nr_async_pkts -= complete_count;\n-\tif (complete_count)\n-\t\tfree_pkts(p_cpl, 
complete_count);\n-}\n-\n static __rte_always_inline void\n drain_eth_rx(struct vhost_dev *vdev)\n {\n@@ -1095,9 +1172,6 @@ drain_eth_rx(struct vhost_dev *vdev)\n \trx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,\n \t\t\t\t    pkts, MAX_PKT_BURST);\n \n-\twhile (likely(vdev->nr_async_pkts))\n-\t\tcomplete_async_pkts(vdev, VIRTIO_RXQ);\n-\n \tif (!rx_count)\n \t\treturn;\n \n@@ -1123,17 +1197,31 @@ drain_eth_rx(struct vhost_dev *vdev)\n \t\tenqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,\n \t\t\t\t\t\tpkts, rx_count);\n \t} else if (async_vhost_driver) {\n+\t\tuint32_t cpu_cpl_nr = 0;\n+\t\tuint16_t enqueue_fail = 0;\n+\t\tstruct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];\n+\n+\t\tcomplete_async_pkts(vdev);\n \t\tenqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,\n-\t\t\t\t\tVIRTIO_RXQ, pkts, rx_count);\n-\t\tvdev->nr_async_pkts += enqueue_count;\n+\t\t\t\t\tVIRTIO_RXQ, pkts, rx_count,\n+\t\t\t\t\tm_cpu_cpl, &cpu_cpl_nr);\n+\t\tatomic_fetch_add(&vdev->nr_async_pkts,\n+\t\t\t\t\tenqueue_count - cpu_cpl_nr);\n+\t\tif (cpu_cpl_nr)\n+\t\t\tfree_pkts(m_cpu_cpl, cpu_cpl_nr);\n+\n+\t\tenqueue_fail = rx_count - enqueue_count;\n+\t\tif (enqueue_fail)\n+\t\t\tfree_pkts(&pkts[enqueue_count], enqueue_fail);\n+\n \t} else {\n \t\tenqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,\n \t\t\t\t\t\tpkts, rx_count);\n \t}\n \n \tif (enable_stats) {\n-\t\trte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);\n-\t\trte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);\n+\t\tatomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count);\n+\t\tatomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count);\n \t}\n \n \tif (!async_vhost_driver)\n@@ -1202,7 +1290,7 @@ switch_worker(void *arg __rte_unused)\n \n \twhile(1) {\n \t\tdrain_mbuf_table(tx_q);\n-\n+\t\tdrain_vhost_table();\n \t\t/*\n \t\t * Inform the configuration core that we have exited the\n \t\t * linked list and that no devices are in use if requested.\n@@ -1243,6 +1331,7 @@ destroy_device(int vid)\n {\n \tstruct vhost_dev *vdev = NULL;\n \tint lcore;\n+\tuint16_t i;\n \n \tTAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {\n \t\tif (vdev->vid == vid)\n@@ -1256,6 +1345,9 @@ destroy_device(int vid)\n \t\trte_pause();\n \t}\n \n+\tfor (i = 0; i < RTE_MAX_LCORE; i++)\n+\t\trte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);\n+\n \tif (builtin_net_driver)\n \t\tvs_vhost_net_remove(vdev);\n \n@@ -1298,6 +1390,7 @@ static int\n new_device(int vid)\n {\n \tint lcore, core_add = 0;\n+\tuint16_t i;\n \tuint32_t device_num_min = num_devices;\n \tstruct vhost_dev *vdev;\n \tvdev = rte_zmalloc(\"vhost device\", sizeof(*vdev), RTE_CACHE_LINE_SIZE);\n@@ -1309,6 +1402,19 @@ new_device(int vid)\n \t}\n \tvdev->vid = vid;\n \n+\tfor (i = 0; i < RTE_MAX_LCORE; i++) {\n+\t\tvhost_txbuff[i * MAX_VHOST_DEVICE + vid]\n+\t\t\t= rte_zmalloc(\"vhost bufftable\",\n+\t\t\t\tsizeof(struct vhost_bufftable),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\n+\t\tif (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {\n+\t\t\tRTE_LOG(INFO, VHOST_DATA,\n+\t\t\t  \"(%d) couldn't allocate memory for vhost TX\\n\", vid);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n \tif (builtin_net_driver)\n \t\tvs_vhost_net_setup(vdev);\n \n@@ -1343,12 +1449,15 @@ new_device(int vid)\n \tif (async_vhost_driver) {\n \t\tstruct rte_vhost_async_features f;\n \t\tstruct rte_vhost_async_channel_ops channel_ops;\n+\n \t\tif (strncmp(dma_type, \"ioat\", 4) == 0) {\n \t\t\tchannel_ops.transfer_data = ioat_transfer_data_cb;\n \t\t\tchannel_ops.check_completed_copies =\n 
\t\t\t\tioat_check_completed_copies_cb;\n+\n \t\t\tf.async_inorder = 1;\n \t\t\tf.async_threshold = 256;\n+\n \t\t\treturn rte_vhost_async_channel_register(vid, VIRTIO_RXQ,\n \t\t\t\tf.intval, &channel_ops);\n \t\t}\n@@ -1392,8 +1501,8 @@ print_stats(__rte_unused void *arg)\n \t\t\ttx         = vdev->stats.tx;\n \t\t\ttx_dropped = tx_total - tx;\n \n-\t\t\trx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);\n-\t\t\trx         = rte_atomic64_read(&vdev->stats.rx_atomic);\n+\t\t\trx_total   = atomic_load(&vdev->stats.rx_total_atomic);\n+\t\t\trx         = atomic_load(&vdev->stats.rx_atomic);\n \t\t\trx_dropped = rx_total - rx;\n \n \t\t\tprintf(\"Statistics for device %d\\n\"\n@@ -1592,6 +1701,7 @@ main(int argc, char *argv[])\n \t/* Register vhost user driver to handle vhost messages. */\n \tfor (i = 0; i < nb_sockets; i++) {\n \t\tchar *file = socket_files + i * PATH_MAX;\n+\n \t\tif (async_vhost_driver)\n \t\t\tflags = flags | RTE_VHOST_USER_ASYNC_COPY;\n \ndiff --git a/examples/vhost/main.h b/examples/vhost/main.h\nindex 4317b6ae8..6aa798a3e 100644\n--- a/examples/vhost/main.h\n+++ b/examples/vhost/main.h\n@@ -8,6 +8,7 @@\n #include <sys/queue.h>\n \n #include <rte_ether.h>\n+#include <stdatomic.h>\n \n /* Macros for printing using RTE_LOG */\n #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1\n@@ -21,8 +22,8 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};\n struct device_statistics {\n \tuint64_t\ttx;\n \tuint64_t\ttx_total;\n-\trte_atomic64_t\trx_atomic;\n-\trte_atomic64_t\trx_total_atomic;\n+\tatomic_int_least64_t\trx_atomic;\n+\tatomic_int_least64_t\trx_total_atomic;\n };\n \n struct vhost_queue {\n@@ -51,7 +52,7 @@ struct vhost_dev {\n \tuint64_t features;\n \tsize_t hdr_len;\n \tuint16_t nr_vrings;\n-\tuint16_t nr_async_pkts;\n+\tatomic_int_least16_t nr_async_pkts;\n \tstruct rte_vhost_memory *mem;\n \tstruct device_statistics stats;\n \tTAILQ_ENTRY(vhost_dev) global_vdev_entry;\n",
    "prefixes": [
        "v5",
        "2/2"
    ]
}
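
For the read side, here is a short sketch of consuming the response shown above: it fetches the same record as JSON, prints the fields a reviewer typically checks, and saves the raw mbox so the patch can be applied locally. All URLs are taken verbatim from the response; the assumption that reads need no authentication reflects this server's public configuration.

import requests

# Fetch the patch record as JSON rather than the browsable API page.
patch = requests.get("http://patches.dpdk.org/api/patches/85769/",
                     params={"format": "json"}).json()

print(patch["name"])   # "[v5,2/2] examples/vhost: refactor vhost data path"
print(patch["state"])  # "superseded"
print(patch["check"])  # aggregate CI result; "fail" for this patch

# Save the raw mbox; it can then be applied with: git am 85769.mbox
with open("85769.mbox", "w", encoding="utf-8") as f:
    f.write(requests.get(patch["mbox"]).text)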