get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
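
The sketch below is a minimal, hypothetical example of driving the update methods from Python with the `requests` library; it is not part of the Patchwork output shown on this page. The token value is a placeholder, and whether fields such as "state" and "archived" (both visible in the response below) are writable depends on your account's rights for the project.

# Minimal sketch, assuming the `requests` library and a maintainer API token.
import requests

API_URL = "http://patches.dpdk.org/api/patches/120969/"
TOKEN = "0123456789abcdef"  # placeholder; substitute a real Patchwork API token

resp = requests.patch(
    API_URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},  # partial update of two fields
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])

A PUT request works the same way but is expected to carry the full writable representation rather than a subset of fields. The raw GET exchange below shows what the read side of the endpoint returns.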

GET /api/patches/120969/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 120969,
    "url": "http://patches.dpdk.org/api/patches/120969/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221216073844.24448-3-cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221216073844.24448-3-cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221216073844.24448-3-cheng1.jiang@intel.com",
    "date": "2022-12-16T07:38:43",
    "name": "[RFC,2/3] vhost: add batch enqueue in async vhost packed ring",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5e9ee37c2aab0fd7473b7d61e3a0b8449cc1d118",
    "submitter": {
        "id": 1530,
        "url": "http://patches.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221216073844.24448-3-cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 26160,
            "url": "http://patches.dpdk.org/api/series/26160/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=26160",
            "date": "2022-12-16T07:38:41",
            "name": "Async vhost packed ring optimization",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/26160/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/120969/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/120969/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B7E69A0542;\n\tFri, 16 Dec 2022 09:25:03 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id AB46C42D0B;\n\tFri, 16 Dec 2022 09:25:03 +0100 (CET)",
            "from mga06.intel.com (mga06b.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 7E8B140685\n for <dev@dpdk.org>; Fri, 16 Dec 2022 09:25:01 +0100 (CET)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 16 Dec 2022 00:25:00 -0800",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.118.237])\n by orsmga001.jf.intel.com with ESMTP; 16 Dec 2022 00:24:57 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1671179101; x=1702715101;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=q81ewjW6FncRZY78Hc3PvEVRytBnhct6NiLnFS0AXVY=;\n b=Og2r13+NFNQ3XuYtmG+USNw5haQge4ntkNeyI1e2aZ0JObj7EUFnbeZe\n BMzc+SIS7tttKoA4NdqlUOCoETtCaDO1XNGRvcCpmeMz2N9nyHbVCZmA6\n 2ugIUdMP688PcNBvry7vpTgd2LQNdq44DPSrS1jlShQQKOVAJrX8Op7r8\n iSnheHAf1cIaiAz76v+8Ap/DqrDzt+u01GMP+RoGKDnQ+AwVR4qaUiU3n\n j359ioum2L5Cg1pZxe1fm3k19lSFDVJewSUZy4TJn4TysTuXUhe/4FW1b\n ledQ7QYxWWRal6MOCDrBNvU/ra0IQyxroghyFT36cdqxPRxFmrvAEIJ56 A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10562\"; a=\"381137613\"",
            "E=Sophos;i=\"5.96,249,1665471600\"; d=\"scan'208\";a=\"381137613\"",
            "E=McAfee;i=\"6500,9779,10562\"; a=\"682221164\"",
            "E=Sophos;i=\"5.96,249,1665471600\"; d=\"scan'208\";a=\"682221164\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,\n wenwux.ma@intel.com,\n yuanx.wang@intel.com, yvonnex.yang@intel.com, xingguang.he@intel.com,\n Cheng Jiang <cheng1.jiang@intel.com>",
        "Subject": "[RFC 2/3] vhost: add batch enqueue in async vhost packed ring",
        "Date": "Fri, 16 Dec 2022 07:38:43 +0000",
        "Message-Id": "<20221216073844.24448-3-cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.35.1",
        "In-Reply-To": "<20221216073844.24448-1-cheng1.jiang@intel.com>",
        "References": "<20221216073844.24448-1-cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add batch enqueue function in asynchronous vhost packed ring to\nimprove the performance. Chained mbufs are not supported, it will\nbe handled in single enqueue function.\n\nSigned-off-by: Cheng Jiang <cheng1.jiang@intel.com>\n---\n lib/vhost/virtio_net.c | 159 ++++++++++++++++++++++++++++++++++++++++-\n 1 file changed, 157 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex 22f97d4f77..b87405ba54 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -432,6 +432,24 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,\n \tvq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);\n }\n \n+static __rte_always_inline void\n+vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,\n+\t\t\t\t uint64_t *lens,\n+\t\t\t\t uint16_t *ids)\n+{\n+\tuint16_t i;\n+\tstruct vhost_async *async = vq->async;\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\tasync->buffers_packed[async->buffer_idx_packed].id  = ids[i];\n+\t\tasync->buffers_packed[async->buffer_idx_packed].len = lens[i];\n+\t\tasync->buffers_packed[async->buffer_idx_packed].count = 1;\n+\t\tasync->buffer_idx_packed++;\n+\t\tif (async->buffer_idx_packed >= vq->size)\n+\t\t\tasync->buffer_idx_packed -= vq->size;\n+\t}\n+}\n+\n static __rte_always_inline void\n vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,\n \t\t\t\t\t  uint16_t id)\n@@ -1451,6 +1469,58 @@ virtio_dev_rx_sync_batch_check(struct virtio_net *dev,\n \treturn 0;\n }\n \n+static __rte_always_inline int\n+virtio_dev_rx_async_batch_check(struct vhost_virtqueue *vq,\n+\t\t\t   struct rte_mbuf **pkts,\n+\t\t\t   uint64_t *desc_addrs,\n+\t\t\t   uint64_t *lens,\n+\t\t\t   int16_t dma_id,\n+\t\t\t   uint16_t vchan_id)\n+{\n+\tbool wrap_counter = vq->avail_wrap_counter;\n+\tstruct vring_packed_desc *descs = vq->desc_packed;\n+\tuint16_t avail_idx = vq->last_avail_idx;\n+\tuint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\tuint16_t i;\n+\n+\tif (unlikely(avail_idx & PACKED_BATCH_MASK))\n+\t\treturn -1;\n+\n+\tif (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))\n+\t\treturn -1;\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\tif (unlikely(pkts[i]->next != NULL))\n+\t\t\treturn -1;\n+\t\tif (unlikely(!desc_is_avail(&descs[avail_idx + i],\n+\t\t\t\t\t    wrap_counter)))\n+\t\t\treturn -1;\n+\t}\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n+\t\tlens[i] = descs[avail_idx + i].len;\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\tif (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))\n+\t\t\treturn -1;\n+\t}\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n+\t\tdesc_addrs[i] =  descs[avail_idx + i].addr;\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\tif (unlikely(!desc_addrs[i]))\n+\t\t\treturn -1;\n+\t\tif (unlikely(lens[i] != descs[avail_idx + i].len))\n+\t\t\treturn -1;\n+\t}\n+\n+\tif (rte_dma_burst_capacity(dma_id, vchan_id) < PACKED_BATCH_SIZE)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n static __rte_always_inline void\n virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,\n \t\t\t   struct vhost_virtqueue *vq,\n@@ -1850,6 +1920,78 @@ virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \treturn 0;\n }\n \n+static __rte_always_inline void\n+virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,\n+\t\t\t   struct vhost_virtqueue *vq,\n+\t\t\t   struct rte_mbuf **pkts,\n+\t\t\t   uint64_t *desc_addrs,\n+\t\t\t   uint64_t *lens)\n+{\n+\tuint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\tstruct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];\n+\tstruct vring_packed_desc *descs = vq->desc_packed;\n+\tstruct vhost_async *async = vq->async;\n+\tuint16_t avail_idx = vq->last_avail_idx;\n+\tuint32_t mbuf_offset = 0;\n+\tuint16_t ids[PACKED_BATCH_SIZE];\n+\tuint64_t 
mapped_len[PACKED_BATCH_SIZE];\n+\tvoid *host_iova[PACKED_BATCH_SIZE];\n+\tuintptr_t desc;\n+\tuint16_t i;\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\trte_prefetch0((void *)(uintptr_t)desc_addrs[i]);\n+\t\tdesc = vhost_iova_to_vva(dev, vq, desc_addrs[i], &lens[i], VHOST_ACCESS_RW);\n+\t\thdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc;\n+\t\tlens[i] = pkts[i]->pkt_len +\n+\t\t\tsizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\t}\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n+\t\tvirtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);\n+\n+\tvq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\thost_iova[i] = (void *)(uintptr_t)gpa_to_first_hpa(dev,\n+\t\t\tdesc_addrs[i] + buf_offset, lens[i], &mapped_len[i]);\n+\t}\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n+\t\tasync_iter_initialize(dev, async);\n+\t\tasync_iter_add_iovec(dev, async,\n+\t\t\t\t(void *)(uintptr_t)rte_pktmbuf_iova_offset(pkts[i], mbuf_offset),\n+\t\t\t\thost_iova[i],\n+\t\t\t\tmapped_len[i]);\n+\t\tasync->iter_idx++;\n+\t}\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n+\t\tvhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr, lens[i]);\n+\n+\tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n+\t\tids[i] = descs[avail_idx + i].id;\n+\n+\tvhost_async_shadow_enqueue_packed_batch(vq, lens, ids);\n+}\n+\n+static __rte_always_inline int\n+virtio_dev_rx_async_packed_batch(struct virtio_net *dev,\n+\t\t\t   struct vhost_virtqueue *vq,\n+\t\t\t   struct rte_mbuf **pkts,\n+\t\t\t   int16_t dma_id, uint16_t vchan_id)\n+{\n+\tuint64_t desc_addrs[PACKED_BATCH_SIZE];\n+\tuint64_t lens[PACKED_BATCH_SIZE];\n+\n+\tif (virtio_dev_rx_async_batch_check(vq, pkts, desc_addrs, lens, dma_id, vchan_id) == -1)\n+\t\treturn -1;\n+\n+\tvirtio_dev_rx_async_packed_batch_enqueue(dev, vq, pkts, desc_addrs, lens);\n+\n+\treturn 0;\n+}\n+\n static __rte_always_inline void\n dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,\n \t\t\tuint32_t nr_err, uint32_t *pkt_idx)\n@@ -1885,7 +2027,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue\n \tstruct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)\n {\n \tuint32_t pkt_idx = 0;\n-\tuint32_t remained = count;\n \tuint16_t n_xfer;\n \tuint16_t num_buffers;\n \tuint16_t num_descs;\n@@ -1894,10 +2035,25 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue\n \tstruct async_inflight_info *pkts_info = async->pkts_info;\n \tuint32_t pkt_err = 0;\n \tuint16_t slot_idx = 0;\n+\tuint16_t i;\n \n \tdo {\n \t\trte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);\n \n+\t\tif (count - pkt_idx >= PACKED_BATCH_SIZE) {\n+\t\t\tif (!virtio_dev_rx_async_packed_batch(dev, vq, &pkts[pkt_idx],\n+\t\t\t\t\tdma_id, vchan_id)) {\n+\t\t\t\tfor (i = 0; i < PACKED_BATCH_SIZE; i++) {\n+\t\t\t\t\tslot_idx = (async->pkts_idx + pkt_idx) % vq->size;\n+\t\t\t\t\tpkts_info[slot_idx].descs = 1;\n+\t\t\t\t\tpkts_info[slot_idx].nr_buffers = 1;\n+\t\t\t\t\tpkts_info[slot_idx].mbuf = pkts[pkt_idx];\n+\t\t\t\t\tpkt_idx++;\n+\t\t\t\t}\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t}\n+\n \t\tnum_buffers = 0;\n \t\tnum_descs = 0;\n \t\tif (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],\n@@ -1911,7 +2067,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue\n \t\tpkts_info[slot_idx].mbuf = pkts[pkt_idx];\n \n \t\tpkt_idx++;\n-\t\tremained--;\n \t\tvq_inc_last_avail_packed(vq, 
num_descs);\n \t} while (pkt_idx < count);\n \n",
    "prefixes": [
        "RFC",
        "2/3"
    ]
}
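
As a read-side companion, here is another minimal sketch, again assuming the `requests` library: it fetches the record shown above and then downloads the patch's mbox using the "mbox" URL from the response, so the change can be applied with git am. The local file name is arbitrary.

# Minimal sketch, assuming the `requests` library; URLs come from the response above.
import requests

patch = requests.get(
    "http://patches.dpdk.org/api/patches/120969/",
    headers={"Accept": "application/json"},
    timeout=30,
)
patch.raise_for_status()
data = patch.json()
print(data["name"], "-", data["state"])  # e.g. "[RFC,2/3] vhost: ..." - "superseded"

mbox = requests.get(data["mbox"], timeout=30)
mbox.raise_for_status()
with open("rfc-2-3-vhost-batch-enqueue.mbox", "wb") as f:  # arbitrary local name
    f.write(mbox.content)

Applying the saved file with git am should reproduce the commit whose diff is embedded in the "diff" field of the response.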