get:
Show a patch.

patch:
Partially update a patch (only the supplied fields).

put:
Update a patch (all writable fields).
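For reference, a hedged sketch (not part of the API output) of fetching this endpoint from C with libcurl. The URL matches the GET echoed below; a real client would register a write callback, and PUT/PATCH would additionally need an API token.

/* Sketch only: GET this patch's detail as JSON with libcurl.
 * Build: cc fetch_patch.c -o fetch_patch -lcurl */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
	CURL *curl = curl_easy_init();
	CURLcode res;

	if (curl == NULL)
		return 1;

	curl_easy_setopt(curl, CURLOPT_URL,
			 "https://patches.dpdk.org/api/patches/60373/");
	/* With no write callback set, libcurl writes the body to stdout. */
	res = curl_easy_perform(curl);
	if (res != CURLE_OK)
		fprintf(stderr, "GET failed: %s\n", curl_easy_strerror(res));

	curl_easy_cleanup(curl);
	return res == CURLE_OK ? 0 : 1;
}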

GET /api/patches/60373/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 60373,
    "url": "https://patches.dpdk.org/api/patches/60373/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20191001221935.12140-1-fbl@sysclose.org/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191001221935.12140-1-fbl@sysclose.org>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191001221935.12140-1-fbl@sysclose.org",
    "date": "2019-10-01T22:19:35",
    "name": "vhost: add support to large linear mbufs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e38814b1be0060b051cc0a0b6d5fba334ca768ba",
    "submitter": {
        "id": 159,
        "url": "https://patches.dpdk.org/api/people/159/?format=api",
        "name": "Flavio Leitner",
        "email": "fbl@sysclose.org"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20191001221935.12140-1-fbl@sysclose.org/mbox/",
    "series": [
        {
            "id": 6659,
            "url": "https://patches.dpdk.org/api/series/6659/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6659",
            "date": "2019-10-01T22:19:35",
            "name": "vhost: add support to large linear mbufs",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/6659/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/60373/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/60373/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 921B61BDFD;\n\tWed,  2 Oct 2019 00:20:03 +0200 (CEST)",
            "from sysclose.org (smtp.sysclose.org [69.164.214.230])\n\tby dpdk.org (Postfix) with ESMTP id B11463256\n\tfor <dev@dpdk.org>; Wed,  2 Oct 2019 00:20:02 +0200 (CEST)",
            "by sysclose.org (Postfix, from userid 5001)\n\tid 5A2796630; Tue,  1 Oct 2019 22:20:22 +0000 (UTC)",
            "from localhost (unknown [177.183.215.210])\n\tby sysclose.org (Postfix) with ESMTPSA id E07A065B4;\n\tTue,  1 Oct 2019 22:20:20 +0000 (UTC)"
        ],
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 sysclose.org 5A2796630",
            "OpenDKIM Filter v2.11.0 sysclose.org E07A065B4"
        ],
        "DKIM-Signature": [
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=sysclose.org;\n\ts=201903; t=1569968422;\n\tbh=1C7ZvkiuS4fyRZkVddFkjhBlXf0uta/yUEuodym4qyQ=;\n\th=From:To:Cc:Subject:Date:From;\n\tb=JAjcJJcaZeN7tL2re6awAyeDaX3Ci7SCHLeDO9vQMNpxgOq6hwomA9lBLG9hPZPkC\n\tbEsKKxWc6dmqkfZUq9zCaVSAQKL42WDtRs801fcC4wCezCiRorJsztnMHDubTxhtG7\n\t7Y7dQzd6PqfVJ2+i8mKYHRvMinsH3mYOwVvg28lPl21F85zYnKkJf8rQV3XweCRLFC\n\tGRF24S/yMyK4tuMYiWZ0fFdxIQ2zTByAubYAi8tGifPaDH7nVGMVBQPb4Ry354wFt4\n\tky4w/m67Tp9f8PHsw2Xf40JH8MkQOPC+FeQhY3yZBjr34OsHPT7pXTwKZnk02L0mLF\n\t5H/YK3WWBdpSg==",
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=sysclose.org;\n\ts=201903; t=1569968421;\n\tbh=1C7ZvkiuS4fyRZkVddFkjhBlXf0uta/yUEuodym4qyQ=;\n\th=From:To:Cc:Subject:Date:From;\n\tb=ONh6ZjsZTY3HwpnCMDY5Obt3uNCB2+ETJwYpt3hQSj0MB1yQbjSM8AsmPiBK4OEd/\n\tnvFqklanMnkfDCtpusY262AGH7WDFVUNQZIWt19zRrOl5ELcyvKvnXN0ihbkjL8MF7\n\tPM5fCJjNMW/Rcvwa26+zJxpEGfzej3b/DzP5utMDiEpI9hdb2c/uggdBjhr15OAqmB\n\tEekJjllk5346sBDeyuxsV47RQ1g68bw59D6ZHAbhhUN9VibbiiPMji4Z+v3nhjohox\n\tw0+DKfUw1+8rvKY74OZDglVSJ1iBUoQoKVRpA57DNBYVUSORorae23Ks33/Myd12Wg\n\tW93YxeHeizfhw=="
        ],
        "X-Spam-Checker-Version": "SpamAssassin 3.4.0 (2014-02-07) on mail.sysclose.org",
        "X-Spam-Level": "",
        "X-Spam-Status": "No, score=-1.1 required=5.0 tests=ALL_TRUSTED,DKIM_SIGNED,\n\tDKIM_VALID,\n\tDKIM_VALID_AU autolearn=ham autolearn_force=no version=3.4.0",
        "From": "Flavio Leitner <fbl@sysclose.org>",
        "To": "dev@dpdk.org",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>,\n\tTiwei Bie <tiwei.bie@intel.com>, Zhihong Wang <zhihong.wang@intel.com>,\n\tObrembski MichalX <michalx.obrembski@intel.com>,\n\tStokes Ian <ian.stokes@intel.com>",
        "Date": "Tue,  1 Oct 2019 19:19:35 -0300",
        "Message-Id": "<20191001221935.12140-1-fbl@sysclose.org>",
        "X-Mailer": "git-send-email 2.20.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH] vhost: add support to large linear mbufs",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The rte_vhost_dequeue_burst supports two ways of dequeuing data. If\nthe data fits into a buffer, then all data is copied and a single\nlinear buffer is returned. Otherwise it allocates additional mbufs\nand chains them together to return a multiple segments mbuf.\n\nWhile that covers most use cases, it forces applications that need\nto work with larger data sizes to support multiple segments mbufs.\nThe non-linear characteristic brings complexity and performance\nimplications to the application.\n\nTo resolve the issue, change the API so that the application can\noptionally provide a second mempool containing larger mbufs. If that\nis not provided (NULL), the behavior remains as before the change.\nOtherwise, the data size is checked and the corresponding mempool\nis used to return linear mbufs.\n\nSigned-off-by: Flavio Leitner <fbl@sysclose.org>\n---\n drivers/net/vhost/rte_eth_vhost.c |  4 +--\n examples/tep_termination/main.c   |  2 +-\n examples/vhost/main.c             |  2 +-\n lib/librte_vhost/rte_vhost.h      |  5 ++-\n lib/librte_vhost/virtio_net.c     | 57 +++++++++++++++++++++++--------\n 5 files changed, 50 insertions(+), 20 deletions(-)",
    "diff": "diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c\nindex 46f01a7f4..ce7f68a5b 100644\n--- a/drivers/net/vhost/rte_eth_vhost.c\n+++ b/drivers/net/vhost/rte_eth_vhost.c\n@@ -393,8 +393,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n \t\t\t\t\t\t VHOST_MAX_PKT_BURST);\n \n \t\tnb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,\n-\t\t\t\t\t\t  r->mb_pool, &bufs[nb_rx],\n-\t\t\t\t\t\t  num);\n+\t\t\t\t\t\t  r->mb_pool, NULL,\n+\t\t\t\t\t\t  &bufs[nb_rx], num);\n \n \t\tnb_rx += nb_pkts;\n \t\tnb_receive -= nb_pkts;\ndiff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c\nindex ab956ad7c..3ebf0fa6e 100644\n--- a/examples/tep_termination/main.c\n+++ b/examples/tep_termination/main.c\n@@ -697,7 +697,7 @@ switch_worker(__rte_unused void *arg)\n \t\t\tif (likely(!vdev->remove)) {\n \t\t\t\t/* Handle guest TX*/\n \t\t\t\ttx_count = rte_vhost_dequeue_burst(vdev->vid,\n-\t\t\t\t\t\tVIRTIO_TXQ, mbuf_pool,\n+\t\t\t\t\t\tVIRTIO_TXQ, mbuf_pool, NULL,\n \t\t\t\t\t\tpkts_burst, MAX_PKT_BURST);\n \t\t\t\t/* If this is the first received packet we need to learn the MAC */\n \t\t\t\tif (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {\ndiff --git a/examples/vhost/main.c b/examples/vhost/main.c\nindex ab649bf14..e9b306af3 100644\n--- a/examples/vhost/main.c\n+++ b/examples/vhost/main.c\n@@ -1092,7 +1092,7 @@ drain_virtio_tx(struct vhost_dev *vdev)\n \t\t\t\t\tpkts, MAX_PKT_BURST);\n \t} else {\n \t\tcount = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,\n-\t\t\t\t\tmbuf_pool, pkts, MAX_PKT_BURST);\n+\t\t\t\t\tmbuf_pool, NULL, pkts, MAX_PKT_BURST);\n \t}\n \n \t/* setup VMDq for the first packet */\ndiff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h\nindex 19474bca0..b05fd8e2a 100644\n--- a/lib/librte_vhost/rte_vhost.h\n+++ b/lib/librte_vhost/rte_vhost.h\n@@ -593,6 +593,8 @@ uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,\n  *  virtio queue index in mq case\n  * @param mbuf_pool\n  *  mbuf_pool where host mbuf is allocated.\n+ * @param mbuf_pool_large\n+ *  mbuf_pool where larger host mbuf is allocated.\n  * @param pkts\n  *  array to contain packets to be dequeued\n  * @param count\n@@ -601,7 +603,8 @@ uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,\n  *  num of packets dequeued\n  */\n uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n-\tstruct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);\n+\tstruct rte_mempool *mbuf_pool, struct rte_mempool *mbuf_pool_large,\n+\tstruct rte_mbuf **pkts, uint16_t count);\n \n /**\n  * Get guest mem table: a list of memory regions.\ndiff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c\nindex 5b85b832d..da9d77732 100644\n--- a/lib/librte_vhost/virtio_net.c\n+++ b/lib/librte_vhost/virtio_net.c\n@@ -1291,10 +1291,12 @@ get_zmbuf(struct vhost_virtqueue *vq)\n \n static __rte_noinline uint16_t\n virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n-\tstruct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)\n+\tstruct rte_mempool *mbuf_pool, struct rte_mempool *mbuf_pool_large,\n+\tstruct rte_mbuf **pkts, uint16_t count)\n {\n \tuint16_t i;\n \tuint16_t free_entries;\n+\tuint16_t mbuf_avail;\n \n \tif (unlikely(dev->dequeue_zero_copy)) {\n \t\tstruct zcopy_mbuf *zmbuf, *next;\n@@ -1340,32 +1342,42 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tVHOST_LOG_DEBUG(VHOST_DATA, \"(%d) about to dequeue %u 
buffers\\n\",\n \t\t\tdev->vid, count);\n \n+\t/* If the large mpool is provided, find the threshold */\n+\tmbuf_avail = 0;\n+\tif (mbuf_pool_large)\n+\t\tmbuf_avail = rte_pktmbuf_data_room_size(mbuf_pool) - RTE_PKTMBUF_HEADROOM;\n+\n \tfor (i = 0; i < count; i++) {\n \t\tstruct buf_vector buf_vec[BUF_VECTOR_MAX];\n \t\tuint16_t head_idx;\n-\t\tuint32_t dummy_len;\n+\t\tuint32_t buf_len;\n \t\tuint16_t nr_vec = 0;\n+\t\tstruct rte_mempool *mpool;\n \t\tint err;\n \n \t\tif (unlikely(fill_vec_buf_split(dev, vq,\n \t\t\t\t\t\tvq->last_avail_idx + i,\n \t\t\t\t\t\t&nr_vec, buf_vec,\n-\t\t\t\t\t\t&head_idx, &dummy_len,\n+\t\t\t\t\t\t&head_idx, &buf_len,\n \t\t\t\t\t\tVHOST_ACCESS_RO) < 0))\n \t\t\tbreak;\n \n \t\tif (likely(dev->dequeue_zero_copy == 0))\n \t\t\tupdate_shadow_used_ring_split(vq, head_idx, 0);\n \n-\t\tpkts[i] = rte_pktmbuf_alloc(mbuf_pool);\n+\t\tif (mbuf_pool_large && buf_len > mbuf_avail)\n+\t\t\tmpool = mbuf_pool_large;\n+\t\telse\n+\t\t\tmpool = mbuf_pool;\n+\n+\t\tpkts[i] = rte_pktmbuf_alloc(mpool);\n \t\tif (unlikely(pkts[i] == NULL)) {\n \t\t\tRTE_LOG(ERR, VHOST_DATA,\n \t\t\t\t\"Failed to allocate memory for mbuf.\\n\");\n \t\t\tbreak;\n \t\t}\n \n-\t\terr = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],\n-\t\t\t\tmbuf_pool);\n+\t\terr = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], mpool);\n \t\tif (unlikely(err)) {\n \t\t\trte_pktmbuf_free(pkts[i]);\n \t\t\tbreak;\n@@ -1411,9 +1423,11 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n static __rte_noinline uint16_t\n virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n-\tstruct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)\n+\tstruct rte_mempool *mbuf_pool, struct rte_mempool *mbuf_pool_large,\n+\tstruct rte_mbuf **pkts, uint16_t count)\n {\n \tuint16_t i;\n+\tuint16_t mbuf_avail;\n \n \tif (unlikely(dev->dequeue_zero_copy)) {\n \t\tstruct zcopy_mbuf *zmbuf, *next;\n@@ -1448,17 +1462,23 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tVHOST_LOG_DEBUG(VHOST_DATA, \"(%d) about to dequeue %u buffers\\n\",\n \t\t\tdev->vid, count);\n \n+\t/* If the large mpool is provided, find the threshold */\n+\tmbuf_avail = 0;\n+\tif (mbuf_pool_large)\n+\t\tmbuf_avail = rte_pktmbuf_data_room_size(mbuf_pool) - RTE_PKTMBUF_HEADROOM;\n+\n \tfor (i = 0; i < count; i++) {\n \t\tstruct buf_vector buf_vec[BUF_VECTOR_MAX];\n \t\tuint16_t buf_id;\n-\t\tuint32_t dummy_len;\n+\t\tuint32_t buf_len;\n \t\tuint16_t desc_count, nr_vec = 0;\n+\t\tstruct rte_mempool *mpool;\n \t\tint err;\n \n \t\tif (unlikely(fill_vec_buf_packed(dev, vq,\n \t\t\t\t\t\tvq->last_avail_idx, &desc_count,\n \t\t\t\t\t\tbuf_vec, &nr_vec,\n-\t\t\t\t\t\t&buf_id, &dummy_len,\n+\t\t\t\t\t\t&buf_id, &buf_len,\n \t\t\t\t\t\tVHOST_ACCESS_RO) < 0))\n \t\t\tbreak;\n \n@@ -1466,15 +1486,19 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t\tupdate_shadow_used_ring_packed(vq, buf_id, 0,\n \t\t\t\t\tdesc_count);\n \n-\t\tpkts[i] = rte_pktmbuf_alloc(mbuf_pool);\n+\t\tif (mbuf_pool_large && buf_len > mbuf_avail)\n+\t\t\tmpool = mbuf_pool_large;\n+\t\telse\n+\t\t\tmpool = mbuf_pool;\n+\n+\t\tpkts[i] = rte_pktmbuf_alloc(mpool);\n \t\tif (unlikely(pkts[i] == NULL)) {\n \t\t\tRTE_LOG(ERR, VHOST_DATA,\n \t\t\t\t\"Failed to allocate memory for mbuf.\\n\");\n \t\t\tbreak;\n \t\t}\n \n-\t\terr = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],\n-\t\t\t\tmbuf_pool);\n+\t\terr = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], mpool);\n \t\tif 
(unlikely(err)) {\n \t\t\trte_pktmbuf_free(pkts[i]);\n \t\t\tbreak;\n@@ -1526,7 +1550,8 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n uint16_t\n rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n-\tstruct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)\n+\tstruct rte_mempool *mbuf_pool, struct rte_mempool *mbuf_pool_large,\n+\tstruct rte_mbuf **pkts, uint16_t count)\n {\n \tstruct virtio_net *dev;\n \tstruct rte_mbuf *rarp_mbuf = NULL;\n@@ -1598,9 +1623,11 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n \t}\n \n \tif (vq_is_packed(dev))\n-\t\tcount = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);\n+\t\tcount = virtio_dev_tx_packed(dev, vq, mbuf_pool, mbuf_pool_large, pkts,\n+                                     count);\n \telse\n-\t\tcount = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);\n+\t\tcount = virtio_dev_tx_split(dev, vq, mbuf_pool, mbuf_pool_large, pkts,\n+                                    count);\n \n out:\n \tif (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))\n",
    "prefixes": []
}
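The "content" field above describes the API change: rte_vhost_dequeue_burst() gains an optional second mempool so that large payloads come back as one linear mbuf instead of a chain. Below is a hedged sketch of a caller opting in, matching the new signature in the diff; the pool names, counts, and 16KB large data room are illustrative assumptions, not values from the patch.

/* Sketch only: using the two-pool dequeue added by this patch.
 * Pool names, sizes and counts below are assumptions for illustration. */
#include <rte_mbuf.h>
#include <rte_vhost.h>

#define BURST 32

static struct rte_mempool *pool_small, *pool_large;

static int
pools_init(void)
{
	/* Regular pool for typical frame sizes. */
	pool_small = rte_pktmbuf_pool_create("mbuf_small", 8192, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
	/* Larger data room: per the diff, this pool is chosen whenever the
	 * descriptor data length exceeds pool_small's data room minus
	 * RTE_PKTMBUF_HEADROOM, yielding a single linear mbuf. */
	pool_large = rte_pktmbuf_pool_create("mbuf_large", 1024, 64, 0,
			16384 + RTE_PKTMBUF_HEADROOM, SOCKET_ID_ANY);
	return (pool_small != NULL && pool_large != NULL) ? 0 : -1;
}

static uint16_t
dequeue_linear(int vid, uint16_t queue_id, struct rte_mbuf **pkts)
{
	/* Passing NULL instead of pool_large keeps the pre-patch
	 * behaviour (chained mbufs for oversized data). */
	return rte_vhost_dequeue_burst(vid, queue_id, pool_small,
			pool_large, pkts, BURST);
}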