get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/59721/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 59721,
    "url": "http://patches.dpdk.org/api/patches/59721/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190925171329.63734-10-yong.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190925171329.63734-10-yong.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190925171329.63734-10-yong.liu@intel.com",
    "date": "2019-09-25T17:13:23",
    "name": "[v3,09/15] vhost: split enqueue and dequeue flush functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "505f23e1561e72216bb05e2378fe24645862b4ef",
    "submitter": {
        "id": 17,
        "url": "http://patches.dpdk.org/api/people/17/?format=api",
        "name": "Marvin Liu",
        "email": "yong.liu@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190925171329.63734-10-yong.liu@intel.com/mbox/",
    "series": [
        {
            "id": 6522,
            "url": "http://patches.dpdk.org/api/series/6522/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6522",
            "date": "2019-09-25T17:13:14",
            "name": "vhost packed ring performance optimization",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/6522/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/59721/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/59721/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E858E1BED7;\n\tWed, 25 Sep 2019 11:33:05 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby dpdk.org (Postfix) with ESMTP id A913D1BE8A\n\tfor <dev@dpdk.org>; Wed, 25 Sep 2019 11:32:43 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t25 Sep 2019 02:32:43 -0700",
            "from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])\n\tby fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:41 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.64,547,1559545200\"; d=\"scan'208\";a=\"213986237\"",
        "From": "Marvin Liu <yong.liu@intel.com>",
        "To": "maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, \n\tstephen@networkplumber.org, gavin.hu@arm.com",
        "Cc": "dev@dpdk.org,\n\tMarvin Liu <yong.liu@intel.com>",
        "Date": "Thu, 26 Sep 2019 01:13:23 +0800",
        "Message-Id": "<20190925171329.63734-10-yong.liu@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190925171329.63734-1-yong.liu@intel.com>",
        "References": "<20190919163643.24130-2-yong.liu@intel.com>\n\t<20190925171329.63734-1-yong.liu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 09/15] vhost: split enqueue and dequeue flush\n\tfunctions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Vhost enqueue descriptors are updated by batch number, while vhost\ndequeue descriptors are buffered. Meanwhile in dequeue function only\nfirst descriptor is buffered. Due to these differences, split vhost\nenqueue and dequeue flush functions.\n\nSigned-off-by: Marvin Liu <yong.liu@intel.com>",
    "diff": "diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c\nindex 8f7209f83..1b0fa2c64 100644\n--- a/lib/librte_vhost/virtio_net.c\n+++ b/lib/librte_vhost/virtio_net.c\n@@ -92,8 +92,8 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,\n }\n \n static __rte_always_inline void\n-flush_shadow_used_ring_packed(struct virtio_net *dev,\n-\t\t\tstruct vhost_virtqueue *vq)\n+flush_enqueue_shadow_used_ring_packed(struct virtio_net *dev,\n+\tstruct vhost_virtqueue *vq)\n {\n \tint i;\n \tuint16_t used_idx = vq->last_used_idx;\n@@ -158,6 +158,32 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,\n \tvhost_log_cache_sync(dev, vq);\n }\n \n+static __rte_always_inline void\n+flush_dequeue_shadow_used_ring_packed(struct virtio_net *dev,\n+\tstruct vhost_virtqueue *vq)\n+{\n+\tuint16_t head_idx = vq->dequeue_shadow_head;\n+\tuint16_t head_flags;\n+\tstruct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];\n+\n+\tif (used_elem->used_wrap_counter)\n+\t\thead_flags = PACKED_TX_USED_FLAG;\n+\telse\n+\t\thead_flags = PACKED_TX_USED_WRAP_FLAG;\n+\n+\tvq->desc_packed[head_idx].id = used_elem->id;\n+\n+\trte_smp_wmb();\n+\tvq->desc_packed[head_idx].flags = head_flags;\n+\n+\tvhost_log_cache_used_vring(dev, vq, head_idx *\n+\t\t\t\t   sizeof(struct vring_packed_desc),\n+\t\t\t\t   sizeof(struct vring_packed_desc));\n+\n+\tvq->shadow_used_idx = 0;\n+\tvhost_log_cache_sync(dev, vq);\n+}\n+\n static __rte_always_inline void\n update_shadow_used_ring_packed(struct vhost_virtqueue *vq,\n \t\t\t uint16_t desc_idx, uint32_t len, uint16_t count)\n@@ -199,6 +225,47 @@ flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t}\n }\n \n+static __rte_always_inline void\n+update_dequeue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n+\tuint16_t *ids)\n+{\n+\tuint16_t flags = 0;\n+\tuint16_t i;\n+\n+\tif (vq->used_wrap_counter)\n+\t\tflags = PACKED_TX_USED_FLAG;\n+\telse\n+\t\tflags = 
PACKED_TX_USED_WRAP_FLAG;\n+\n+\tif (!vq->shadow_used_idx) {\n+\t\tvq->dequeue_shadow_head = vq->last_used_idx;\n+\t\tvq->shadow_used_packed[0].id  = ids[0];\n+\t\tvq->shadow_used_packed[0].len = 0;\n+\t\tvq->shadow_used_packed[0].count = 1;\n+\t\tvq->shadow_used_packed[0].used_idx = vq->last_used_idx;\n+\t\tvq->shadow_used_packed[0].used_wrap_counter =\n+\t\t\tvq->used_wrap_counter;\n+\n+\t\tUNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)\n+\t\tfor (i = 1; i < PACKED_BATCH_SIZE; i++)\n+\t\t\tvq->desc_packed[vq->last_used_idx + i].id = ids[i];\n+\t\trte_smp_wmb();\n+\t\tUNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)\n+\t\tfor (i = 1; i < PACKED_BATCH_SIZE; i++)\n+\t\t\tvq->desc_packed[vq->last_used_idx + i].flags = flags;\n+\n+\t\tvq->shadow_used_idx = 1;\n+\t\tvq->last_used_idx += PACKED_BATCH_SIZE;\n+\t\tif (vq->last_used_idx >= vq->size) {\n+\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\tvq->last_used_idx -= vq->size;\n+\t\t}\n+\t} else {\n+\t\tuint64_t lens[PACKED_BATCH_SIZE] = {0};\n+\t\tflush_used_batch_packed(dev, vq, lens, ids, flags);\n+\t}\n+}\n+\n static __rte_always_inline void\n flush_enqueue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tuint64_t *lens, uint16_t *ids)\n@@ -306,11 +373,29 @@ flush_enqueue_packed(struct virtio_net *dev,\n \n \t\tif (vq->enqueue_shadow_count >= PACKED_BATCH_SIZE) {\n \t\t\tdo_data_copy_enqueue(dev, vq);\n-\t\t\tflush_shadow_used_ring_packed(dev, vq);\n+\t\t\tflush_enqueue_shadow_used_ring_packed(dev, vq);\n \t\t}\n \t}\n }\n \n+static __rte_unused void\n+flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)\n+{\n+\tif (!vq->shadow_used_idx)\n+\t\treturn;\n+\n+\tint16_t shadow_count = vq->last_used_idx - vq->dequeue_shadow_head;\n+\tif (shadow_count <= 0)\n+\t\tshadow_count += vq->size;\n+\n+\t/* buffer used descs as many as possible when doing dequeue */\n+\tif ((uint16_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {\n+\t\tdo_data_copy_dequeue(vq);\n+\t\tflush_dequeue_shadow_used_ring_packed(dev, 
vq);\n+\t\tvhost_vring_call_packed(dev, vq);\n+\t}\n+}\n+\n /* avoid write operation when necessary, to lessen cache issues */\n #define ASSIGN_UNLESS_EQUAL(var, val) do {\t\\\n \tif ((var) != (val))\t\t\t\\\n@@ -1165,7 +1250,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tdo_data_copy_enqueue(dev, vq);\n \n \tif (likely(vq->shadow_used_idx)) {\n-\t\tflush_shadow_used_ring_packed(dev, vq);\n+\t\tflush_enqueue_shadow_used_ring_packed(dev, vq);\n \t\tvhost_vring_call_packed(dev, vq);\n \t}\n \n@@ -1796,6 +1881,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t\t   pkts[i]->pkt_len);\n \t}\n \n+\tupdate_dequeue_batch_packed(dev, vq, ids);\n+\n \tif (virtio_net_with_host_offload(dev)) {\n \t\tUNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)\n \t\tfor (i = 0; i < PACKED_BATCH_SIZE; i++) {\n@@ -1896,7 +1983,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\t}\n \n \t\tif (likely(vq->shadow_used_idx)) {\n-\t\t\tflush_shadow_used_ring_packed(dev, vq);\n+\t\t\tflush_dequeue_shadow_used_ring_packed(dev, vq);\n \t\t\tvhost_vring_call_packed(dev, vq);\n \t\t}\n \t}\n@@ -1975,7 +2062,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\tif (unlikely(i < count))\n \t\t\tvq->shadow_used_idx = i;\n \t\tif (likely(vq->shadow_used_idx)) {\n-\t\t\tflush_shadow_used_ring_packed(dev, vq);\n+\t\t\tflush_dequeue_shadow_used_ring_packed(dev, vq);\n \t\t\tvhost_vring_call_packed(dev, vq);\n \t\t}\n \t}\n",
    "prefixes": [
        "v3",
        "09/15"
    ]
}