get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed). A sketch of an update request follows the example response below.

put:
Fully update a patch (every writable field is replaced).
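A minimal read-side sketch, not part of the original page: fetching this endpoint with the Python standard library and printing a few of the fields that appear in the response underneath. Only the URL and the field names are taken from the page; everything else is illustrative.

import json
import urllib.request

# Fetch the patch object shown below and decode the JSON body.
url = "http://patches.dpdk.org/api/patches/48502/"
with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

print(patch["name"])    # "[v11,6/9] net/virtio: implement receive path for packed queues"
print(patch["state"])   # "superseded"
print(patch["mbox"])    # mbox URL of the patch, suitable for downloading and applying with git am

The raw request and the full response: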

GET /api/patches/48502/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48502,
    "url": "http://patches.dpdk.org/api/patches/48502/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20181203141515.28368-7-jfreimann@redhat.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20181203141515.28368-7-jfreimann@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20181203141515.28368-7-jfreimann@redhat.com",
    "date": "2018-12-03T14:15:12",
    "name": "[v11,6/9] net/virtio: implement receive path for packed queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e41b5f9a84ae86244232a5be7959c40503df0bbd",
    "submitter": {
        "id": 745,
        "url": "http://patches.dpdk.org/api/people/745/?format=api",
        "name": "Jens Freimann",
        "email": "jfreimann@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20181203141515.28368-7-jfreimann@redhat.com/mbox/",
    "series": [
        {
            "id": 2645,
            "url": "http://patches.dpdk.org/api/series/2645/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=2645",
            "date": "2018-12-03T14:15:06",
            "name": "implement packed virtqueues",
            "version": 11,
            "mbox": "http://patches.dpdk.org/series/2645/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/48502/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/48502/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A140D1B43C;\n\tMon,  3 Dec 2018 15:16:46 +0100 (CET)",
            "from mx1.redhat.com (mx1.redhat.com [209.132.183.28])\n\tby dpdk.org (Postfix) with ESMTP id 76DB41B416\n\tfor <dev@dpdk.org>; Mon,  3 Dec 2018 15:16:45 +0100 (CET)",
            "from smtp.corp.redhat.com\n\t(int-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.12])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby mx1.redhat.com (Postfix) with ESMTPS id BC72830024C8;\n\tMon,  3 Dec 2018 14:16:44 +0000 (UTC)",
            "from localhost (dhcp-192-205.str.redhat.com [10.33.192.205])\n\tby smtp.corp.redhat.com (Postfix) with ESMTPS id 0FEF96717D;\n\tMon,  3 Dec 2018 14:16:38 +0000 (UTC)"
        ],
        "From": "Jens Freimann <jfreimann@redhat.com>",
        "To": "dev@dpdk.org",
        "Cc": "tiwei.bie@intel.com,\n\tmaxime.coquelin@redhat.com,\n\tGavin.Hu@arm.com",
        "Date": "Mon,  3 Dec 2018 15:15:12 +0100",
        "Message-Id": "<20181203141515.28368-7-jfreimann@redhat.com>",
        "In-Reply-To": "<20181203141515.28368-1-jfreimann@redhat.com>",
        "References": "<20181203141515.28368-1-jfreimann@redhat.com>",
        "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.12",
        "X-Greylist": "Sender IP whitelisted, not delayed by milter-greylist-4.5.16\n\t(mx1.redhat.com [10.5.110.40]); Mon, 03 Dec 2018 14:16:44 +0000 (UTC)",
        "Subject": "[dpdk-dev] [PATCH v11 6/9] net/virtio: implement receive path for\n\tpacked queues",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Implement the receive part.\n\nSigned-off-by: Jens Freimann <jfreimann@redhat.com>\nSigned-off-by: Tiwei Bie <tiwei.bie@intel.com>\n---\n drivers/net/virtio/virtio_ethdev.c |  61 +++--\n drivers/net/virtio/virtio_ethdev.h |   5 +\n drivers/net/virtio/virtio_rxtx.c   | 369 ++++++++++++++++++++++++++++-\n drivers/net/virtio/virtqueue.c     |  22 ++\n drivers/net/virtio/virtqueue.h     |   2 +-\n 5 files changed, 428 insertions(+), 31 deletions(-)",
    "diff": "diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex bdcc9f365..e86300b58 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -1359,27 +1359,39 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)\n \t\t}\n \t}\n \n-\tif (hw->use_simple_rx) {\n-\t\tPMD_INIT_LOG(INFO, \"virtio: using simple Rx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->rx_pkt_burst = virtio_recv_pkts_vec;\n-\t} else if (hw->use_inorder_rx) {\n-\t\tPMD_INIT_LOG(INFO,\n-\t\t\t\"virtio: using inorder mergeable buffer Rx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;\n-\t} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n-\t\tPMD_INIT_LOG(INFO,\n-\t\t\t\"virtio: using mergeable buffer Rx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;\n-\t} else {\n-\t\tPMD_INIT_LOG(INFO, \"virtio: using standard Rx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n+\tif (vtpci_packed_queue(hw)) {\n+\t\tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n+\t\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\t\"virtio: using packed ring mergeable buffer Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_packed;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\t\"virtio: using packed ring standard Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;\n+\t\t}\n+\t} else { \n+\t\tif (hw->use_simple_rx) {\n+\t\t\tPMD_INIT_LOG(INFO, \"virtio: using simple Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = virtio_recv_pkts_vec;\n+\t\t} else if (hw->use_inorder_rx) {\n+\t\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\t\"virtio: using inorder mergeable buffer Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;\n+\t\t} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n+\t\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\t\"virtio: using mergeable buffer Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(INFO, \"virtio: using standard Rx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n+\t\t}\n \t}\n-\n-\n }\n \n /* Only support 1:1 queue/interrupt mapping so far.\n@@ -1511,7 +1523,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)\n \n \t/* Setting up rx_header size for the device */\n \tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||\n-\t    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))\n+\t    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||\n+\t    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n \telse\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);\n@@ -1939,6 +1952,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n+\tif (vtpci_packed_queue(hw)) {\n+\t\thw->use_simple_rx = 0;\n+\t\thw->use_inorder_rx = 0;\n+\t\thw->use_inorder_tx = 0;\n+\t}\n+\n #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM\n \tif (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {\n \t\thw->use_simple_rx = 0;\ndiff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h\nindex 
05d355180..5cf295418 100644\n--- a/drivers/net/virtio/virtio_ethdev.h\n+++ b/drivers/net/virtio/virtio_ethdev.h\n@@ -73,10 +73,15 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,\n \n uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n \n uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \n+uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,\n \t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n \ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex 1fcc9cef7..f73498602 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -31,6 +31,7 @@\n #include \"virtqueue.h\"\n #include \"virtio_rxtx.h\"\n #include \"virtio_rxtx_simple.h\"\n+#include \"virtio_ring.h\"\n \n #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP\n #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)\n@@ -105,6 +106,47 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)\n \tdxp->next = VQ_RING_DESC_CHAIN_END;\n }\n \n+static uint16_t\n+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,\n+\t\t\t\t  struct rte_mbuf **rx_pkts,\n+\t\t\t\t  uint32_t *len,\n+\t\t\t\t  uint16_t num)\n+{\n+\tstruct rte_mbuf *cookie;\n+\tuint16_t used_idx;\n+\tuint16_t id;\n+\tstruct vring_packed_desc *desc;\n+\tuint16_t i;\n+\n+\tdesc = vq->ring_packed.desc_packed;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tused_idx = vq->vq_used_cons_idx;\n+\t\tif (!desc_is_used(&desc[used_idx], vq))\n+\t\t\treturn i;\n+\t\tlen[i] = desc[used_idx].len;\n+\t\tid = desc[used_idx].id;\n+\t\tcookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;\n+\t\tif (unlikely(cookie == NULL)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"vring descriptor with no mbuf cookie at %u\",\n+\t\t\t\tvq->vq_used_cons_idx);\n+\t\t\tbreak;\n+\t\t}\n+\t\trte_prefetch0(cookie);\n+\t\trte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));\n+\t\trx_pkts[i] = cookie;\n+\n+\t\tvq->vq_free_cnt++;\n+\t\tvq->vq_used_cons_idx++;\n+\t\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n+\t\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n+\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n static uint16_t\n virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,\n \t\t\t   uint32_t *len, uint16_t num)\n@@ -350,6 +392,44 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n \treturn 0;\n }\n \n+static inline int\n+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)\n+{\n+\tstruct vq_desc_extra *dxp;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tstruct vring_packed_desc *dp;\n+\tuint16_t idx;\n+\tuint16_t flags;\n+\n+\tif (unlikely(vq->vq_free_cnt < 1))\n+\t\treturn -ENOSPC;\n+\n+\tidx = vq->vq_avail_idx;\n+\n+\tdxp = &vq->vq_descx[idx];\n+\tdxp->cookie = cookie;\n+\n+\tdp = &vq->ring_packed.desc_packed[idx];\n+\tdp->addr = VIRTIO_MBUF_ADDR(cookie, vq) +\n+\t\t\tRTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;\n+\tdp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;\n+\n+\tflags = VRING_DESC_F_WRITE;\n+\tflags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |\n+\t\t VRING_DESC_F_USED(!vq->avail_wrap_counter);\n+\trte_smp_wmb();\n+\tdp->flags = flags;\n+\n+\tvq->vq_free_cnt--;\n+\tvq->vq_avail_idx++;\n+\tif 
(vq->vq_avail_idx >= vq->vq_nentries) {\n+\t\tvq->vq_avail_idx -= vq->vq_nentries;\n+\t\tvq->avail_wrap_counter ^= 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n /* When doing TSO, the IP length is not included in the pseudo header\n  * checksum of the packet given to the PMD, but for virtio it is\n  * expected.\n@@ -801,7 +881,10 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)\n \t\t\t\tbreak;\n \n \t\t\t/* Enqueue allocated buffers */\n-\t\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n+\t\t\tif (vtpci_packed_queue(vq->hw))\n+\t\t\t\terror = virtqueue_enqueue_recv_refill_packed(vq, m);\n+\t\t\telse\n+\t\t\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n \t\t\tif (error) {\n \t\t\t\trte_pktmbuf_free(m);\n \t\t\t\tbreak;\n@@ -809,7 +892,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)\n \t\t\tnbufs++;\n \t\t}\n \n-\t\tvq_update_avail_idx(vq);\n+\t\tif (!vtpci_packed_queue(vq->hw))\n+\t\t\tvq_update_avail_idx(vq);\n \t}\n \n \tPMD_INIT_LOG(DEBUG, \"Allocated %d bufs\", nbufs);\n@@ -896,7 +980,10 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)\n \t * Requeue the discarded mbuf. This should always be\n \t * successful since it was just dequeued.\n \t */\n-\terror = virtqueue_enqueue_recv_refill(vq, m);\n+\tif (vtpci_packed_queue(vq->hw))\n+\t\terror = virtqueue_enqueue_recv_refill_packed(vq, m);\n+\telse\n+\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n \n \tif (unlikely(error)) {\n \t\tRTE_LOG(ERR, PMD, \"cannot requeue discarded mbuf\");\n@@ -1135,6 +1222,103 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \treturn nb_rx;\n }\n \n+uint16_t\n+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct virtnet_rx *rxvq = rx_queue;\n+\tstruct virtqueue *vq = rxvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tstruct rte_mbuf *rxm, *new_mbuf;\n+\tuint16_t num, nb_rx;\n+\tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n+\tint error;\n+\tuint32_t i, nb_enqueued;\n+\tuint32_t hdr_size;\n+\tstruct virtio_net_hdr *hdr;\n+\n+\tnb_rx = 0;\n+\tif (unlikely(hw->started == 0))\n+\t\treturn nb_rx;\n+\n+\tnum = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);\n+\tif (likely(num > DESC_PER_CACHELINE))\n+\t\tnum = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);\n+\n+\tnum = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);\n+\tPMD_RX_LOG(DEBUG, \"dequeue:%d\", num);\n+\n+\tnb_enqueued = 0;\n+\thdr_size = hw->vtnet_hdr_size;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\trxm = rcv_pkts[i];\n+\n+\t\tPMD_RX_LOG(DEBUG, \"packet len:%d\", len[i]);\n+\n+\t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n+\t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n+\t\t\tnb_enqueued++;\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\trxm->port = rxvq->port_id;\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->ol_flags = 0;\n+\t\trxm->vlan_tci = 0;\n+\n+\t\trxm->pkt_len = (uint32_t)(len[i] - hdr_size);\n+\t\trxm->data_len = (uint16_t)(len[i] - hdr_size);\n+\n+\t\thdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +\n+\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size);\n+\n+\t\tif (hw->vlan_strip)\n+\t\t\trte_vlan_strip(rxm);\n+\n+\t\tif (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tvirtio_rx_stats_updated(rxvq, rxm);\n+\n+\t\trx_pkts[nb_rx++] = 
rxm;\n+\t}\n+\n+\trxvq->stats.packets += nb_rx;\n+\n+\t/* Allocate new mbuf for the used descriptor */\n+\twhile (likely(!virtqueue_full(vq))) {\n+\t\tnew_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);\n+\t\tif (unlikely(new_mbuf == NULL)) {\n+\t\t\tstruct rte_eth_dev *dev\n+\t\t\t\t= &rte_eth_devices[rxvq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\terror = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);\n+\t\tif (unlikely(error)) {\n+\t\t\trte_pktmbuf_free(new_mbuf);\n+\t\t\tbreak;\n+\t\t}\n+\t\tnb_enqueued++;\n+\t}\n+\n+\tif (likely(nb_enqueued)) {\n+\t\tif (unlikely(virtqueue_kick_prepare_packed(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n+\t\t\tPMD_RX_LOG(DEBUG, \"Notified\");\n+\t\t}\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+\n uint16_t\n virtio_recv_mergeable_pkts_inorder(void *rx_queue,\n \t\t\tstruct rte_mbuf **rx_pkts,\n@@ -1341,6 +1525,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \tuint16_t extra_idx;\n \tuint32_t seg_res;\n \tuint32_t hdr_size;\n+\tuint32_t rx_num = 0;\n \n \tnb_rx = 0;\n \tif (unlikely(hw->started == 0))\n@@ -1366,6 +1551,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\tbreak;\n \n \t\tnum = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);\n+\t\tif (num == 0)\n+\t\t\treturn nb_rx;\n \t\tif (num != 1)\n \t\t\tcontinue;\n \n@@ -1418,11 +1605,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\tuint16_t  rcv_cnt =\n \t\t\t\tRTE_MIN(seg_res, RTE_DIM(rcv_pkts));\n \t\t\tif (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {\n-\t\t\t\tuint32_t rx_num =\n-\t\t\t\t\tvirtqueue_dequeue_burst_rx(vq,\n-\t\t\t\t\trcv_pkts, len, rcv_cnt);\n-\t\t\t\ti += rx_num;\n-\t\t\t\trcv_cnt = rx_num;\n+\t\t\t\trx_num = virtqueue_dequeue_burst_rx(vq,\n+\t\t\t\t\t      rcv_pkts, len, rcv_cnt);\n \t\t\t} else {\n \t\t\t\tPMD_RX_LOG(ERR,\n \t\t\t\t\t   \"No enough segments for packet.\");\n@@ -1431,6 +1615,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\t\trxvq->stats.errors++;\n \t\t\t\tbreak;\n \t\t\t}\n+\t\t\ti += rx_num;\n+\t\t\trcv_cnt = rx_num;\n \n \t\t\textra_idx = 0;\n \n@@ -1483,7 +1669,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \n \tif (likely(nb_enqueued)) {\n \t\tvq_update_avail_idx(vq);\n-\n \t\tif (unlikely(virtqueue_kick_prepare(vq))) {\n \t\t\tvirtqueue_notify(vq);\n \t\t\tPMD_RX_LOG(DEBUG, \"Notified\");\n@@ -1493,6 +1678,172 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \treturn nb_rx;\n }\n \n+uint16_t\n+virtio_recv_mergeable_pkts_packed(void *rx_queue,\n+\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct virtnet_rx *rxvq = rx_queue;\n+\tstruct virtqueue *vq = rxvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tstruct rte_mbuf *rxm, *new_mbuf;\n+\tuint16_t nb_used, num, nb_rx;\n+\tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *prev;\n+\tint error;\n+\tuint32_t i, nb_enqueued;\n+\tuint32_t seg_num;\n+\tuint16_t extra_idx;\n+\tuint32_t seg_res;\n+\tuint32_t hdr_size;\n+\tuint32_t rx_num = 0;\n+\n+\tnb_rx = 0;\n+\tif (unlikely(hw->started == 0))\n+\t\treturn nb_rx;\n+\n+\tnb_used = VIRTIO_MBUF_BURST_SZ;\n+\n+\ti = 0;\n+\tnb_enqueued = 0;\n+\tseg_num = 0;\n+\textra_idx = 0;\n+\tseg_res = 0;\n+\thdr_size = hw->vtnet_hdr_size;\n+\n+\twhile (i < nb_used) {\n+\t\tstruct virtio_net_hdr_mrg_rxbuf *header;\n+\n+\t\tif (nb_rx == nb_pkts)\n+\t\t\tbreak;\n+\n+\t\tnum = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, 1);\n+\t\tif (num == 0)\n+\t\t\tbreak;\n+\t\tif (num != 1)\n+\t\t\tcontinue;\n+\n+\t\ti++;\n+\n+\t\tPMD_RX_LOG(DEBUG, \"dequeue:%d\", 
num);\n+\t\tPMD_RX_LOG(DEBUG, \"packet len:%d\", len[0]);\n+\n+\t\trxm = rcv_pkts[0];\n+\n+\t\tif (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {\n+\t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n+\t\t\tnb_enqueued++;\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\theader = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +\n+\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size);\n+\t\tseg_num = header->num_buffers;\n+\n+\t\tif (seg_num == 0)\n+\t\t\tseg_num = 1;\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->nb_segs = seg_num;\n+\t\trxm->ol_flags = 0;\n+\t\trxm->vlan_tci = 0;\n+\t\trxm->pkt_len = (uint32_t)(len[0] - hdr_size);\n+\t\trxm->data_len = (uint16_t)(len[0] - hdr_size);\n+\n+\t\trxm->port = rxvq->port_id;\n+\t\trx_pkts[nb_rx] = rxm;\n+\t\tprev = rxm;\n+\n+\t\tif (hw->has_rx_offload &&\n+\t\t\t\tvirtio_rx_offload(rxm, &header->hdr) < 0) {\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tseg_res = seg_num - 1;\n+\n+\t\twhile (seg_res != 0) {\n+\t\t\t/*\n+\t\t\t * Get extra segments for current uncompleted packet.\n+\t\t\t */\n+\t\t\tuint16_t rcv_cnt = RTE_MIN(seg_res, RTE_DIM(rcv_pkts));\n+\t\t\tif (likely(vq->vq_free_cnt >= rcv_cnt)) {\n+\t\t\t\trx_num = virtqueue_dequeue_burst_rx_packed(vq,\n+\t\t\t\t\t     rcv_pkts, len, rcv_cnt);\n+\t\t\t} else {\n+\t\t\t\tPMD_RX_LOG(ERR,\n+\t\t\t\t\t   \"No enough segments for packet.\");\n+\t\t\t\tnb_enqueued++;\n+\t\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\t\trxvq->stats.errors++;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\ti += rx_num;\n+\t\t\trcv_cnt = rx_num;\n+\n+\t\t\textra_idx = 0;\n+\n+\t\t\twhile (extra_idx < rcv_cnt) {\n+\t\t\t\trxm = rcv_pkts[extra_idx];\n+\n+\t\t\t\trxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;\n+\t\t\t\trxm->pkt_len = (uint32_t)(len[extra_idx]);\n+\t\t\t\trxm->data_len = (uint16_t)(len[extra_idx]);\n+\n+\t\t\t\tif (prev)\n+\t\t\t\t\tprev->next = rxm;\n+\n+\t\t\t\tprev = rxm;\n+\t\t\t\trx_pkts[nb_rx]->pkt_len += rxm->pkt_len;\n+\t\t\t\textra_idx++;\n+\t\t\t};\n+\t\t\tseg_res -= rcv_cnt;\n+\t\t}\n+\n+\t\tif (hw->vlan_strip)\n+\t\t\trte_vlan_strip(rx_pkts[nb_rx]);\n+\n+\t\tVIRTIO_DUMP_PACKET(rx_pkts[nb_rx],\n+\t\t\trx_pkts[nb_rx]->data_len);\n+\n+\t\trxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;\n+\t\tvirtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);\n+\t\tnb_rx++;\n+\t}\n+\n+\trxvq->stats.packets += nb_rx;\n+\n+\t/* Allocate new mbuf for the used descriptor */\n+\twhile (likely(!virtqueue_full(vq))) {\n+\t\tnew_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);\n+\t\tif (unlikely(new_mbuf == NULL)) {\n+\t\t\tstruct rte_eth_dev *dev\n+\t\t\t\t= &rte_eth_devices[rxvq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\terror = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);\n+\t\tif (unlikely(error)) {\n+\t\t\trte_pktmbuf_free(new_mbuf);\n+\t\t\tbreak;\n+\t\t}\n+\t\tnb_enqueued++;\n+\t}\n+\n+\tif (likely(nb_enqueued)) {\n+\t\tif (unlikely(virtqueue_kick_prepare_packed(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n+\t\t\tPMD_RX_LOG(DEBUG, \"Notified\");\n+\t\t}\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n uint16_t\n virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n {\ndiff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c\nindex 56a77cc71..f6dbb7d82 100644\n--- a/drivers/net/virtio/virtqueue.c\n+++ b/drivers/net/virtio/virtqueue.c\n@@ -65,6 +65,28 @@ virtqueue_rxvq_flush(struct virtqueue *vq)\n \tuint16_t used_idx, desc_idx;\n \tuint16_t 
nb_used, i;\n \n+\tif (vtpci_packed_queue(vq->hw)) {\n+\t\tstruct vring_packed_desc *descs = vq->ring_packed.desc_packed;\n+\t\tint cnt = 0;\n+\n+\t\ti = vq->vq_used_cons_idx;\n+\t\twhile (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {\n+\t\t\tdxp = &vq->vq_descx[descs[i].id];\n+\t\t\tif (dxp->cookie != NULL) {\n+\t\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\t\tdxp->cookie = NULL;\n+\t\t\t}\n+\t\t\tvq->vq_free_cnt++;\n+\t\t\tvq->vq_used_cons_idx++;\n+\t\t\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n+\t\t\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n+\t\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\t}\n+\t\t\ti = vq->vq_used_cons_idx;\n+\t\t}\n+\t\treturn;\n+\t}\n+\n \tnb_used = VIRTQUEUE_NUSED(vq);\n \n \tfor (i = 0; i < nb_used; i++) {\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 5119818e1..bd8645019 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -397,7 +397,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)\n #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))\n \n void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);\n-void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx);\n+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);\n void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n \t\t\t  uint16_t num);\n \n",
    "prefixes": [
        "v11",
        "6/9"
    ]
}
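A hedged write-side sketch for the "patch"/"put" operations described above: changing the patch's state field. This assumes token authentication ("Authorization: Token ...", as used by Patchwork's REST API) with a token that has maintainer rights on the project; the token value and the target state used here are placeholders, not values taken from this page.

import json
import urllib.request

# Send a partial update (PATCH) for a single writable field; PUT would
# replace every writable field instead.
url = "http://patches.dpdk.org/api/patches/48502/"
req = urllib.request.Request(
    url,
    data=json.dumps({"state": "accepted"}).encode(),    # placeholder state change
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Token 0123456789abcdef",      # placeholder API token (assumed auth scheme)
    },
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["state"])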