get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/472/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 472,
    "url": "http://patches.dpdk.org/api/patches/472/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/6BD6202160B55B409D423293115822625483B6@SHSMSX101.ccr.corp.intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<6BD6202160B55B409D423293115822625483B6@SHSMSX101.ccr.corp.intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/6BD6202160B55B409D423293115822625483B6@SHSMSX101.ccr.corp.intel.com",
    "date": "2014-09-24T09:22:09",
    "name": "[dpdk-dev,v3] virtio: Support mergeable buffer in virtio pmd",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": true,
    "hash": "774d94a6122044207096f25395621a4dec9bb9ad",
    "submitter": {
        "id": 45,
        "url": "http://patches.dpdk.org/api/people/45/?format=api",
        "name": "Fu, JingguoX",
        "email": "jingguox.fu@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/6BD6202160B55B409D423293115822625483B6@SHSMSX101.ccr.corp.intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/472/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/472/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id A4E1CB344;\n\tWed, 24 Sep 2014 11:16:11 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id 2F2C56885\n\tfor <dev@dpdk.org>; Wed, 24 Sep 2014 11:16:08 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga103.fm.intel.com with ESMTP; 24 Sep 2014 02:13:04 -0700",
            "from fmsmsx106.amr.corp.intel.com ([10.18.124.204])\n\tby fmsmga001.fm.intel.com with ESMTP; 24 Sep 2014 02:22:13 -0700",
            "from fmsmsx112.amr.corp.intel.com (10.18.116.6) by\n\tFMSMSX106.amr.corp.intel.com (10.18.124.204) with Microsoft SMTP\n\tServer (TLS) id 14.3.195.1; Wed, 24 Sep 2014 02:22:12 -0700",
            "from shsmsx102.ccr.corp.intel.com (10.239.4.154) by\n\tFMSMSX112.amr.corp.intel.com (10.18.116.6) with Microsoft SMTP Server\n\t(TLS) id 14.3.195.1; Wed, 24 Sep 2014 02:22:12 -0700",
            "from shsmsx101.ccr.corp.intel.com ([169.254.1.203]) by\n\tshsmsx102.ccr.corp.intel.com ([169.254.2.192]) with mapi id\n\t14.03.0195.001; Wed, 24 Sep 2014 17:22:11 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.04,587,1406617200\"; d=\"scan'208\";a=\"596086981\"",
        "From": "\"Fu, JingguoX\" <jingguox.fu@intel.com>",
        "To": "\"dev@dpdk.org\" <dev@dpdk.org>",
        "Thread-Topic": "[dpdk-dev] [PATCH v3] virtio: Support mergeable buffer in\n\tvirtio pmd",
        "Thread-Index": "AQHPt515g4uAJ2Z1JEWQMeJNFgJa2ZwQQbDA",
        "Date": "Wed, 24 Sep 2014 09:22:09 +0000",
        "Message-ID": "<6BD6202160B55B409D423293115822625483B6@SHSMSX101.ccr.corp.intel.com>",
        "References": "<1408006475-17606-1-git-send-email-changchun.ouyang@intel.com>",
        "In-Reply-To": "<1408006475-17606-1-git-send-email-changchun.ouyang@intel.com>",
        "Accept-Language": "en-US",
        "Content-Language": "en-US",
        "X-MS-Has-Attach": "",
        "X-MS-TNEF-Correlator": "",
        "x-cr-hashedpuzzle": "A74P A+T3 Bgat B6kv CYE+ Cjhd C8JG Dcc4 Do/F D+Cy EAss\n\tF60m Gyqe H1Xg Jr0M\n\tKPo9; 1; ZABlAHYAQABkAHAAZABrAC4AbwByAGcA; Sosha1_v1; 7;\n\t{68AB0A3F-B2F9-49FC-8A72-4B40C3A15EE1};\n\tagBpAG4AZwBnAHUAbwB4AC4AZgB1AEAAaQBuAHQAZQBsAC4AYwBvAG0A; Wed,\n\t24 Sep 2014 09:22:07 GMT;\n\tUgBFADoAIABbAGQAcABkAGsALQBkAGUAdgBdACAAWwBQAEEAVABDAEgAIAB2ADMAXQAgAHYAaQByAHQAaQBvADoAIABTAHUAcABwAG8AcgB0ACAAbQBlAHIAZwBlAGEAYgBsAGUAIABiAHUAZgBmAGUAcgAgAGkAbgAgAHYAaQByAHQAaQBvACAAcABtAGQA",
        "x-cr-puzzleid": "{68AB0A3F-B2F9-49FC-8A72-4B40C3A15EE1}",
        "x-originating-ip": "[10.239.127.40]",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "quoted-printable",
        "MIME-Version": "1.0",
        "Subject": "Re: [dpdk-dev] [PATCH v3] virtio: Support mergeable buffer in\n\tvirtio pmd",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Tested-by: Jingguo Fu <jingguox.fu at intel.com>\n\nThis patch includes 1 files, and has been tested by Intel.\nPlease see information as the following:\n\nHost:\nFedora 19 x86_64, Linux Kernel 3.9.0, GCC 4.8.2  Intel Xeon CPU E5-2680 v2 @ 2.80GHz\n NIC: Intel Niantic 82599, Intel i350, Intel 82580 and Intel 82576\n\nGuest:\nFedora 16 x86_64, Linux Kernel 3.4.2, GCC 4.6.3 Qemu emulator 1.4.2\n\nThis patch tests with user space vhost driver library patch.\nWe verified zero copy and one copy test cases for functional and performance.\n\nThis patch depend on the two patches:\n[dpdk-dev] [PATCH] virtio: Update max RX packet length http://www.dpdk.org/ml/archives/dev/2014-September/005107.html\n[dpdk-dev] [PATCH] virtio: Fix vring entry number issue http://www.dpdk.org/ml/archives/dev/2014-September/005170.html\n\n\n-----Original Message-----\nFrom: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ouyang Changchun\nSent: Thursday, August 14, 2014 16:55\nTo: dev@dpdk.org\nSubject: [dpdk-dev] [PATCH v3] virtio: Support mergeable buffer in virtio pmd\n\nv3 change:\n- Investigate the comments from Huawei and fix one potential issue of wrong offset to\n  the number of descriptor in buffer; also fix other tiny comments.\n\nv2 change:\n- Resolve conflicts with the tip code;\n- And resolve 2 issues:\n   -- fix mbuf leak when discard an uncompleted packet.\n   -- refine pkt.data to point to actual payload data start point.\n \nv1 change:\n- This patch supports mergeable buffer feature in DPDK based virtio PMD, which can\n  receive jumbo frame with larger size, like 3K, 4K or even 9K.\n\nSigned-off-by: Changchun Ouyang <changchun.ouyang@intel.com>\nAcked-by: Huawei Xie <huawei.xie@intel.com>\n---\n lib/librte_pmd_virtio/virtio_ethdev.c |  20 +--\n lib/librte_pmd_virtio/virtio_ethdev.h |   3 +\n lib/librte_pmd_virtio/virtio_rxtx.c   | 221 +++++++++++++++++++++++++++++-----\n 3 files changed, 207 insertions(+), 37 deletions(-)",
    "diff": "diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c b/lib/librte_pmd_virtio/virtio_ethdev.c\nindex b9f5529..535d798 100644\n--- a/lib/librte_pmd_virtio/virtio_ethdev.c\n+++ b/lib/librte_pmd_virtio/virtio_ethdev.c\n@@ -337,7 +337,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_tvq%d_hdrzone\",\n \t\t\tdev->data->port_id, queue_idx);\n \t\tvq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,\n-\t\t\tvq_size * sizeof(struct virtio_net_hdr),\n+\t\t\tvq_size * hw->vtnet_hdr_size,\n \t\t\tsocket_id, 0, CACHE_LINE_SIZE);\n \t\tif (vq->virtio_net_hdr_mz == NULL) {\n \t\t\trte_free(vq);\n@@ -346,7 +346,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\tvq->virtio_net_hdr_mem =\n \t\t\tvq->virtio_net_hdr_mz->phys_addr;\n \t\tmemset(vq->virtio_net_hdr_mz->addr, 0,\n-\t\t\tvq_size * sizeof(struct virtio_net_hdr));\n+\t\t\tvq_size * hw->vtnet_hdr_size);\n \t} else if (queue_type == VTNET_CQ) {\n \t\t/* Allocate a page for control vq command, data and status */\n \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_cvq_hdrzone\",\n@@ -571,9 +571,6 @@ virtio_negotiate_features(struct virtio_hw *hw)\n \tmask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;\n \tmask |= VTNET_LRO_FEATURES;\n \n-\t/* rx_mbuf should not be in multiple merged segments */\n-\tmask |= VIRTIO_NET_F_MRG_RXBUF;\n-\n \t/* not negotiating INDIRECT descriptor table support */\n \tmask |= VIRTIO_RING_F_INDIRECT_DESC;\n \n@@ -746,7 +743,6 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,\n \t}\n \n \teth_dev->dev_ops = &virtio_eth_dev_ops;\n-\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n \teth_dev->tx_pkt_burst = &virtio_xmit_pkts;\n \n \tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n@@ -801,10 +797,13 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,\n \tvirtio_negotiate_features(hw);\n \n \t/* Setting up rx_header size for the device */\n-\tif 
(vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))\n+\tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n+\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n-\telse\n+\t} else {\n+\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);\n+\t}\n \n \t/* Allocate memory for storing MAC addresses */\n \teth_dev->data->mac_addrs = rte_zmalloc(\"virtio\", ETHER_ADDR_LEN, 0);\n@@ -1009,7 +1008,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \n \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n \t\t\t\t\tdev->data->rx_queues[i])) != NULL) {\n-\t\t\trte_pktmbuf_free_seg(buf);\n+\t\t\trte_pktmbuf_free(buf);\n \t\t\tmbuf_num++;\n \t\t}\n \n@@ -1028,7 +1027,8 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \t\tmbuf_num = 0;\n \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n \t\t\t\t\tdev->data->tx_queues[i])) != NULL) {\n-\t\t\trte_pktmbuf_free_seg(buf);\n+\t\t\trte_pktmbuf_free(buf);\n+\n \t\t\tmbuf_num++;\n \t\t}\n \ndiff --git a/lib/librte_pmd_virtio/virtio_ethdev.h b/lib/librte_pmd_virtio/virtio_ethdev.h\nindex 858e644..d2e1eed 100644\n--- a/lib/librte_pmd_virtio/virtio_ethdev.h\n+++ b/lib/librte_pmd_virtio/virtio_ethdev.h\n@@ -104,6 +104,9 @@ int  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \n+uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\n \ndiff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c\nindex fcd8bd1..0b10108 100644\n--- a/lib/librte_pmd_virtio/virtio_rxtx.c\n+++ b/lib/librte_pmd_virtio/virtio_rxtx.c\n@@ -146,6 +146,7 @@ static inline int\n 
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n {\n \tstruct vq_desc_extra *dxp;\n+\tstruct virtio_hw *hw = vq->hw;\n \tstruct vring_desc *start_dp;\n \tuint16_t needed = 1;\n \tuint16_t head_idx, idx;\n@@ -165,9 +166,11 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n \tdxp->ndescs = needed;\n \n \tstart_dp = vq->vq_ring.desc;\n-\tstart_dp[idx].addr  =\n-\t\t(uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));\n-\tstart_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);\n+\tstart_dp[idx].addr =\n+\t\t(uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM\n+\t\t- hw->vtnet_hdr_size);\n+\tstart_dp[idx].len =\n+\t\tcookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;\n \tstart_dp[idx].flags =  VRING_DESC_F_WRITE;\n \tidx = start_dp[idx].next;\n \tvq->vq_desc_head_idx = idx;\n@@ -184,8 +187,10 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)\n {\n \tstruct vq_desc_extra *dxp;\n \tstruct vring_desc *start_dp;\n-\tuint16_t needed = 2;\n+\tuint16_t seg_num = cookie->pkt.nb_segs;\n+\tuint16_t needed = 1 + seg_num;\n \tuint16_t head_idx, idx;\n+\tuint16_t head_size = txvq->hw->vtnet_hdr_size;\n \n \tif (unlikely(txvq->vq_free_cnt == 0))\n \t\treturn -ENOSPC;\n@@ -198,19 +203,25 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)\n \tidx = head_idx;\n \tdxp = &txvq->vq_descx[idx];\n \tif (dxp->cookie != NULL)\n-\t\trte_pktmbuf_free_seg(dxp->cookie);\n+\t\trte_pktmbuf_free(dxp->cookie);\n \tdxp->cookie = (void *)cookie;\n \tdxp->ndescs = needed;\n \n \tstart_dp = txvq->vq_ring.desc;\n-\tstart_dp[idx].addr  =\n-\t\ttxvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);\n-\tstart_dp[idx].len   = sizeof(struct virtio_net_hdr);\n+\tstart_dp[idx].addr =\n+\t\ttxvq->virtio_net_hdr_mem + idx * head_size;\n+\tstart_dp[idx].len = (uint32_t)head_size;\n \tstart_dp[idx].flags = 
VRING_DESC_F_NEXT;\n-\tidx = start_dp[idx].next;\n-\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n-\tstart_dp[idx].len   = cookie->pkt.data_len;\n-\tstart_dp[idx].flags = 0;\n+\n+\tfor (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {\n+\t\tidx = start_dp[idx].next;\n+\t\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n+\t\tstart_dp[idx].len   = cookie->pkt.data_len;\n+\t\tstart_dp[idx].flags = VRING_DESC_F_NEXT;\n+\t\tcookie = cookie->pkt.next;\n+\t}\n+\n+\tstart_dp[idx].flags &= ~VRING_DESC_F_NEXT;\n \tidx = start_dp[idx].next;\n \ttxvq->vq_desc_head_idx = idx;\n \tif (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n@@ -284,7 +295,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n \n \t\t\tif (error) {\n-\t\t\t\trte_pktmbuf_free_seg(m);\n+\t\t\t\trte_pktmbuf_free(m);\n \t\t\t\tbreak;\n \t\t\t}\n \t\t\tnbufs++;\n@@ -423,7 +434,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)\n \terror = virtqueue_enqueue_recv_refill(vq, m);\n \tif (unlikely(error)) {\n \t\tRTE_LOG(ERR, PMD, \"cannot requeue discarded mbuf\");\n-\t\trte_pktmbuf_free_seg(m);\n+\t\trte_pktmbuf_free(m);\n \t}\n }\n \n@@ -433,13 +444,13 @@ uint16_t\n virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n {\n \tstruct virtqueue *rxvq = rx_queue;\n-\tstruct virtio_hw *hw = rxvq->hw;\n \tstruct rte_mbuf *rxm, *new_mbuf;\n \tuint16_t nb_used, num, nb_rx = 0;\n \tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n \tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n \tint error;\n \tuint32_t i, nb_enqueued = 0;\n+\tconst uint32_t hdr_size = sizeof(struct virtio_net_hdr);\n \n \tnb_used = VIRTQUEUE_NUSED(rxvq);\n \n@@ -460,8 +471,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \n \t\tPMD_RX_LOG(DEBUG, \"packet len:%d\", len[i]);\n \n-\t\tif (unlikely(len[i]\n-\t\t\t     < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {\n+\t\tif (unlikely(len[i] < 
hdr_size + ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n \t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n@@ -471,17 +481,16 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \n \t\trxm->pkt.in_port = rxvq->port_id;\n \t\trxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;\n+\n \t\trxm->pkt.nb_segs = 1;\n \t\trxm->pkt.next = NULL;\n-\t\trxm->pkt.pkt_len  = (uint32_t)(len[i]\n-\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n-\t\trxm->pkt.data_len = (uint16_t)(len[i]\n-\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n+\t\trxm->pkt.pkt_len = (uint32_t)(len[i] - hdr_size);\n+\t\trxm->pkt.data_len = (uint16_t)(len[i] - hdr_size);\n \n \t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n \n \t\trx_pkts[nb_rx++] = rxm;\n-\t\trxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);\n+\t\trxvq->bytes += rx_pkts[nb_rx - 1]->pkt.pkt_len;\n \t}\n \n \trxvq->packets += nb_rx;\n@@ -498,11 +507,165 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t}\n \t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n \t\tif (unlikely(error)) {\n-\t\t\trte_pktmbuf_free_seg(new_mbuf);\n+\t\t\trte_pktmbuf_free(new_mbuf);\n \t\t\tbreak;\n \t\t}\n \t\tnb_enqueued++;\n \t}\n+\n+\tif (likely(nb_enqueued)) {\n+\t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n+\t\t\tvirtqueue_notify(rxvq);\n+\t\t\tPMD_RX_LOG(DEBUG, \"Notified\\n\");\n+\t\t}\n+\t}\n+\n+\tvq_update_avail_idx(rxvq);\n+\n+\treturn nb_rx;\n+}\n+\n+uint16_t\n+virtio_recv_mergeable_pkts(void *rx_queue,\n+\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct virtqueue *rxvq = rx_queue;\n+\tstruct rte_mbuf *rxm, *new_mbuf;\n+\tuint16_t nb_used, num, nb_rx = 0;\n+\tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *prev;\n+\tint error;\n+\tuint32_t i = 0, nb_enqueued = 0;\n+\tuint32_t seg_num = 0;\n+\tuint16_t extra_idx = 0;\n+\tuint32_t seg_res = 
0;\n+\tconst uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n+\n+\tnb_used = VIRTQUEUE_NUSED(rxvq);\n+\n+\trmb();\n+\n+\tif (nb_used == 0)\n+\t\treturn 0;\n+\n+\tPMD_RX_LOG(DEBUG, \"used:%d\\n\", nb_used);\n+\n+\twhile (i < nb_used) {\n+\t\tstruct virtio_net_hdr_mrg_rxbuf *header;\n+\n+\t\tif (nb_rx == nb_pkts)\n+\t\t\tbreak;\n+\n+\t\tnum = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);\n+\t\tif (num != 1)\n+\t\t\tcontinue;\n+\n+\t\ti++;\n+\n+\t\tPMD_RX_LOG(DEBUG, \"dequeue:%d\\n\", num);\n+\t\tPMD_RX_LOG(DEBUG, \"packet len:%d\\n\", len[0]);\n+\n+\t\trxm = rcv_pkts[0];\n+\n+\t\tif (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {\n+\t\t\tPMD_RX_LOG(ERR, \"Packet drop\\n\");\n+\t\t\tnb_enqueued++;\n+\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n+\t\t\trxvq->errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\theader = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +\n+\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size);\n+\t\tseg_num = header->num_buffers;\n+\n+\t\tif (seg_num == 0)\n+\t\t\tseg_num = 1;\n+\n+\t\trxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;\n+\t\trxm->pkt.nb_segs = seg_num;\n+\t\trxm->pkt.next = NULL;\n+\t\trxm->pkt.pkt_len = (uint32_t)(len[0] - hdr_size);\n+\t\trxm->pkt.data_len = (uint16_t)(len[0] - hdr_size);\n+\n+\t\trxm->pkt.in_port = rxvq->port_id;\n+\t\trx_pkts[nb_rx] = rxm;\n+\t\tprev = rxm;\n+\n+\t\tseg_res = seg_num - 1;\n+\n+\t\twhile (seg_res != 0) {\n+\t\t\t/*\n+\t\t\t * Get extra segments for current uncompleted packet.\n+\t\t\t */\n+\t\t\tuint32_t  rcv_cnt =\n+\t\t\t\tRTE_MIN(seg_res, RTE_DIM(rcv_pkts));\n+\t\t\tif (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {\n+\t\t\t\tuint32_t rx_num =\n+\t\t\t\t\tvirtqueue_dequeue_burst_rx(rxvq,\n+\t\t\t\t\trcv_pkts, len, rcv_cnt);\n+\t\t\t\ti += rx_num;\n+\t\t\t\trcv_cnt = rx_num;\n+\t\t\t} else {\n+\t\t\t\tPMD_RX_LOG(ERR,\n+\t\t\t\t\t\"No enough segments for packet.\\n\");\n+\t\t\t\tnb_enqueued++;\n+\t\t\t\tvirtio_discard_rxbuf(rxvq, 
rxm);\n+\t\t\t\trxvq->errors++;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\textra_idx = 0;\n+\n+\t\t\twhile (extra_idx < rcv_cnt) {\n+\t\t\t\trxm = rcv_pkts[extra_idx];\n+\n+\t\t\t\trxm->pkt.data =\n+\t\t\t\t\t(char *)rxm->buf_addr +\n+\t\t\t\t\tRTE_PKTMBUF_HEADROOM - hdr_size;\n+\t\t\t\trxm->pkt.next = NULL;\n+\t\t\t\trxm->pkt.pkt_len = (uint32_t)(len[extra_idx]);\n+\t\t\t\trxm->pkt.data_len = (uint16_t)(len[extra_idx]);\n+\n+\t\t\t\tif (prev)\n+\t\t\t\t\tprev->pkt.next = rxm;\n+\n+\t\t\t\tprev = rxm;\n+\t\t\t\trx_pkts[nb_rx]->pkt.pkt_len += rxm->pkt.pkt_len;\n+\t\t\t\textra_idx++;\n+\t\t\t};\n+\t\t\tseg_res -= rcv_cnt;\n+\t\t}\n+\n+\t\tVIRTIO_DUMP_PACKET(rx_pkts[nb_rx],\n+\t\t\trx_pkts[nb_rx]->pkt.data_len);\n+\n+\t\trxvq->bytes += rx_pkts[nb_rx]->pkt.pkt_len;\n+\t\tnb_rx++;\n+\t}\n+\n+\trxvq->packets += nb_rx;\n+\n+\t/* Allocate new mbuf for the used descriptor */\n+\terror = ENOSPC;\n+\twhile (likely(!virtqueue_full(rxvq))) {\n+\t\tnew_mbuf = rte_rxmbuf_alloc(rxvq->mpool);\n+\t\tif (unlikely(new_mbuf == NULL)) {\n+\t\t\tstruct rte_eth_dev *dev\n+\t\t\t\t= &rte_eth_devices[rxvq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n+\t\tif (unlikely(error)) {\n+\t\t\trte_pktmbuf_free(new_mbuf);\n+\t\t\tbreak;\n+\t\t}\n+\t\tnb_enqueued++;\n+\t}\n+\n \tif (likely(nb_enqueued)) {\n \t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n \t\t\tvirtqueue_notify(rxvq);\n@@ -536,12 +699,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \tnum = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);\n \n \twhile (nb_tx < nb_pkts) {\n-\t\tif (virtqueue_full(txvq) && num) {\n+\t\tint need = tx_pkts[nb_tx]->pkt.nb_segs - txvq->vq_free_cnt;\n+\t\tint deq_cnt = RTE_MIN(need, (int)num);\n+\n+\t\tnum -= (deq_cnt > 0) ? 
deq_cnt : 0;\n+\t\twhile (deq_cnt > 0) {\n \t\t\tvirtqueue_dequeue_pkt_tx(txvq);\n-\t\t\tnum--;\n+\t\t\tdeq_cnt--;\n \t\t}\n \n-\t\tif (!virtqueue_full(txvq)) {\n+\t\tif (tx_pkts[nb_tx]->pkt.nb_segs <= txvq->vq_free_cnt) {\n \t\t\ttxm = tx_pkts[nb_tx];\n \t\t\t/* Enqueue Packet buffers */\n \t\t\terror = virtqueue_enqueue_xmit(txvq, txm);\n@@ -555,7 +722,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t\tbreak;\n \t\t\t}\n \t\t\tnb_tx++;\n-\t\t\ttxvq->bytes += txm->pkt.data_len;\n+\t\t\ttxvq->bytes += txm->pkt.pkt_len;\n \t\t} else {\n \t\t\tPMD_TX_LOG(ERR, \"No free tx descriptors to transmit\");\n \t\t\tbreak;\n",
    "prefixes": [
        "dpdk-dev",
        "v3"
    ]
}