Show a patch.

GET /api/patches/96/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96,
    "url": "https://patches.dpdk.org/api/patches/96/",
    "web_url": "https://patches.dpdk.org/patch/96/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk"
    },
    "msgid": "<1406268195-24010-1-git-send-email-changchun.ouyang@intel.com>",
    "date": "2014-07-25T06:03:15",
    "name": "[dpdk-dev,v2] virtio: Support mergeable buffer in virtio PMD",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "43dcc213a5da86b25d17b7cc21e8846ce4712a17",
    "submitter": {
        "id": 31,
        "url": "https://patches.dpdk.org/api/people/31/",
        "name": "Ouyang Changchun",
        "email": "changchun.ouyang@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/patch/96/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/96/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/96/checks/",
    "tags": {},
    "headers": {
        "X-Mailman-Version": "2.1.15",
        "Received": [
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 18728592E\n\tfor <dev@dpdk.org>; Fri, 25 Jul 2014 08:01:53 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga101.fm.intel.com with ESMTP; 24 Jul 2014 23:03:24 -0700",
            "from shvmail01.sh.intel.com ([10.239.29.42])\n\tby fmsmga001.fm.intel.com with ESMTP; 24 Jul 2014 23:03:23 -0700",
            "from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com\n\t[10.239.29.89])\n\tby shvmail01.sh.intel.com with ESMTP id s6P63LLI010941;\n\tFri, 25 Jul 2014 14:03:21 +0800",
            "from shecgisg004.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid s6P63H6T024111; Fri, 25 Jul 2014 14:03:19 +0800",
            "(from couyang@localhost)\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id s6P63Htu024107; \n\tFri, 25 Jul 2014 14:03:17 +0800"
        ],
        "From": "Ouyang Changchun <changchun.ouyang@intel.com>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "Message-Id": "<1406268195-24010-1-git-send-email-changchun.ouyang@intel.com>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "Date": "Fri, 25 Jul 2014 14:03:15 +0800",
        "Subject": "[dpdk-dev] [PATCH v2] virtio: Support mergeable buffer in virtio PMD",
        "Precedence": "list",
        "X-List-Received-Date": "Fri, 25 Jul 2014 06:01:55 -0000",
        "X-BeenThere": "dev@dpdk.org",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "Return-Path": "<couyang@shecgisg004.sh.intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "X-ExtLoop1": "1",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "To": "dev@dpdk.org",
        "X-IronPort-AV": "E=Sophos;i=\"5.01,728,1400050800\"; d=\"scan'208\";a=\"567027626\""
    },
    "content": "v2 change: \n- Resolve conflicts with the tip code; \n- And resolve 2 issues: \n   -- fix mbuf leak when discard an uncompleted packet.\n   -- refine pkt.data to point to actual payload data start point.  \n\nv1 change: \nThis patch supports mergeable buffer feature in DPDK based virtio PMD, which can\nreceive jumbo frame with larger size, like 3K, 4K or even 9K.\n\nSigned-off-by: Changchun Ouyang <changchun.ouyang@intel.com>\n---\n lib/librte_pmd_virtio/virtio_ethdev.c |  20 ++--\n lib/librte_pmd_virtio/virtio_ethdev.h |   3 +\n lib/librte_pmd_virtio/virtio_rxtx.c   | 206 +++++++++++++++++++++++++++++-----\n 3 files changed, 194 insertions(+), 35 deletions(-)",
    "diff": "diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c b/lib/librte_pmd_virtio/virtio_ethdev.c\nindex b9f5529..535d798 100644\n--- a/lib/librte_pmd_virtio/virtio_ethdev.c\n+++ b/lib/librte_pmd_virtio/virtio_ethdev.c\n@@ -337,7 +337,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_tvq%d_hdrzone\",\n \t\t\tdev->data->port_id, queue_idx);\n \t\tvq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,\n-\t\t\tvq_size * sizeof(struct virtio_net_hdr),\n+\t\t\tvq_size * hw->vtnet_hdr_size,\n \t\t\tsocket_id, 0, CACHE_LINE_SIZE);\n \t\tif (vq->virtio_net_hdr_mz == NULL) {\n \t\t\trte_free(vq);\n@@ -346,7 +346,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\tvq->virtio_net_hdr_mem =\n \t\t\tvq->virtio_net_hdr_mz->phys_addr;\n \t\tmemset(vq->virtio_net_hdr_mz->addr, 0,\n-\t\t\tvq_size * sizeof(struct virtio_net_hdr));\n+\t\t\tvq_size * hw->vtnet_hdr_size);\n \t} else if (queue_type == VTNET_CQ) {\n \t\t/* Allocate a page for control vq command, data and status */\n \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_cvq_hdrzone\",\n@@ -571,9 +571,6 @@ virtio_negotiate_features(struct virtio_hw *hw)\n \tmask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;\n \tmask |= VTNET_LRO_FEATURES;\n \n-\t/* rx_mbuf should not be in multiple merged segments */\n-\tmask |= VIRTIO_NET_F_MRG_RXBUF;\n-\n \t/* not negotiating INDIRECT descriptor table support */\n \tmask |= VIRTIO_RING_F_INDIRECT_DESC;\n \n@@ -746,7 +743,6 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,\n \t}\n \n \teth_dev->dev_ops = &virtio_eth_dev_ops;\n-\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n \teth_dev->tx_pkt_burst = &virtio_xmit_pkts;\n \n \tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n@@ -801,10 +797,13 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,\n \tvirtio_negotiate_features(hw);\n \n \t/* Setting up rx_header size for the device */\n-\tif 
(vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))\n+\tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n+\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n-\telse\n+\t} else {\n+\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);\n+\t}\n \n \t/* Allocate memory for storing MAC addresses */\n \teth_dev->data->mac_addrs = rte_zmalloc(\"virtio\", ETHER_ADDR_LEN, 0);\n@@ -1009,7 +1008,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \n \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n \t\t\t\t\tdev->data->rx_queues[i])) != NULL) {\n-\t\t\trte_pktmbuf_free_seg(buf);\n+\t\t\trte_pktmbuf_free(buf);\n \t\t\tmbuf_num++;\n \t\t}\n \n@@ -1028,7 +1027,8 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \t\tmbuf_num = 0;\n \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n \t\t\t\t\tdev->data->tx_queues[i])) != NULL) {\n-\t\t\trte_pktmbuf_free_seg(buf);\n+\t\t\trte_pktmbuf_free(buf);\n+\n \t\t\tmbuf_num++;\n \t\t}\n \ndiff --git a/lib/librte_pmd_virtio/virtio_ethdev.h b/lib/librte_pmd_virtio/virtio_ethdev.h\nindex 858e644..d2e1eed 100644\n--- a/lib/librte_pmd_virtio/virtio_ethdev.h\n+++ b/lib/librte_pmd_virtio/virtio_ethdev.h\n@@ -104,6 +104,9 @@ int  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \n+uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\n \ndiff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c\nindex fcd8bd1..3d81b34 100644\n--- a/lib/librte_pmd_virtio/virtio_rxtx.c\n+++ b/lib/librte_pmd_virtio/virtio_rxtx.c\n@@ -146,6 +146,7 @@ static inline int\n 
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n {\n \tstruct vq_desc_extra *dxp;\n+\tstruct virtio_hw *hw = vq->hw;\n \tstruct vring_desc *start_dp;\n \tuint16_t needed = 1;\n \tuint16_t head_idx, idx;\n@@ -165,9 +166,11 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n \tdxp->ndescs = needed;\n \n \tstart_dp = vq->vq_ring.desc;\n-\tstart_dp[idx].addr  =\n-\t\t(uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));\n-\tstart_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);\n+\tstart_dp[idx].addr =\n+\t\t(uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM\n+\t\t- hw->vtnet_hdr_size);\n+\tstart_dp[idx].len =\n+\t\tcookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;\n \tstart_dp[idx].flags =  VRING_DESC_F_WRITE;\n \tidx = start_dp[idx].next;\n \tvq->vq_desc_head_idx = idx;\n@@ -184,8 +187,10 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)\n {\n \tstruct vq_desc_extra *dxp;\n \tstruct vring_desc *start_dp;\n-\tuint16_t needed = 2;\n+\tuint16_t seg_num = cookie->pkt.nb_segs;\n+\tuint16_t needed = 1 + seg_num;\n \tuint16_t head_idx, idx;\n+\tuint16_t head_size = txvq->hw->vtnet_hdr_size;\n \n \tif (unlikely(txvq->vq_free_cnt == 0))\n \t\treturn -ENOSPC;\n@@ -198,19 +203,25 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)\n \tidx = head_idx;\n \tdxp = &txvq->vq_descx[idx];\n \tif (dxp->cookie != NULL)\n-\t\trte_pktmbuf_free_seg(dxp->cookie);\n+\t\trte_pktmbuf_free(dxp->cookie);\n \tdxp->cookie = (void *)cookie;\n \tdxp->ndescs = needed;\n \n \tstart_dp = txvq->vq_ring.desc;\n-\tstart_dp[idx].addr  =\n-\t\ttxvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);\n-\tstart_dp[idx].len   = sizeof(struct virtio_net_hdr);\n+\tstart_dp[idx].addr =\n+\t\ttxvq->virtio_net_hdr_mem + idx * head_size;\n+\tstart_dp[idx].len = (uint32_t)head_size;\n \tstart_dp[idx].flags = 
VRING_DESC_F_NEXT;\n-\tidx = start_dp[idx].next;\n-\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n-\tstart_dp[idx].len   = cookie->pkt.data_len;\n-\tstart_dp[idx].flags = 0;\n+\n+\tfor (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {\n+\t\tidx = start_dp[idx].next;\n+\t\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n+\t\tstart_dp[idx].len   = cookie->pkt.data_len;\n+\t\tstart_dp[idx].flags = VRING_DESC_F_NEXT;\n+\t\tcookie = cookie->pkt.next;\n+\t}\n+\n+\tstart_dp[idx].flags &= ~VRING_DESC_F_NEXT;\n \tidx = start_dp[idx].next;\n \ttxvq->vq_desc_head_idx = idx;\n \tif (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n@@ -284,7 +295,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n \n \t\t\tif (error) {\n-\t\t\t\trte_pktmbuf_free_seg(m);\n+\t\t\t\trte_pktmbuf_free(m);\n \t\t\t\tbreak;\n \t\t\t}\n \t\t\tnbufs++;\n@@ -423,7 +434,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)\n \terror = virtqueue_enqueue_recv_refill(vq, m);\n \tif (unlikely(error)) {\n \t\tRTE_LOG(ERR, PMD, \"cannot requeue discarded mbuf\");\n-\t\trte_pktmbuf_free_seg(m);\n+\t\trte_pktmbuf_free(m);\n \t}\n }\n \n@@ -471,17 +482,158 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \n \t\trxm->pkt.in_port = rxvq->port_id;\n \t\trxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;\n+\n \t\trxm->pkt.nb_segs = 1;\n \t\trxm->pkt.next = NULL;\n-\t\trxm->pkt.pkt_len  = (uint32_t)(len[i]\n-\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n-\t\trxm->pkt.data_len = (uint16_t)(len[i]\n-\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n+\t\trxm->pkt.pkt_len = (uint32_t)(len[i] - hw->vtnet_hdr_size);\n+\t\trxm->pkt.data_len = (uint16_t)(len[i] - hw->vtnet_hdr_size);\n \n \t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n \n \t\trx_pkts[nb_rx++] = rxm;\n-\t\trxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);\n+\t\trxvq->bytes += rx_pkts[nb_rx - 
1]->pkt.pkt_len;\n+\t}\n+\n+\trxvq->packets += nb_rx;\n+\n+\t/* Allocate new mbuf for the used descriptor */\n+\terror = ENOSPC;\n+\twhile (likely(!virtqueue_full(rxvq))) {\n+\t\tnew_mbuf = rte_rxmbuf_alloc(rxvq->mpool);\n+\t\tif (unlikely(new_mbuf == NULL)) {\n+\t\t\tstruct rte_eth_dev *dev\n+\t\t\t\t= &rte_eth_devices[rxvq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n+\t\tif (unlikely(error)) {\n+\t\t\trte_pktmbuf_free(new_mbuf);\n+\t\t\tbreak;\n+\t\t}\n+\t\tnb_enqueued++;\n+\t}\n+\n+\tif (likely(nb_enqueued)) {\n+\t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n+\t\t\tvirtqueue_notify(rxvq);\n+\t\t\tPMD_RX_LOG(DEBUG, \"Notified\\n\");\n+\t\t}\n+\t}\n+\n+\tvq_update_avail_idx(rxvq);\n+\n+\treturn nb_rx;\n+}\n+\n+uint16_t\n+virtio_recv_mergeable_pkts(void *rx_queue,\n+\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct virtqueue *rxvq = rx_queue;\n+\tstruct virtio_hw *hw = rxvq->hw;\n+\tstruct rte_mbuf *rxm, *new_mbuf;\n+\tuint16_t nb_used, num, nb_rx = 0;\n+\tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n+\tstruct rte_mbuf *prev;\n+\tint error;\n+\tuint32_t i = 0, nb_enqueued = 0;\n+\tuint32_t seg_num = 0;\n+\tuint16_t extra_idx = 0;\n+\n+\tnb_used = VIRTQUEUE_NUSED(rxvq);\n+\n+\trmb();\n+\n+\tif (nb_used == 0)\n+\t\treturn 0;\n+\n+\twhile (i < nb_used) {\n+\t\tstruct virtio_net_hdr_mrg_rxbuf *header;\n+\t\tchar *head_ptr;\n+\n+\t\tif (nb_rx >= nb_pkts)\n+\t\t\tbreak;\n+\n+\t\tnum = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);\n+\t\ti += num;\n+\n+\t\tPMD_RX_LOG(DEBUG, \"used:%d dequeue:%d\\n\", nb_used, num);\n+\t\tPMD_RX_LOG(DEBUG, \"packet len:%d\\n\", len[i]);\n+\n+\t\trxm = rcv_pkts[0];\n+\t\textra_idx = 0;\n+\n+\t\tif (unlikely(len[0]\n+\t\t\t     < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {\n+\t\t\tPMD_RX_LOG(ERR, \"Packet 
drop\\n\");\n+\t\t\tnb_enqueued++;\n+\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n+\t\t\trxvq->errors++;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\thead_ptr = (char *)rxm->pkt.data;\n+\t\thead_ptr -= hw->vtnet_hdr_size;\n+\t\theader = (struct virtio_net_hdr_mrg_rxbuf *)head_ptr;\n+\t\tseg_num = header->num_buffers;\n+\n+\t\tif (seg_num == 0)\n+\t\t\tseg_num = 1;\n+\n+\t\trxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;\n+\t\trxm->pkt.nb_segs = seg_num;\n+\t\trxm->pkt.next = NULL;\n+\t\trxm->pkt.pkt_len = (uint32_t)(len[0] - hw->vtnet_hdr_size);\n+\t\trxm->pkt.data_len = (uint16_t)(len[0] - hw->vtnet_hdr_size);\n+\n+\t\trxm->pkt.in_port = rxvq->port_id;\n+\t\trx_pkts[nb_rx] = rxm;\n+\n+\t\tprev = rxm;\n+\n+\t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n+\n+\t\t/*\n+\t\t * Get extra segments for current uncompleted packet.\n+\t\t */\n+\t\tif (VIRTQUEUE_NUSED(rxvq) >= seg_num - 1) {\n+\t\t\tuint32_t rx_num = virtqueue_dequeue_burst_rx(rxvq,\n+\t\t\t\trcv_pkts, len, seg_num - 1);\n+\t\t\ti += rx_num;\n+\t\t} else {\n+\t\t\tPMD_RX_LOG(ERR, \"No enough segments for packet.\\n\");\n+\t\t\tnb_enqueued++;\n+\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n+\t\t\trxvq->errors++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\twhile (extra_idx < seg_num - 1) {\n+\t\t\trxm = rcv_pkts[extra_idx];\n+\n+\t\t\trxm->pkt.data =\n+\t\t\t\t(char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM\n+\t\t\t\t- hw->vtnet_hdr_size;\n+\t\t\trxm->pkt.next = NULL;\n+\t\t\trxm->pkt.pkt_len = (uint32_t)(len[extra_idx]);\n+\t\t\trxm->pkt.data_len = (uint16_t)(len[extra_idx]);\n+\n+\t\t\tif (prev)\n+\t\t\t\tprev->pkt.next = rxm;\n+\n+\t\t\tprev = rxm;\n+\t\t\trx_pkts[nb_rx]->pkt.pkt_len += rxm->pkt.pkt_len;\n+\n+\t\t\textra_idx++;\n+\n+\t\t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n+\t\t};\n+\n+\t\trxvq->bytes += rx_pkts[nb_rx]->pkt.pkt_len;\n+\t\tnb_rx++;\n \t}\n \n \trxvq->packets += nb_rx;\n@@ -498,11 +650,12 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t}\n \t\terror = 
virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n \t\tif (unlikely(error)) {\n-\t\t\trte_pktmbuf_free_seg(new_mbuf);\n+\t\t\trte_pktmbuf_free(new_mbuf);\n \t\t\tbreak;\n \t\t}\n \t\tnb_enqueued++;\n \t}\n+\n \tif (likely(nb_enqueued)) {\n \t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n \t\t\tvirtqueue_notify(rxvq);\n@@ -536,12 +689,15 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \tnum = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);\n \n \twhile (nb_tx < nb_pkts) {\n-\t\tif (virtqueue_full(txvq) && num) {\n-\t\t\tvirtqueue_dequeue_pkt_tx(txvq);\n-\t\t\tnum--;\n+\t\tint need = tx_pkts[nb_tx]->pkt.nb_segs - txvq->vq_free_cnt;\n+\t\tif ((need > 0) && (num > 0)) {\n+\t\t\tdo {\n+\t\t\t\tvirtqueue_dequeue_pkt_tx(txvq);\n+\t\t\t\tnum--;\n+\t\t\t} while (num > 0);\n \t\t}\n \n-\t\tif (!virtqueue_full(txvq)) {\n+\t\tif (tx_pkts[nb_tx]->pkt.nb_segs <= txvq->vq_free_cnt) {\n \t\t\ttxm = tx_pkts[nb_tx];\n \t\t\t/* Enqueue Packet buffers */\n \t\t\terror = virtqueue_enqueue_xmit(txvq, txm);\n@@ -555,7 +711,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t\tbreak;\n \t\t\t}\n \t\t\tnb_tx++;\n-\t\t\ttxvq->bytes += txm->pkt.data_len;\n+\t\t\ttxvq->bytes += txm->pkt.pkt_len;\n \t\t} else {\n \t\t\tPMD_TX_LOG(ERR, \"No free tx descriptors to transmit\");\n \t\t\tbreak;\n",
    "prefixes": [
        "dpdk-dev",
        "v2"
    ]
}