List comments

List the comments on a patch. The optional order query parameter sorts the results; prefixing a field with "-" (as in order=-id) sorts in descending order.

GET /api/patches/96/comments/?order=-id
HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

[
    {
        "id": 247,
        "web_url": "https://patches.dpdk.org/comment/247/",
        "msgid": "<C37D651A908B024F974696C65296B57B0F24595E@SHSMSX101.ccr.corp.intel.com>",
        "date": "2014-07-25T08:05:59",
        "subject": "Re: [dpdk-dev] [PATCH v2] virtio: Support mergeable buffer in\n\tvirtio PMD",
        "submitter": {
            "id": 16,
            "url": "https://patches.dpdk.org/api/people/16/",
            "name": "Huawei Xie",
            "email": "huawei.xie@intel.com"
        },
        "content": "> -----Original Message-----\n> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ouyang Changchun\n> Sent: Friday, July 25, 2014 2:03 PM\n> To: dev@dpdk.org\n> Subject: [dpdk-dev] [PATCH v2] virtio: Support mergeable buffer in virtio PMD\n> \n> v2 change:\n> - Resolve conflicts wiht the tip code;\n> - And resolve 2 issues:\n>    -- fix mbuf leak when discard a uncompleted packet.\n>    -- refine pkt.data to point to actual payload data start point.\n> \n> v1 change:\n> This patch supports mergeable buffer feature in DPDK based virtio PMD, which\n> can\n> receive jumbo frame with larger size, like 3K, 4K or even 9K.\n> \n> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>\n> ---\n>  lib/librte_pmd_virtio/virtio_ethdev.c |  20 ++--\n>  lib/librte_pmd_virtio/virtio_ethdev.h |   3 +\n>  lib/librte_pmd_virtio/virtio_rxtx.c   | 206 +++++++++++++++++++++++++++++---\n> --\n>  3 files changed, 194 insertions(+), 35 deletions(-)\n> \n> diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c\n> b/lib/librte_pmd_virtio/virtio_ethdev.c\n> index b9f5529..535d798 100644\n> --- a/lib/librte_pmd_virtio/virtio_ethdev.c\n> +++ b/lib/librte_pmd_virtio/virtio_ethdev.c\n> @@ -337,7 +337,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n>  \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_tvq%d_hdrzone\",\n>  \t\t\tdev->data->port_id, queue_idx);\n>  \t\tvq->virtio_net_hdr_mz =\n> rte_memzone_reserve_aligned(vq_name,\n> -\t\t\tvq_size * sizeof(struct virtio_net_hdr),\n> +\t\t\tvq_size * hw->vtnet_hdr_size,\n>  \t\t\tsocket_id, 0, CACHE_LINE_SIZE);\n>  \t\tif (vq->virtio_net_hdr_mz == NULL) {\n>  \t\t\trte_free(vq);\n> @@ -346,7 +346,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n>  \t\tvq->virtio_net_hdr_mem =\n>  \t\t\tvq->virtio_net_hdr_mz->phys_addr;\n>  \t\tmemset(vq->virtio_net_hdr_mz->addr, 0,\n> -\t\t\tvq_size * sizeof(struct virtio_net_hdr));\n> +\t\t\tvq_size * hw->vtnet_hdr_size);\n>  \t} else if (queue_type == VTNET_CQ) {\n>  \t\t/* Allocate a page for control vq command, data and status */\n>  \t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_cvq_hdrzone\",\n> @@ -571,9 +571,6 @@ virtio_negotiate_features(struct virtio_hw *hw)\n>  \tmask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 |\n> VIRTIO_NET_F_GUEST_ECN;\n>  \tmask |= VTNET_LRO_FEATURES;\n> \n> -\t/* rx_mbuf should not be in multiple merged segments */\n> -\tmask |= VIRTIO_NET_F_MRG_RXBUF;\n> -\n>  \t/* not negotiating INDIRECT descriptor table support */\n>  \tmask |= VIRTIO_RING_F_INDIRECT_DESC;\n> \n> @@ -746,7 +743,6 @@ eth_virtio_dev_init(__rte_unused struct eth_driver\n> *eth_drv,\n>  \t}\n> \n>  \teth_dev->dev_ops = &virtio_eth_dev_ops;\n> -\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n>  \teth_dev->tx_pkt_burst = &virtio_xmit_pkts;\n> \n>  \tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n> @@ -801,10 +797,13 @@ eth_virtio_dev_init(__rte_unused struct eth_driver\n> *eth_drv,\n>  \tvirtio_negotiate_features(hw);\n> \n>  \t/* Setting up rx_header size for the device */\n> -\tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))\n> +\tif (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {\n> +\t\teth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;\n>  \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);\n> -\telse\n> +\t} else {\n> +\t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n>  \t\thw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);\n> +\t}\n> \n>  \t/* Allocate memory for storing MAC addresses */\n>  \teth_dev->data->mac_addrs = rte_zmalloc(\"virtio\", 
ETHER_ADDR_LEN,\n> 0);\n> @@ -1009,7 +1008,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev\n> *dev)\n> \n>  \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n>  \t\t\t\t\tdev->data->rx_queues[i])) != NULL) {\n> -\t\t\trte_pktmbuf_free_seg(buf);\n> +\t\t\trte_pktmbuf_free(buf);\n>  \t\t\tmbuf_num++;\n>  \t\t}\n> \n> @@ -1028,7 +1027,8 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev\n> *dev)\n>  \t\tmbuf_num = 0;\n>  \t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n>  \t\t\t\t\tdev->data->tx_queues[i])) != NULL) {\n> -\t\t\trte_pktmbuf_free_seg(buf);\n> +\t\t\trte_pktmbuf_free(buf);\n> +\n>  \t\t\tmbuf_num++;\n>  \t\t}\n> \n> diff --git a/lib/librte_pmd_virtio/virtio_ethdev.h\n> b/lib/librte_pmd_virtio/virtio_ethdev.h\n> index 858e644..d2e1eed 100644\n> --- a/lib/librte_pmd_virtio/virtio_ethdev.h\n> +++ b/lib/librte_pmd_virtio/virtio_ethdev.h\n> @@ -104,6 +104,9 @@ int  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,\n> uint16_t tx_queue_id,\n>  uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n>  \t\tuint16_t nb_pkts);\n> \n> +uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts,\n> +\t\tuint16_t nb_pkts);\n> +\n>  uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n>  \t\tuint16_t nb_pkts);\n> \n> diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c\n> b/lib/librte_pmd_virtio/virtio_rxtx.c\n> index fcd8bd1..3d81b34 100644\n> --- a/lib/librte_pmd_virtio/virtio_rxtx.c\n> +++ b/lib/librte_pmd_virtio/virtio_rxtx.c\n> @@ -146,6 +146,7 @@ static inline int\n>  virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n>  {\n>  \tstruct vq_desc_extra *dxp;\n> +\tstruct virtio_hw *hw = vq->hw;\n>  \tstruct vring_desc *start_dp;\n>  \tuint16_t needed = 1;\n>  \tuint16_t head_idx, idx;\n> @@ -165,9 +166,11 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq,\n> struct rte_mbuf *cookie)\n>  \tdxp->ndescs = needed;\n> \n>  \tstart_dp = vq->vq_ring.desc;\n> -\tstart_dp[idx].addr  =\n> -\t\t(uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM\n> - sizeof(struct virtio_net_hdr));\n> -\tstart_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM +\n> sizeof(struct virtio_net_hdr);\n> +\tstart_dp[idx].addr =\n> +\t\t(uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM\n> +\t\t- hw->vtnet_hdr_size);\n> +\tstart_dp[idx].len =\n> +\t\tcookie->buf_len - RTE_PKTMBUF_HEADROOM + hw-\n> >vtnet_hdr_size;\n>  \tstart_dp[idx].flags =  VRING_DESC_F_WRITE;\n>  \tidx = start_dp[idx].next;\n>  \tvq->vq_desc_head_idx = idx;\n> @@ -184,8 +187,10 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct\n> rte_mbuf *cookie)\n>  {\n>  \tstruct vq_desc_extra *dxp;\n>  \tstruct vring_desc *start_dp;\n> -\tuint16_t needed = 2;\n> +\tuint16_t seg_num = cookie->pkt.nb_segs;\n> +\tuint16_t needed = 1 + seg_num;\n>  \tuint16_t head_idx, idx;\n> +\tuint16_t head_size = txvq->hw->vtnet_hdr_size;\n> \n>  \tif (unlikely(txvq->vq_free_cnt == 0))\n>  \t\treturn -ENOSPC;\n> @@ -198,19 +203,25 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq,\n> struct rte_mbuf *cookie)\n>  \tidx = head_idx;\n>  \tdxp = &txvq->vq_descx[idx];\n>  \tif (dxp->cookie != NULL)\n> -\t\trte_pktmbuf_free_seg(dxp->cookie);\n> +\t\trte_pktmbuf_free(dxp->cookie);\n>  \tdxp->cookie = (void *)cookie;\n>  \tdxp->ndescs = needed;\n> \n>  \tstart_dp = txvq->vq_ring.desc;\n> -\tstart_dp[idx].addr  =\n> -\t\ttxvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);\n> -\tstart_dp[idx].len   = sizeof(struct virtio_net_hdr);\n> 
+\tstart_dp[idx].addr =\n> +\t\ttxvq->virtio_net_hdr_mem + idx * head_size;\n> +\tstart_dp[idx].len = (uint32_t)head_size;\n>  \tstart_dp[idx].flags = VRING_DESC_F_NEXT;\n> -\tidx = start_dp[idx].next;\n> -\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n> -\tstart_dp[idx].len   = cookie->pkt.data_len;\n> -\tstart_dp[idx].flags = 0;\n> +\n> +\tfor (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {\n> +\t\tidx = start_dp[idx].next;\n> +\t\tstart_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);\n> +\t\tstart_dp[idx].len   = cookie->pkt.data_len;\n> +\t\tstart_dp[idx].flags = VRING_DESC_F_NEXT;\n> +\t\tcookie = cookie->pkt.next;\n> +\t}\n> +\n> +\tstart_dp[idx].flags &= ~VRING_DESC_F_NEXT;\n>  \tidx = start_dp[idx].next;\n>  \ttxvq->vq_desc_head_idx = idx;\n>  \tif (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n> @@ -284,7 +295,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int\n> queue_type)\n>  \t\t\terror = virtqueue_enqueue_recv_refill(vq, m);\n> \n>  \t\t\tif (error) {\n> -\t\t\t\trte_pktmbuf_free_seg(m);\n> +\t\t\t\trte_pktmbuf_free(m);\n>  \t\t\t\tbreak;\n>  \t\t\t}\n>  \t\t\tnbufs++;\n> @@ -423,7 +434,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct\n> rte_mbuf *m)\n>  \terror = virtqueue_enqueue_recv_refill(vq, m);\n>  \tif (unlikely(error)) {\n>  \t\tRTE_LOG(ERR, PMD, \"cannot requeue discarded mbuf\");\n> -\t\trte_pktmbuf_free_seg(m);\n> +\t\trte_pktmbuf_free(m);\n>  \t}\n>  }\n> \n> @@ -471,17 +482,158 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts, uint16_t nb_pkts)\n> \n>  \t\trxm->pkt.in_port = rxvq->port_id;\n>  \t\trxm->pkt.data = (char *)rxm->buf_addr +\n> RTE_PKTMBUF_HEADROOM;\n> +\n>  \t\trxm->pkt.nb_segs = 1;\n>  \t\trxm->pkt.next = NULL;\n> -\t\trxm->pkt.pkt_len  = (uint32_t)(len[i]\n> -\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n> -\t\trxm->pkt.data_len = (uint16_t)(len[i]\n> -\t\t\t\t\t       - sizeof(struct virtio_net_hdr));\n> +\t\trxm->pkt.pkt_len = (uint32_t)(len[i] - hw->vtnet_hdr_size);\n> +\t\trxm->pkt.data_len = (uint16_t)(len[i] - hw->vtnet_hdr_size);\n> \n>  \t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n> \n>  \t\trx_pkts[nb_rx++] = rxm;\n> -\t\trxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);\n> +\t\trxvq->bytes += rx_pkts[nb_rx - 1]->pkt.pkt_len;\n> +\t}\n> +\n> +\trxvq->packets += nb_rx;\n> +\n> +\t/* Allocate new mbuf for the used descriptor */\n> +\terror = ENOSPC;\n> +\twhile (likely(!virtqueue_full(rxvq))) {\n> +\t\tnew_mbuf = rte_rxmbuf_alloc(rxvq->mpool);\n> +\t\tif (unlikely(new_mbuf == NULL)) {\n> +\t\t\tstruct rte_eth_dev *dev\n> +\t\t\t\t= &rte_eth_devices[rxvq->port_id];\n> +\t\t\tdev->data->rx_mbuf_alloc_failed++;\n> +\t\t\tbreak;\n> +\t\t}\n> +\t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n> +\t\tif (unlikely(error)) {\n> +\t\t\trte_pktmbuf_free(new_mbuf);\n> +\t\t\tbreak;\n> +\t\t}\n> +\t\tnb_enqueued++;\n> +\t}\n> +\n> +\tif (likely(nb_enqueued)) {\n> +\t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n> +\t\t\tvirtqueue_notify(rxvq);\n> +\t\t\tPMD_RX_LOG(DEBUG, \"Notified\\n\");\n> +\t\t}\n> +\t}\n> +\n> +\tvq_update_avail_idx(rxvq);\n> +\n> +\treturn nb_rx;\n> +}\n> +\n> +uint16_t\n> +virtio_recv_mergeable_pkts(void *rx_queue,\n> +\t\t\tstruct rte_mbuf **rx_pkts,\n> +\t\t\tuint16_t nb_pkts)\n> +{\n> +\tstruct virtqueue *rxvq = rx_queue;\n> +\tstruct virtio_hw *hw = rxvq->hw;\n> +\tstruct rte_mbuf *rxm, *new_mbuf;\n> +\tuint16_t nb_used, num, nb_rx = 0;\n> +\tuint32_t len[VIRTIO_MBUF_BURST_SZ];\n> +\tstruct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];\n> +\tstruct rte_mbuf 
*prev;\n> +\tint error;\n> +\tuint32_t i = 0, nb_enqueued = 0;\n> +\tuint32_t seg_num = 0;\n> +\tuint16_t extra_idx = 0;\n> +\n> +\tnb_used = VIRTQUEUE_NUSED(rxvq);\n> +\n> +\trmb();\n> +\n> +\tif (nb_used == 0)\n> +\t\treturn 0;\n> +\n> +\twhile (i < nb_used) {\n> +\t\tstruct virtio_net_hdr_mrg_rxbuf *header;\n> +\t\tchar *head_ptr;\n> +\n> +\t\tif (nb_rx >= nb_pkts)\n> +\t\t\tbreak;\n> +\n> +\t\tnum = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);\n> +\t\ti += num;\n> +\n> +\t\tPMD_RX_LOG(DEBUG, \"used:%d dequeue:%d\\n\", nb_used,\n> num);\n> +\t\tPMD_RX_LOG(DEBUG, \"packet len:%d\\n\", len[i]);\n> +\n> +\t\trxm = rcv_pkts[0];\n> +\t\textra_idx = 0;\n> +\n> +\t\tif (unlikely(len[0]\n> +\t\t\t     < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN))\n> {\n> +\t\t\tPMD_RX_LOG(ERR, \"Packet drop\\n\");\n> +\t\t\tnb_enqueued++;\n> +\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n> +\t\t\trxvq->errors++;\n> +\t\t\tcontinue;\n> +\t\t}\n> +\n> +\t\thead_ptr = (char *)rxm->pkt.data;\n> +\t\thead_ptr -= hw->vtnet_hdr_size;\n> +\t\theader = (struct virtio_net_hdr_mrg_rxbuf *)head_ptr;\n> +\t\tseg_num = header->num_buffers;\n> +\n> +\t\tif (seg_num == 0)\n> +\t\t\tseg_num = 1;\n> +\n> +\t\trxm->pkt.data = (char *)rxm->buf_addr +\n> RTE_PKTMBUF_HEADROOM;\n> +\t\trxm->pkt.nb_segs = seg_num;\n> +\t\trxm->pkt.next = NULL;\n> +\t\trxm->pkt.pkt_len = (uint32_t)(len[0] - hw->vtnet_hdr_size);\n> +\t\trxm->pkt.data_len = (uint16_t)(len[0] - hw->vtnet_hdr_size);\n> +\n> +\t\trxm->pkt.in_port = rxvq->port_id;\n> +\t\trx_pkts[nb_rx] = rxm;\n> +\n> +\t\tprev = rxm;\n> +\n> +\t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\nHere it might cause segmentation fault when debug is enabled.\n> +\n> +\t\t/*\n> +\t\t * Get extra segments for current uncompleted packet.\n> +\t\t */\n> +\t\tif (VIRTQUEUE_NUSED(rxvq) >= seg_num - 1) {\n> +\t\t\tuint32_t rx_num = virtqueue_dequeue_burst_rx(rxvq,\n> +\t\t\t\trcv_pkts, len, seg_num - 1);\n> +\t\t\ti += rx_num;\n> +\t\t} else {\n> +\t\t\tPMD_RX_LOG(ERR, \"No enough segments for\n> packet.\\n\");\n> +\t\t\tnb_enqueued++;\n> +\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n> +\t\t\trxvq->errors++;\n> +\t\t\tbreak;\nHere we should figure out if virtio net spec specify the behavior. If the backend wants to en-queue a merge able packet, but there isn't enough available descriptors, could it do it in a non-atomic way, i.e., firstly transfer some of them, update the used->idx and later transfer the rest of them? 
\nIf that is the case,  the handling here will cause the content of next segment later en-queued to be treated as virtio merge-able header.\n\n> +\t\t}\n> +\n> +\t\twhile (extra_idx < seg_num - 1) {\n> +\t\t\trxm = rcv_pkts[extra_idx];\n> +\n> +\t\t\trxm->pkt.data =\n> +\t\t\t\t(char *)rxm->buf_addr +\n> RTE_PKTMBUF_HEADROOM\n> +\t\t\t\t- hw->vtnet_hdr_size;\n> +\t\t\trxm->pkt.next = NULL;\n> +\t\t\trxm->pkt.pkt_len = (uint32_t)(len[extra_idx]);\n> +\t\t\trxm->pkt.data_len = (uint16_t)(len[extra_idx]);\n> +\n> +\t\t\tif (prev)\n> +\t\t\t\tprev->pkt.next = rxm;\n> +\n> +\t\t\tprev = rxm;\n> +\t\t\trx_pkts[nb_rx]->pkt.pkt_len += rxm->pkt.pkt_len;\n> +\n> +\t\t\textra_idx++;\n> +\n> +\t\t\tVIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);\n> +\t\t};\n> +\n> +\t\trxvq->bytes += rx_pkts[nb_rx]->pkt.pkt_len;\n> +\t\tnb_rx++;\n>  \t}\n> \n>  \trxvq->packets += nb_rx;\n> @@ -498,11 +650,12 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts, uint16_t nb_pkts)\n>  \t\t}\n>  \t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n>  \t\tif (unlikely(error)) {\n> -\t\t\trte_pktmbuf_free_seg(new_mbuf);\n> +\t\t\trte_pktmbuf_free(new_mbuf);\n>  \t\t\tbreak;\n>  \t\t}\n>  \t\tnb_enqueued++;\n>  \t}\n> +\n>  \tif (likely(nb_enqueued)) {\n>  \t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n>  \t\t\tvirtqueue_notify(rxvq);\n> @@ -536,12 +689,15 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf\n> **tx_pkts, uint16_t nb_pkts)\n>  \tnum = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used :\n> VIRTIO_MBUF_BURST_SZ);\n> \n>  \twhile (nb_tx < nb_pkts) {\n> -\t\tif (virtqueue_full(txvq) && num) {\n> -\t\t\tvirtqueue_dequeue_pkt_tx(txvq);\n> -\t\t\tnum--;\n> +\t\tint need = tx_pkts[nb_tx]->pkt.nb_segs - txvq->vq_free_cnt;\n> +\t\tif ((need > 0) && (num > 0)) {\n> +\t\t\tdo {\n> +\t\t\t\tvirtqueue_dequeue_pkt_tx(txvq);\n> +\t\t\t\tnum--;\n> +\t\t\t} while (num > 0);\n>  \t\t}\n> \n> -\t\tif (!virtqueue_full(txvq)) {\n> +\t\tif (tx_pkts[nb_tx]->pkt.nb_segs <= txvq->vq_free_cnt) {\n>  \t\t\ttxm = tx_pkts[nb_tx];\n>  \t\t\t/* Enqueue Packet buffers */\n>  \t\t\terror = virtqueue_enqueue_xmit(txvq, txm);\n> @@ -555,7 +711,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf\n> **tx_pkts, uint16_t nb_pkts)\n>  \t\t\t\tbreak;\n>  \t\t\t}\n>  \t\t\tnb_tx++;\n> -\t\t\ttxvq->bytes += txm->pkt.data_len;\n> +\t\t\ttxvq->bytes += txm->pkt.pkt_len;\n>  \t\t} else {\n>  \t\t\tPMD_TX_LOG(ERR, \"No free tx descriptors to transmit\");\n>  \t\t\tbreak;\n> --\n> 1.8.4.2",
        "headers": {
            "X-Mailman-Version": "2.1.15",
            "X-MS-TNEF-Correlator": "",
            "X-ExtLoop1": "1",
            "Thread-Topic": "[dpdk-dev] [PATCH v2] virtio: Support mergeable buffer in\n\tvirtio PMD",
            "x-originating-ip": "[10.239.127.40]",
            "Received": [
                "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 54C4F592E\n\tfor <dev@dpdk.org>; Fri, 25 Jul 2014 10:04:33 +0200 (CEST)",
                "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby orsmga102.jf.intel.com with ESMTP; 25 Jul 2014 01:00:22 -0700",
                "from fmsmsx108.amr.corp.intel.com ([10.19.9.228])\n\tby orsmga001.jf.intel.com with ESMTP; 25 Jul 2014 01:06:02 -0700",
                "from shsmsx104.ccr.corp.intel.com (10.239.4.70) by\n\tFMSMSX108.amr.corp.intel.com (10.19.9.228) with Microsoft SMTP Server\n\t(TLS) id 14.3.123.3; Fri, 25 Jul 2014 01:06:02 -0700",
                "from shsmsx101.ccr.corp.intel.com ([169.254.1.52]) by\n\tSHSMSX104.ccr.corp.intel.com ([169.254.5.204]) with mapi id\n\t14.03.0123.003; Fri, 25 Jul 2014 16:06:00 +0800"
            ],
            "References": "<1406268195-24010-1-git-send-email-changchun.ouyang@intel.com>",
            "MIME-Version": "1.0",
            "Message-ID": "<C37D651A908B024F974696C65296B57B0F24595E@SHSMSX101.ccr.corp.intel.com>",
            "Accept-Language": "en-US",
            "X-List-Received-Date": "Fri, 25 Jul 2014 08:04:34 -0000",
            "Thread-Index": "AQHPp85A0mBs6WZtwk+TgQBU4+1rLJuwbIsw",
            "X-IronPort-AV": "E=Sophos;i=\"5.01,729,1400050800\"; d=\"scan'208\";a=\"548863537\"",
            "Content-Language": "en-US",
            "Content-Transfer-Encoding": "quoted-printable",
            "From": "\"Xie, Huawei\" <huawei.xie@intel.com>",
            "Content-Type": "text/plain; charset=\"us-ascii\"",
            "List-Post": "<mailto:dev@dpdk.org>",
            "Return-Path": "<huawei.xie@intel.com>",
            "X-MS-Has-Attach": "",
            "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
            "In-Reply-To": "<1406268195-24010-1-git-send-email-changchun.ouyang@intel.com>",
            "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
            "To": "\"Ouyang, Changchun\" <changchun.ouyang@intel.com>, \"dev@dpdk.org\"\n\t<dev@dpdk.org>",
            "Precedence": "list",
            "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
            "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
            "X-BeenThere": "dev@dpdk.org",
            "Date": "Fri, 25 Jul 2014 08:05:59 +0000",
            "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
            "Subject": "Re: [dpdk-dev] [PATCH v2] virtio: Support mergeable buffer in\n\tvirtio PMD"
        }
    }
]
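
The same request can be scripted against the JSON API. Below is a minimal Python sketch, assuming the third-party requests library; the patch ID 96 and the order=-id parameter mirror the example above, and no authentication is assumed since listing comments is a read-only operation:

import requests

BASE_URL = "https://patches.dpdk.org/api"

def list_comments(patch_id, order="-id"):
    """Fetch the comments on a patch; "-id" sorts by descending comment ID."""
    url = "{}/patches/{}/comments/".format(BASE_URL, patch_id)
    resp = requests.get(url, params={"order": order})
    resp.raise_for_status()  # expect HTTP 200 with a JSON list as shown above
    return resp.json()

# Print each comment's ID, submitter name, and subject.
for comment in list_comments(96):
    print(comment["id"], comment["submitter"]["name"], comment["subject"])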