get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Fully update a patch (the resource is replaced with the supplied representation).

GET /api/patches/48774/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48774,
    "url": "https://patches.dpdk.org/api/patches/48774/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20181213123453.15035-6-jfreimann@redhat.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20181213123453.15035-6-jfreimann@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20181213123453.15035-6-jfreimann@redhat.com",
    "date": "2018-12-13T12:34:48",
    "name": "[v12,05/10] net/virtio: implement transmit path for packed queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "10044d3e2e9791e9b3f326ccef58ebee0f77a857",
    "submitter": {
        "id": 745,
        "url": "https://patches.dpdk.org/api/people/745/?format=api",
        "name": "Jens Freimann",
        "email": "jfreimann@redhat.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20181213123453.15035-6-jfreimann@redhat.com/mbox/",
    "series": [
        {
            "id": 2761,
            "url": "https://patches.dpdk.org/api/series/2761/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2761",
            "date": "2018-12-13T12:34:43",
            "name": "implement packed virtqueues",
            "version": 12,
            "mbox": "https://patches.dpdk.org/series/2761/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48774/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/48774/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 0A7CE1B547;\n\tThu, 13 Dec 2018 13:35:52 +0100 (CET)",
            "from mx1.redhat.com (mx1.redhat.com [209.132.183.28])\n\tby dpdk.org (Postfix) with ESMTP id 955411B4CA\n\tfor <dev@dpdk.org>; Thu, 13 Dec 2018 13:35:50 +0100 (CET)",
            "from smtp.corp.redhat.com\n\t(int-mx08.intmail.prod.int.phx2.redhat.com [10.5.11.23])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby mx1.redhat.com (Postfix) with ESMTPS id 00F5C80F91;\n\tThu, 13 Dec 2018 12:35:50 +0000 (UTC)",
            "from localhost (unknown [10.36.118.169])\n\tby smtp.corp.redhat.com (Postfix) with ESMTPS id C2D5419497;\n\tThu, 13 Dec 2018 12:35:42 +0000 (UTC)"
        ],
        "From": "Jens Freimann <jfreimann@redhat.com>",
        "To": "dev@dpdk.org",
        "Cc": "tiwei.bie@intel.com,\n\tmaxime.coquelin@redhat.com,\n\tGavin.Hu@arm.com",
        "Date": "Thu, 13 Dec 2018 13:34:48 +0100",
        "Message-Id": "<20181213123453.15035-6-jfreimann@redhat.com>",
        "In-Reply-To": "<20181213123453.15035-1-jfreimann@redhat.com>",
        "References": "<20181213123453.15035-1-jfreimann@redhat.com>",
        "X-Scanned-By": "MIMEDefang 2.84 on 10.5.11.23",
        "X-Greylist": "Sender IP whitelisted, not delayed by milter-greylist-4.5.16\n\t(mx1.redhat.com [10.5.110.27]); Thu, 13 Dec 2018 12:35:50 +0000 (UTC)",
        "Subject": "[dpdk-dev] [PATCH v12 05/10] net/virtio: implement transmit path\n\tfor packed queues",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This implements the transmit path for devices with\nsupport for packed virtqueues.\n\nSigned-off-by: Jens Freiman <jfreimann@redhat.com>\nSigned-off-by: Tiwei Bie <tiwei.bie@intel.com>\n---\n drivers/net/virtio/virtio_ethdev.c |  54 ++++---\n drivers/net/virtio/virtio_ethdev.h |   2 +\n drivers/net/virtio/virtio_rxtx.c   | 235 ++++++++++++++++++++++++++++-\n drivers/net/virtio/virtqueue.h     |  22 ++-\n 4 files changed, 291 insertions(+), 22 deletions(-)",
    "diff": "diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex b1232df59..6fdc18245 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -390,6 +390,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)\n \tif (vtpci_packed_queue(hw)) {\n \t\tvq->avail_wrap_counter = 1;\n \t\tvq->used_wrap_counter = 1;\n+\t\tvq->avail_used_flags =\n+\t\t\tVRING_DESC_F_AVAIL(vq->avail_wrap_counter) |\n+\t\t\tVRING_DESC_F_USED(!vq->avail_wrap_counter);\n \t}\n \n \t/*\n@@ -497,16 +500,22 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)\n \t\tmemset(txr, 0, vq_size * sizeof(*txr));\n \t\tfor (i = 0; i < vq_size; i++) {\n \t\t\tstruct vring_desc *start_dp = txr[i].tx_indir;\n-\n-\t\t\tvring_desc_init_split(start_dp, RTE_DIM(txr[i].tx_indir));\n-\n+\t\t\tstruct vring_packed_desc *start_dp_packed = txr[i].tx_indir_pq;\n+\t\n \t\t\t/* first indirect descriptor is always the tx header */\n-\t\t\tstart_dp->addr = txvq->virtio_net_hdr_mem\n-\t\t\t\t+ i * sizeof(*txr)\n-\t\t\t\t+ offsetof(struct virtio_tx_region, tx_hdr);\n-\n-\t\t\tstart_dp->len = hw->vtnet_hdr_size;\n-\t\t\tstart_dp->flags = VRING_DESC_F_NEXT;\n+\t\t\tif (vtpci_packed_queue(hw)) {\n+\t\t\t\tstart_dp_packed->addr = txvq->virtio_net_hdr_mem\n+\t\t\t\t\t+ i * sizeof(*txr)\n+\t\t\t\t\t+ offsetof(struct virtio_tx_region, tx_hdr);\n+\t\t\t\tstart_dp_packed->len = hw->vtnet_hdr_size;\n+\t\t\t} else {\n+\t\t\t\tvring_desc_init_split(start_dp, RTE_DIM(txr[i].tx_indir));\n+\t\t\t\tstart_dp->addr = txvq->virtio_net_hdr_mem\n+\t\t\t\t\t+ i * sizeof(*txr)\n+\t\t\t\t\t+ offsetof(struct virtio_tx_region, tx_hdr);\n+\t\t\t\tstart_dp->len = hw->vtnet_hdr_size;\n+\t\t\t\tstart_dp->flags = VRING_DESC_F_NEXT;\n+\t\t\t}\n \t\t}\n \t}\n \n@@ -1335,6 +1344,23 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)\n {\n \tstruct virtio_hw *hw = eth_dev->data->dev_private;\n \n+\tif (vtpci_packed_queue(hw)) 
{\n+\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\"virtio: using packed ring standard Tx path on port %u\",\n+\t\t\teth_dev->data->port_id);\n+\t\teth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;\n+\t} else {\n+\t\tif (hw->use_inorder_tx) {\n+\t\t\tPMD_INIT_LOG(INFO, \"virtio: using inorder Tx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(INFO, \"virtio: using standard Tx path on port %u\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\teth_dev->tx_pkt_burst = virtio_xmit_pkts;\n+\t\t}\n+\t}\n+\n \tif (hw->use_simple_rx) {\n \t\tPMD_INIT_LOG(INFO, \"virtio: using simple Rx path on port %u\",\n \t\t\teth_dev->data->port_id);\n@@ -1355,15 +1381,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)\n \t\teth_dev->rx_pkt_burst = &virtio_recv_pkts;\n \t}\n \n-\tif (hw->use_inorder_tx) {\n-\t\tPMD_INIT_LOG(INFO, \"virtio: using inorder Tx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;\n-\t} else {\n-\t\tPMD_INIT_LOG(INFO, \"virtio: using standard Tx path on port %u\",\n-\t\t\teth_dev->data->port_id);\n-\t\teth_dev->tx_pkt_burst = virtio_xmit_pkts;\n-\t}\n+\n }\n \n /* Only support 1:1 queue/interrupt mapping so far.\ndiff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h\nindex e0f80e5a4..05d355180 100644\n--- a/drivers/net/virtio/virtio_ethdev.h\n+++ b/drivers/net/virtio/virtio_ethdev.h\n@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,\n \n uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\n+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts);\n \n uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tuint16_t nb_pkts);\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex cb8f89f18..d1c294e1b 100644\n--- 
a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -88,6 +88,23 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)\n \tdp->next = VQ_RING_DESC_CHAIN_END;\n }\n \n+static void\n+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)\n+{\n+\tstruct vq_desc_extra *dxp;\n+\n+\tdxp = &vq->vq_descx[id];\n+\tvq->vq_free_cnt += dxp->ndescs;\n+\n+\tif (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)\n+\t\tvq->vq_desc_head_idx = id;\n+\telse\n+\t\tvq->vq_descx[vq->vq_desc_tail_idx].next = id;\n+\n+\tvq->vq_desc_tail_idx = id;\n+\tdxp->next = VQ_RING_DESC_CHAIN_END;\n+}\n+\n static uint16_t\n virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,\n \t\t\t   uint32_t *len, uint16_t num)\n@@ -165,6 +182,33 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,\n #endif\n \n /* Cleanup from completed transmits. */\n+static void\n+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)\n+{\n+\tuint16_t used_idx, id;\n+\tuint16_t size = vq->vq_nentries;\n+\tstruct vring_packed_desc *desc = vq->ring_packed.desc_packed;\n+\tstruct vq_desc_extra *dxp;\n+\n+\tused_idx = vq->vq_used_cons_idx;\n+\twhile (num-- && desc_is_used(&desc[used_idx], vq)) {\n+\t\tused_idx = vq->vq_used_cons_idx;\n+\t\tid = desc[used_idx].id;\n+\t\tdxp = &vq->vq_descx[id];\n+\t\tvq->vq_used_cons_idx += dxp->ndescs;\n+\t\tif (vq->vq_used_cons_idx >= size) {\n+\t\t\tvq->vq_used_cons_idx -= size;\n+\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t}\n+\t\tvq_ring_free_id_packed(vq, id);\n+\t\tif (dxp->cookie != NULL) {\n+\t\t\trte_pktmbuf_free(dxp->cookie);\n+\t\t\tdxp->cookie = NULL;\n+\t\t}\n+\t\tused_idx = vq->vq_used_cons_idx;\n+\t}\n+}\n+\n static void\n virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)\n {\n@@ -456,6 +500,107 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,\n \tvq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);\n }\n \n+static inline void\n+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf 
*cookie,\n+\t\t\t      uint16_t needed, int can_push)\n+{\n+\tstruct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;\n+\tstruct vq_desc_extra *dxp;\n+\tstruct virtqueue *vq = txvq->vq;\n+\tstruct vring_packed_desc *start_dp, *head_dp;\n+\tuint16_t idx, id, head_idx, head_flags;\n+\tuint16_t head_size = vq->hw->vtnet_hdr_size;\n+\tstruct virtio_net_hdr *hdr;\n+\tuint16_t prev;\n+\n+\tid = vq->vq_desc_head_idx;\n+\n+\tdxp = &vq->vq_descx[id];\n+\tdxp->ndescs = needed;\n+\tdxp->cookie = cookie;\n+\n+\thead_idx = vq->vq_avail_idx;\n+\tidx = head_idx;\n+\tprev = head_idx;\n+\tstart_dp = vq->ring_packed.desc_packed;\n+\n+\thead_dp = &vq->ring_packed.desc_packed[idx];\n+\thead_flags = cookie->next ? VRING_DESC_F_NEXT: 0;\n+\thead_flags |= vq->avail_used_flags;\n+\n+\tif (can_push) {\n+\t\t/* prepend cannot fail, checked by caller */\n+\t\thdr = (struct virtio_net_hdr *)\n+\t\t\trte_pktmbuf_prepend(cookie, head_size);\n+\t\t/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,\n+\t\t * which is wrong. 
Below subtract restores correct pkt size.\n+\t\t */\n+\t\tcookie->pkt_len -= head_size;\n+\n+\t\t/* if offload disabled, it is not zeroed below, do it now */\n+\t\tif (!vq->hw->has_tx_offload) {\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->flags, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);\n+\t\t\tASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);\n+\t\t}\n+\t} else {\n+\t\t/* setup first tx ring slot to point to header\n+\t\t * stored in reserved region.\n+\t\t */\n+\t\tstart_dp[idx].addr  = txvq->virtio_net_hdr_mem +\n+\t\t\tRTE_PTR_DIFF(&txr[idx].tx_hdr, txr);\n+\t\tstart_dp[idx].len   = vq->hw->vtnet_hdr_size;\n+\t\thdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;\n+\t\tidx++;\n+\t\tif (idx >= vq->vq_nentries) {\n+\t\t\tidx -= vq->vq_nentries;\n+\t\t\tvq->avail_wrap_counter ^= 1;\n+\t\t\tvq->avail_used_flags =\n+\t\t\t\tVRING_DESC_F_AVAIL(vq->avail_wrap_counter) |\n+\t\t\t\tVRING_DESC_F_USED(!vq->avail_wrap_counter);\n+\t\t}\n+\t}\n+\n+\tvirtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);\n+\n+\tdo {\n+\t\tuint16_t flags;\n+\n+\t\tstart_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);\n+\t\tstart_dp[idx].len  = cookie->data_len;\n+\t\tif (likely(idx != head_idx)) {\n+\t\t\tflags = cookie->next ? 
VRING_DESC_F_NEXT : 0;\n+\t\t\tflags |= vq->avail_used_flags;\n+\t\t\tstart_dp[idx].flags = flags;\n+\t\t}\n+\t\tprev = idx;\n+\t\tidx++;\n+\t\tif (idx >= vq->vq_nentries) {\n+\t\t\tidx -= vq->vq_nentries;\n+\t\t\tvq->avail_wrap_counter ^= 1;\n+\t\t\tvq->avail_used_flags =\n+\t\t\t\tVRING_DESC_F_AVAIL(vq->avail_wrap_counter) |\n+\t\t\t\tVRING_DESC_F_USED(!vq->avail_wrap_counter);\n+\t\t}\n+\t} while ((cookie = cookie->next) != NULL);\n+\n+\tstart_dp[prev].id = id;\n+\n+\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);\n+\n+\tvq->vq_desc_head_idx = dxp->next;\n+\tif (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n+\t\tvq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;\n+\n+\tvq->vq_avail_idx = idx;\n+\n+\trte_smp_wmb();\n+\thead_dp->flags = head_flags;\n+}\n+\n static inline void\n virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t\t\tuint16_t needed, int use_indirect, int can_push,\n@@ -733,8 +878,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tif (hw->use_inorder_tx)\n-\t\tvq->vq_ring.desc[vq->vq_nentries - 1].next = 0;\n+\tif (!vtpci_packed_queue(hw)) {\n+\t\tif (hw->use_inorder_tx)\n+\t\t\tvq->vq_ring.desc[vq->vq_nentries - 1].next = 0;\n+\t}\n \n \tVIRTQUEUE_DUMP(vq);\n \n@@ -1346,6 +1493,90 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \treturn nb_rx;\n }\n \n+uint16_t\n+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct virtnet_tx *txvq = tx_queue;\n+\tstruct virtqueue *vq = txvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n+\tuint16_t hdr_size = hw->vtnet_hdr_size;\n+\tuint16_t nb_tx = 0;\n+\tint error;\n+\n+\tif (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))\n+\t\treturn nb_tx;\n+\n+\tif (unlikely(nb_pkts < 1))\n+\t\treturn nb_pkts;\n+\n+\tPMD_TX_LOG(DEBUG, \"%d packets to xmit\", nb_pkts);\n+\n+\tif (nb_pkts > vq->vq_free_cnt)\n+\t\tvirtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);\n+\n+\tfor (nb_tx = 0; 
nb_tx < nb_pkts; nb_tx++) {\n+\t\tstruct rte_mbuf *txm = tx_pkts[nb_tx];\n+\t\tint can_push = 0, slots, need;\n+\n+\t\t/* Do VLAN tag insertion */\n+\t\tif (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {\n+\t\t\terror = rte_vlan_insert(&txm);\n+\t\t\tif (unlikely(error)) {\n+\t\t\t\trte_pktmbuf_free(txm);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* optimize ring usage */\n+\t\tif ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||\n+\t\t      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&\n+\t\t    rte_mbuf_refcnt_read(txm) == 1 &&\n+\t\t    RTE_MBUF_DIRECT(txm) &&\n+\t\t    txm->nb_segs == 1 &&\n+\t\t    rte_pktmbuf_headroom(txm) >= hdr_size &&\n+\t\t    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),\n+\t\t\t\t   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))\n+\t\t\tcan_push = 1;\n+\n+\t\t/* How many main ring entries are needed to this Tx?\n+\t\t * any_layout => number of segments\n+\t\t * default    => number of segments + 1\n+\t\t */\n+\t\tslots = txm->nb_segs + !can_push;\n+\t\tneed = slots - vq->vq_free_cnt;\n+\n+\t\t/* Positive value indicates it need free vring descriptors */\n+\t\tif (unlikely(need > 0)) {\n+\t\t\tvirtio_rmb();\n+\t\t\tneed = RTE_MIN(need, (int)nb_pkts);\n+\t\t\tvirtio_xmit_cleanup_packed(vq, need);\n+\t\t\tneed = slots - vq->vq_free_cnt;\n+\t\t\tif (unlikely(need > 0)) {\n+\t\t\t\tPMD_TX_LOG(ERR,\n+\t\t\t\t\t   \"No free tx descriptors to transmit\");\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Enqueue Packet buffers */\n+\t\tvirtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);\n+\n+\t\ttxvq->stats.bytes += txm->pkt_len;\n+\t\tvirtio_update_packet_stats(&txvq->stats, txm);\n+\t}\n+\n+\ttxvq->stats.packets += nb_tx;\n+\n+\tif (likely(nb_tx)) {\n+\t\tif (unlikely(virtqueue_kick_prepare_packed(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n+\t\t\tPMD_TX_LOG(DEBUG, \"Notified backend after xmit\");\n+\t\t}\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n uint16_t\n virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n {\ndiff 
--git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex e6e0518a3..5119818e1 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -170,6 +170,8 @@ struct virtqueue {\n \tstruct vring_packed ring_packed;  /**< vring keeping desc, used and avail */\n \tbool avail_wrap_counter;\n \tbool used_wrap_counter;\n+\tuint16_t event_flags_shadow;\n+\tuint16_t avail_used_flags;\n \n \t/**\n \t * Last consumed descriptor in the used table,\n@@ -246,8 +248,12 @@ struct virtio_net_hdr_mrg_rxbuf {\n #define VIRTIO_MAX_TX_INDIRECT 8\n struct virtio_tx_region {\n \tstruct virtio_net_hdr_mrg_rxbuf tx_hdr;\n-\tstruct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]\n-\t\t\t   __attribute__((__aligned__(16)));\n+\tunion {\n+\t\tstruct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]\n+\t\t\t__attribute__((__aligned__(16)));\n+\t\tstruct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]\n+\t\t\t__attribute__((__aligned__(16)));\n+\t};\n };\n \n static inline void\n@@ -391,6 +397,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)\n #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))\n \n void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);\n+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx);\n void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n \t\t\t  uint16_t num);\n \n@@ -424,6 +431,17 @@ virtqueue_kick_prepare(struct virtqueue *vq)\n \treturn !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);\n }\n \n+static inline int\n+virtqueue_kick_prepare_packed(struct virtqueue *vq)\n+{\n+\tuint16_t flags;\n+\n+\tvirtio_mb();\n+\tflags = vq->ring_packed.device_event->desc_event_flags;\n+\n+\treturn flags != RING_EVENT_FLAGS_DISABLE;\n+}\n+\n static inline void\n virtqueue_notify(struct virtqueue *vq)\n {\n",
    "prefixes": [
        "v12",
        "05/10"
    ]
}