get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
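
For illustration, a minimal client call against this endpoint might look like the sketch below. It is not part of the Patchwork output that follows: it assumes the Python requests library, the patch ID shown in the response below, a hypothetical API token, and the token-based authentication scheme commonly enabled on Patchwork instances (write access via PUT/PATCH requires maintainer rights on the project).

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 29616
TOKEN = "your-api-token-here"  # hypothetical placeholder, not a real credential

# get: show a patch (read-only, no authentication needed).
patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
print(patch["name"], patch["state"], patch["archived"])

# patch: partially update a patch, e.g. change its state (maintainer token required).
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded"},
)
resp.raise_for_status()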

GET /api/patches/29616/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29616,
    "url": "https://patches.dpdk.org/api/patches/29616/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/95b209b8ab460fa2c0cf733d7f84e6147f5a64b2.1507141616.git.adrien.mazarguil@6wind.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<95b209b8ab460fa2c0cf733d7f84e6147f5a64b2.1507141616.git.adrien.mazarguil@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/95b209b8ab460fa2c0cf733d7f84e6147f5a64b2.1507141616.git.adrien.mazarguil@6wind.com",
    "date": "2017-10-04T18:48:55",
    "name": "[dpdk-dev,v3,3/6] net/mlx4: restore Tx gather support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "58c4cf1fb0a695329e8cdca1263cd0e5c8cb24d3",
    "submitter": {
        "id": 165,
        "url": "https://patches.dpdk.org/api/people/165/?format=api",
        "name": "Adrien Mazarguil",
        "email": "adrien.mazarguil@6wind.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/95b209b8ab460fa2c0cf733d7f84e6147f5a64b2.1507141616.git.adrien.mazarguil@6wind.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/29616/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/29616/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D7A9C1B6E1;\n\tWed,  4 Oct 2017 20:49:23 +0200 (CEST)",
            "from mail-wm0-f45.google.com (mail-wm0-f45.google.com\n\t[74.125.82.45]) by dpdk.org (Postfix) with ESMTP id C72A71B6D4\n\tfor <dev@dpdk.org>; Wed,  4 Oct 2017 20:49:20 +0200 (CEST)",
            "by mail-wm0-f45.google.com with SMTP id t69so24601761wmt.2\n\tfor <dev@dpdk.org>; Wed, 04 Oct 2017 11:49:20 -0700 (PDT)",
            "from 6wind.com (host.78.145.23.62.rev.coltfrance.com.\n\t[62.23.145.78]) by smtp.gmail.com with ESMTPSA id\n\te14sm1853804edk.95.2017.10.04.11.49.18\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tWed, 04 Oct 2017 11:49:18 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=GBPAHcUm/KcGqo6NX6Br9hdgd7x4pOHBub5J5XkQjaM=;\n\tb=PJAKyQrndqC2MsGeeer49LY8MJJjAlWjL8iPDh2Qe8OQALaJr7b2rtBbZTISsF0Bfe\n\t0jKCQkH+zN+qWDQQPQA7BSBxTOtWvvSq9Gw7da2TuqP2IVMtEjDQS1k5CVgPtDfZ3WIf\n\toc+TQQPuatLERSjcJtG27BfAwLtuppCigI7tR/pnt7/OYjEhlVpzr4d9q+0T3RKQ3c19\n\tBw/x2vHltWaFt7jVroR0K/tTamBb5+63m9eS9Q+l5VdgBeh8HEv+L6IeJGVQlUQKM6ff\n\tAq2FT7erbN1GMhM9h3MIZaM5luM1VvfYNgWY3UOyWt0ZgzAMzIBg7cU278PTUbKx9WQc\n\tXydg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=GBPAHcUm/KcGqo6NX6Br9hdgd7x4pOHBub5J5XkQjaM=;\n\tb=jwlGz3pHhroLY+N4uVdTcljteEbneXn3uxqmmddmj147vWsjaBVMz/iKZ9htomcK/e\n\tGVODnVQ34yaSUfPi9jtoVyzqqaJIS/aSJPh55wZ62iKoXC1fg87Dycb6TyvHDZM8pUt/\n\tQudC+EwbK6gkFo70/5H3mFSkj+8QOeFzmRex3v9bfK8rL9p8vT1JL3Wl0SjYT/j0OlBW\n\tj5yhBibzgkputy8sUwsl28tu4LHYGZ+xzxi4K0iIwOrPWe5yCPmHEzcfPcAIi4vbM18l\n\tkq++X3dax3ID2BvjK1zi0l77hjgEAepth8ZdAo9z4H5psUMDsaE8RBapj6HEnj9Lah8g\n\tZbAg==",
        "X-Gm-Message-State": "AHPjjUiLRmxPDJcr70UUZL4a7hWGZ803ONXL+D+xQoHogBSiwCZxdgc7\n\ttnGOe7iwQO2pS3+h1xjxa5MFKyUN",
        "X-Google-Smtp-Source": "AOwi7QBrhCrIzqxO8oHNlWxTd+ImafFWiOhkz7SOoaKN+btcs8ieCClyzohTOo0OW6YcIfFbKdfxOg==",
        "X-Received": "by 10.80.136.85 with SMTP id c21mr29525494edc.171.1507142959666; \n\tWed, 04 Oct 2017 11:49:19 -0700 (PDT)",
        "From": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "Moti Haimovsky <motih@mellanox.com>,\n\tMatan Azrad <matan@mellanox.com>",
        "Date": "Wed,  4 Oct 2017 20:48:55 +0200",
        "Message-Id": "<95b209b8ab460fa2c0cf733d7f84e6147f5a64b2.1507141616.git.adrien.mazarguil@6wind.com>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": "<cover.1507141616.git.adrien.mazarguil@6wind.com>",
        "References": "<1507027711-879-1-git-send-email-matan@mellanox.com>\n\t<cover.1507141616.git.adrien.mazarguil@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v3 3/6] net/mlx4: restore Tx gather support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Moti Haimovsky <motih@mellanox.com>\n\nThis patch adds support for transmitting packets spanning over multiple\nbuffers.\n\nIn this patch we also take into consideration the amount of entries a\npacket occupies in the TxQ when setting the report-completion flag of the\nchip.\n\nSigned-off-by: Moti Haimovsky <motih@mellanox.com>\nAcked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>\n---\n drivers/net/mlx4/mlx4_rxtx.c | 197 ++++++++++++++++++++++----------------\n drivers/net/mlx4/mlx4_rxtx.h |   6 +-\n drivers/net/mlx4/mlx4_txq.c  |  12 ++-\n 3 files changed, 127 insertions(+), 88 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c\nindex fd8ef7b..cc0baaa 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.c\n+++ b/drivers/net/mlx4/mlx4_rxtx.c\n@@ -63,6 +63,15 @@\n #include \"mlx4_utils.h\"\n \n /**\n+ * Pointer-value pair structure used in tx_post_send for saving the first\n+ * DWORD (32 byte) of a TXBB.\n+ */\n+struct pv {\n+\tstruct mlx4_wqe_data_seg *dseg;\n+\tuint32_t val;\n+};\n+\n+/**\n  * Stamp a WQE so it won't be reused by the HW.\n  *\n  * Routine is used when freeing WQE used by the chip or when failing\n@@ -291,24 +300,28 @@ mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)\n  *   Target Tx queue.\n  * @param pkt\n  *   Packet to transmit.\n- * @param send_flags\n- *   @p MLX4_WQE_CTRL_CQ_UPDATE to request completion on this packet.\n  *\n  * @return\n  *   0 on success, negative errno value otherwise and rte_errno is set.\n  */\n static inline int\n-mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt, uint32_t send_flags)\n+mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt)\n {\n \tstruct mlx4_wqe_ctrl_seg *ctrl;\n \tstruct mlx4_wqe_data_seg *dseg;\n \tstruct mlx4_sq *sq = &txq->msq;\n+\tstruct rte_mbuf *buf;\n \tuint32_t head_idx = sq->head & sq->txbb_cnt_mask;\n \tuint32_t lkey;\n \tuintptr_t addr;\n+\tuint32_t srcrb_flags;\n+\tuint32_t owner_opcode = MLX4_OPCODE_SEND;\n+\tuint32_t byte_count;\n \tint wqe_real_size;\n \tint nr_txbbs;\n \tint rc;\n+\tstruct pv *pv = (struct pv *)txq->bounce_buf;\n+\tint pv_counter = 0;\n \n \t/* Calculate the needed work queue entry size for this packet. */\n \twqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +\n@@ -324,56 +337,81 @@ mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt, uint32_t send_flags)\n \t\trc = ENOSPC;\n \t\tgoto err;\n \t}\n-\t/* Get the control and single-data entries of the WQE. */\n+\t/* Get the control and data entries of the WQE. */\n \tctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);\n \tdseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +\n \t\t\t\t\t    sizeof(struct mlx4_wqe_ctrl_seg));\n-\t/* Fill the data segment with buffer information. */\n-\taddr = rte_pktmbuf_mtod(pkt, uintptr_t);\n-\trte_prefetch0((volatile void *)addr);\n-\tdseg->addr = rte_cpu_to_be_64(addr);\n-\t/* Memory region key for this memory pool. */\n-\tlkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(pkt));\n-\tif (unlikely(lkey == (uint32_t)-1)) {\n-\t\t/* MR does not exist. */\n-\t\tDEBUG(\"%p: unable to get MP <-> MR association\", (void *)txq);\n+\t/* Fill the data segments with buffer information. */\n+\tfor (buf = pkt; buf != NULL; buf = buf->next, dseg++) {\n+\t\taddr = rte_pktmbuf_mtod(buf, uintptr_t);\n+\t\trte_prefetch0((volatile void *)addr);\n+\t\t/* Handle WQE wraparound. */\n+\t\tif (unlikely(dseg >= (struct mlx4_wqe_data_seg *)sq->eob))\n+\t\t\tdseg = (struct mlx4_wqe_data_seg *)sq->buf;\n+\t\tdseg->addr = rte_cpu_to_be_64(addr);\n+\t\t/* Memory region key for this memory pool. */\n+\t\tlkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));\n+\t\tif (unlikely(lkey == (uint32_t)-1)) {\n+\t\t\t/* MR does not exist. */\n+\t\t\tDEBUG(\"%p: unable to get MP <-> MR association\",\n+\t\t\t      (void *)txq);\n+\t\t\t/*\n+\t\t\t * Restamp entry in case of failure.\n+\t\t\t * Make sure that size is written correctly\n+\t\t\t * Note that we give ownership to the SW, not the HW.\n+\t\t\t */\n+\t\t\tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n+\t\t\tmlx4_txq_stamp_freed_wqe(sq, head_idx,\n+\t\t\t\t     (sq->head & sq->txbb_cnt) ? 
0 : 1);\n+\t\t\trc = EFAULT;\n+\t\t\tgoto err;\n+\t\t}\n+\t\tdseg->lkey = rte_cpu_to_be_32(lkey);\n+\t\tif (likely(buf->data_len)) {\n+\t\t\tbyte_count = rte_cpu_to_be_32(buf->data_len);\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * Zero length segment is treated as inline segment\n+\t\t\t * with zero data.\n+\t\t\t */\n+\t\t\tbyte_count = RTE_BE32(0x80000000);\n+\t\t}\n \t\t/*\n-\t\t * Restamp entry in case of failure, make sure that size is\n-\t\t * written correctly.\n-\t\t * Note that we give ownership to the SW, not the HW.\n+\t\t * If the data segment is not at the beginning of a\n+\t\t * Tx basic block (TXBB) then write the byte count,\n+\t\t * else postpone the writing to just before updating the\n+\t\t * control segment.\n \t\t */\n-\t\tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n-\t\tmlx4_txq_stamp_freed_wqe(sq, head_idx,\n-\t\t\t\t\t (sq->head & sq->txbb_cnt) ? 0 : 1);\n-\t\trc = EFAULT;\n-\t\tgoto err;\n+\t\tif ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {\n+\t\t\t/*\n+\t\t\t * Need a barrier here before writing the byte_count\n+\t\t\t * fields to make sure that all the data is visible\n+\t\t\t * before the byte_count field is set.\n+\t\t\t * Otherwise, if the segment begins a new cacheline,\n+\t\t\t * the HCA prefetcher could grab the 64-byte chunk and\n+\t\t\t * get a valid (!= 0xffffffff) byte count but stale\n+\t\t\t * data, and end up sending the wrong data.\n+\t\t\t */\n+\t\t\trte_io_wmb();\n+\t\t\tdseg->byte_count = byte_count;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * This data segment starts at the beginning of a new\n+\t\t\t * TXBB, so we need to postpone its byte_count writing\n+\t\t\t * for later.\n+\t\t\t */\n+\t\t\tpv[pv_counter].dseg = dseg;\n+\t\t\tpv[pv_counter++].val = byte_count;\n+\t\t}\n \t}\n-\tdseg->lkey = rte_cpu_to_be_32(lkey);\n-\t/*\n-\t * Need a barrier here before writing the byte_count field to\n-\t * make sure that all the data is visible before the\n-\t * byte_count field is set. Otherwise, if the segment begins\n-\t * a new cache line, the HCA prefetcher could grab the 64-byte\n-\t * chunk and get a valid (!= 0xffffffff) byte count but\n-\t * stale data, and end up sending the wrong data.\n-\t */\n-\trte_io_wmb();\n-\tif (likely(pkt->data_len))\n-\t\tdseg->byte_count = rte_cpu_to_be_32(pkt->data_len);\n-\telse\n-\t\t/*\n-\t\t * Zero length segment is treated as inline segment\n-\t\t * with zero data.\n-\t\t */\n-\t\tdseg->byte_count = RTE_BE32(0x80000000);\n-\t/*\n-\t * Fill the control parameters for this packet.\n-\t * For raw Ethernet, the SOLICIT flag is used to indicate that no ICRC\n-\t * should be calculated.\n-\t */\n-\tctrl->srcrb_flags =\n-\t\trte_cpu_to_be_32(MLX4_WQE_CTRL_SOLICIT |\n-\t\t\t\t (send_flags & MLX4_WQE_CTRL_CQ_UPDATE));\n+\t/* Write the first DWORD of each TXBB save earlier. */\n+\tif (pv_counter) {\n+\t\t/* Need a barrier here before writing the byte_count. */\n+\t\trte_io_wmb();\n+\t\tfor (--pv_counter; pv_counter  >= 0; pv_counter--)\n+\t\t\tpv[pv_counter].dseg->byte_count = pv[pv_counter].val;\n+\t}\n+\t/* Fill the control parameters for this packet. 
*/\n \tctrl->fence_size = (wqe_real_size >> 4) & 0x3f;\n \t/*\n \t * The caller should prepare \"imm\" in advance in order to support\n@@ -382,14 +420,27 @@ mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt, uint32_t send_flags)\n \t */\n \tctrl->imm = 0;\n \t/*\n-\t * Make sure descriptor is fully written before setting ownership\n-\t * bit (because HW can start executing as soon as we do).\n+\t * For raw Ethernet, the SOLICIT flag is used to indicate that no ICRC\n+\t * should be calculated.\n+\t */\n+\ttxq->elts_comp_cd -= nr_txbbs;\n+\tif (unlikely(txq->elts_comp_cd <= 0)) {\n+\t\ttxq->elts_comp_cd = txq->elts_comp_cd_init;\n+\t\tsrcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |\n+\t\t\t\t       MLX4_WQE_CTRL_CQ_UPDATE);\n+\t} else {\n+\t\tsrcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);\n+\t}\n+\tctrl->srcrb_flags = srcrb_flags;\n+\t/*\n+\t * Make sure descriptor is fully written before\n+\t * setting ownership bit (because HW can start\n+\t * executing as soon as we do).\n \t */\n \trte_wmb();\n-\tctrl->owner_opcode =\n-\t\trte_cpu_to_be_32(MLX4_OPCODE_SEND |\n-\t\t\t\t ((sq->head & sq->txbb_cnt) ?\n-\t\t\t\t  MLX4_BIT_WQE_OWN : 0));\n+\tctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |\n+\t\t\t\t\t      ((sq->head & sq->txbb_cnt) ?\n+\t\t\t\t\t       MLX4_BIT_WQE_OWN : 0));\n \tsq->head += nr_txbbs;\n \treturn 0;\n err:\n@@ -416,14 +467,13 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \tstruct txq *txq = (struct txq *)dpdk_txq;\n \tunsigned int elts_head = txq->elts_head;\n \tconst unsigned int elts_n = txq->elts_n;\n-\tunsigned int elts_comp_cd = txq->elts_comp_cd;\n \tunsigned int elts_comp = 0;\n \tunsigned int bytes_sent = 0;\n \tunsigned int i;\n \tunsigned int max;\n \tint err;\n \n-\tassert(elts_comp_cd != 0);\n+\tassert(txq->elts_comp_cd != 0);\n \tmlx4_txq_complete(txq);\n \tmax = (elts_n - (elts_head - txq->elts_tail));\n \tif (max > elts_n)\n@@ -442,8 +492,6 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\t\t(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);\n \t\tstruct txq_elt *elt_next = &(*txq->elts)[elts_head_next];\n \t\tstruct txq_elt *elt = &(*txq->elts)[elts_head];\n-\t\tunsigned int segs = buf->nb_segs;\n-\t\tuint32_t send_flags = 0;\n \n \t\t/* Clean up old buffer. */\n \t\tif (likely(elt->buf != NULL)) {\n@@ -461,34 +509,16 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \t\t\t\ttmp = next;\n \t\t\t} while (tmp != NULL);\n \t\t}\n-\t\t/* Request Tx completion. */\n-\t\tif (unlikely(--elts_comp_cd == 0)) {\n-\t\t\telts_comp_cd = txq->elts_comp_cd_init;\n-\t\t\t++elts_comp;\n-\t\t\tsend_flags |= MLX4_WQE_CTRL_CQ_UPDATE;\n-\t\t}\n-\t\tif (likely(segs == 1)) {\n-\t\t\t/* Update element. */\n-\t\t\telt->buf = buf;\n-\t\t\tRTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);\n-\t\t\t/* Post the packet for sending. */\n-\t\t\terr = mlx4_post_send(txq, buf, send_flags);\n-\t\t\tif (unlikely(err)) {\n-\t\t\t\tif (unlikely(send_flags &\n-\t\t\t\t\t     MLX4_WQE_CTRL_CQ_UPDATE)) {\n-\t\t\t\t\telts_comp_cd = 1;\n-\t\t\t\t\t--elts_comp;\n-\t\t\t\t}\n-\t\t\t\telt->buf = NULL;\n-\t\t\t\tgoto stop;\n-\t\t\t}\n-\t\t\telt->buf = buf;\n-\t\t\tbytes_sent += buf->pkt_len;\n-\t\t} else {\n-\t\t\terr = -EINVAL;\n-\t\t\trte_errno = -err;\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);\n+\t\t/* Post the packet for sending. 
*/\n+\t\terr = mlx4_post_send(txq, buf);\n+\t\tif (unlikely(err)) {\n+\t\t\telt->buf = NULL;\n \t\t\tgoto stop;\n \t\t}\n+\t\telt->buf = buf;\n+\t\tbytes_sent += buf->pkt_len;\n+\t\t++elts_comp;\n \t\telts_head = elts_head_next;\n \t}\n stop:\n@@ -504,7 +534,6 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)\n \trte_write32(txq->msq.doorbell_qpn, txq->msq.db);\n \ttxq->elts_head = elts_head;\n \ttxq->elts_comp += elts_comp;\n-\ttxq->elts_comp_cd = elts_comp_cd;\n \treturn i;\n }\n \ndiff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h\nindex ac84177..528e286 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.h\n+++ b/drivers/net/mlx4/mlx4_rxtx.h\n@@ -101,13 +101,15 @@ struct txq {\n \tstruct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */\n \tunsigned int elts_head; /**< Current index in (*elts)[]. */\n \tunsigned int elts_tail; /**< First element awaiting completion. */\n-\tunsigned int elts_comp; /**< Number of completion requests. */\n-\tunsigned int elts_comp_cd; /**< Countdown for next completion. */\n+\tunsigned int elts_comp; /**< Number of packets awaiting completion. */\n+\tint elts_comp_cd; /**< Countdown for next completion. */\n \tunsigned int elts_comp_cd_init; /**< Initial value for countdown. */\n \tunsigned int elts_n; /**< (*elts)[] length. */\n \tstruct txq_elt (*elts)[]; /**< Tx elements. */\n \tstruct mlx4_txq_stats stats; /**< Tx queue counters. */\n \tuint32_t max_inline; /**< Max inline send size. */\n+\tuint8_t *bounce_buf;\n+\t/**< Memory used for storing the first DWORD of data TXBBs. */\n \tstruct {\n \t\tconst struct rte_mempool *mp; /**< Cached memory pool. */\n \t\tstruct ibv_mr *mr; /**< Memory region (for mp). */\ndiff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c\nindex fb28ef2..7552a88 100644\n--- a/drivers/net/mlx4/mlx4_txq.c\n+++ b/drivers/net/mlx4/mlx4_txq.c\n@@ -83,8 +83,13 @@ mlx4_txq_alloc_elts(struct txq *txq, unsigned int elts_n)\n \t\trte_calloc_socket(\"TXQ\", 1, sizeof(*elts), 0, txq->socket);\n \tint ret = 0;\n \n-\tif (elts == NULL) {\n-\t\tERROR(\"%p: can't allocate packets array\", (void *)txq);\n+\t/* Allocate bounce buffer. */\n+\ttxq->bounce_buf = rte_zmalloc_socket(\"TXQ\",\n+\t\t\t\t\t     MLX4_MAX_WQE_SIZE,\n+\t\t\t\t\t     RTE_CACHE_LINE_MIN_SIZE,\n+\t\t\t\t\t     txq->socket);\n+\tif (!elts || !txq->bounce_buf) {\n+\t\tERROR(\"%p: can't allocate TXQ memory\", (void *)txq);\n \t\tret = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -110,6 +115,8 @@ mlx4_txq_alloc_elts(struct txq *txq, unsigned int elts_n)\n \tassert(ret == 0);\n \treturn 0;\n error:\n+\trte_free(txq->bounce_buf);\n+\ttxq->bounce_buf = NULL;\n \trte_free(elts);\n \tDEBUG(\"%p: failed, freed everything\", (void *)txq);\n \tassert(ret > 0);\n@@ -175,6 +182,7 @@ mlx4_txq_cleanup(struct txq *txq)\n \t\tclaim_zero(ibv_destroy_qp(txq->qp));\n \tif (txq->cq != NULL)\n \t\tclaim_zero(ibv_destroy_cq(txq->cq));\n+\trte_free(txq->bounce_buf);\n \tfor (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {\n \t\tif (txq->mp2mr[i].mp == NULL)\n \t\t\tbreak;\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "3/6"
    ]
}
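
The URLs embedded in the response ("mbox", "comments", "checks") are themselves fetchable resources. A minimal follow-up sketch, again assuming the Python requests library; the field names used for individual check objects ("context", "state") are assumptions based on typical Patchwork check responses:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/29616/").json()

# Download the raw patch in mbox form, suitable for "git am".
with open("29616.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# List the CI check results behind the aggregate patch["check"] value.
for check in requests.get(patch["checks"]).json():
    print(check.get("context"), check.get("state"))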