get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update of the writable fields).

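A minimal Python sketch of how these methods are typically driven from a script (assuming the third-party requests library; the token value is a hypothetical placeholder, and write access requires an account with maintainer rights on the project):

import requests

URL = "http://patches.dpdk.org/api/patches/52316/"

# Reading a patch needs no authentication.
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# Updating a patch (PATCH for a partial update, PUT for a full one) requires
# a Patchwork API token; the value below is a placeholder.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef01234567"}
resp = requests.patch(URL, json={"state": "accepted"}, headers=headers)
resp.raise_for_status()
print(resp.json()["state"])
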
GET /api/patches/52316/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 52316,
    "url": "http://patches.dpdk.org/api/patches/52316/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190405013357.14503-3-yskoh@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190405013357.14503-3-yskoh@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190405013357.14503-3-yskoh@mellanox.com",
    "date": "2019-04-05T01:33:55",
    "name": "[v3,2/4] net/mlx5: remove redundant queue index",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "89c9de6a20bac25e1f8fe4b4c8a1c7706767b41f",
    "submitter": {
        "id": 636,
        "url": "http://patches.dpdk.org/api/people/636/?format=api",
        "name": "Yongseok Koh",
        "email": "yskoh@mellanox.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190405013357.14503-3-yskoh@mellanox.com/mbox/",
    "series": [
        {
            "id": 4126,
            "url": "http://patches.dpdk.org/api/series/4126/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4126",
            "date": "2019-04-05T01:33:53",
            "name": "net/mlx: remove device register remap",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/4126/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/52316/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/52316/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6E5D51B45B;\n\tFri,  5 Apr 2019 03:34:15 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id 9036E1B44D\n\tfor <dev@dpdk.org>; Fri,  5 Apr 2019 03:34:10 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n\tyskoh@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 5 Apr 2019 04:34:08 +0300",
            "from scfae-sc-2.mti.labs.mlnx (scfae-sc-2.mti.labs.mlnx\n\t[10.101.0.96])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x351Y2kU018492;\n\tFri, 5 Apr 2019 04:34:06 +0300"
        ],
        "From": "Yongseok Koh <yskoh@mellanox.com>",
        "To": "shahafs@mellanox.com",
        "Cc": "dev@dpdk.org",
        "Date": "Thu,  4 Apr 2019 18:33:55 -0700",
        "Message-Id": "<20190405013357.14503-3-yskoh@mellanox.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "In-Reply-To": "<20190405013357.14503-1-yskoh@mellanox.com>",
        "References": "<20190325193627.19726-1-yskoh@mellanox.com>\n\t<20190405013357.14503-1-yskoh@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v3 2/4] net/mlx5: remove redundant queue index",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Queue index is redundantly stored for both Rx and Tx structures.\nE.g. txq_ctrl->idx and txq->stats.idx. Both are consolidated to single\nstorage - rxq->idx and txq->idx.\n\nAlso, rxq and txq are moved to the beginning of its control structure\n(rxq_ctrl and txq_ctrl) for cacheline alignment.\n\nSigned-off-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5_rxq.c     | 29 ++++++++++++++---------------\n drivers/net/mlx5/mlx5_rxtx.h    | 10 ++++------\n drivers/net/mlx5/mlx5_stats.c   | 15 ++++++---------\n drivers/net/mlx5/mlx5_trigger.c |  2 +-\n drivers/net/mlx5/mlx5_txq.c     | 21 ++++++++++-----------\n 5 files changed, 35 insertions(+), 42 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex dcb97c2100..8a84b0a1b5 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -156,7 +156,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \t}\n \tDRV_LOG(DEBUG,\n \t\t\"port %u Rx queue %u allocated and configured %u segments\",\n-\t\trxq->port_id, rxq_ctrl->idx, wqe_n);\n+\t\trxq->port_id, rxq->idx, wqe_n);\n \treturn 0;\n error:\n \terr = rte_errno; /* Save rte_errno before cleanup. */\n@@ -168,7 +168,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \t\t(*rxq->mprq_bufs)[i] = NULL;\n \t}\n \tDRV_LOG(DEBUG, \"port %u Rx queue %u failed, freed everything\",\n-\t\trxq->port_id, rxq_ctrl->idx);\n+\t\trxq->port_id, rxq->idx);\n \trte_errno = err; /* Restore rte_errno. */\n \treturn -rte_errno;\n }\n@@ -241,7 +241,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \tDRV_LOG(DEBUG,\n \t\t\"port %u Rx queue %u allocated and configured %u segments\"\n \t\t\" (max %u packets)\",\n-\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,\n+\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,\n \t\telts_n / (1 << rxq_ctrl->rxq.sges_n));\n \treturn 0;\n error:\n@@ -253,7 +253,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \t\t(*rxq_ctrl->rxq.elts)[i] = NULL;\n \t}\n \tDRV_LOG(DEBUG, \"port %u Rx queue %u failed, freed everything\",\n-\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);\n+\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);\n \trte_errno = err; /* Restore rte_errno. */\n \treturn -rte_errno;\n }\n@@ -287,7 +287,7 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \tuint16_t i;\n \n \tDRV_LOG(DEBUG, \"port %u Multi-Packet Rx queue %u freeing WRs\",\n-\t\trxq->port_id, rxq_ctrl->idx);\n+\t\trxq->port_id, rxq->idx);\n \tif (rxq->mprq_bufs == NULL)\n \t\treturn;\n \tassert(mlx5_rxq_check_vec_support(rxq) < 0);\n@@ -318,7 +318,7 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)\n \tuint16_t i;\n \n \tDRV_LOG(DEBUG, \"port %u Rx queue %u freeing WRs\",\n-\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);\n+\t\tPORT_ID(rxq_ctrl->priv), rxq->idx);\n \tif (rxq->elts == NULL)\n \t\treturn;\n \t/**\n@@ -364,7 +364,7 @@ void\n mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)\n {\n \tDRV_LOG(DEBUG, \"port %u cleaning up Rx queue %u\",\n-\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);\n+\t\tPORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);\n \tif (rxq_ctrl->ibv)\n \t\tmlx5_rxq_ibv_release(rxq_ctrl->ibv);\n \tmemset(rxq_ctrl, 0, sizeof(*rxq_ctrl));\n@@ -495,11 +495,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)\n \t\treturn;\n \trxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);\n \tpriv = rxq_ctrl->priv;\n-\tif (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))\n+\tif (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))\n \t\trte_panic(\"port %u Rx queue %u is still used by a flow and\"\n \t\t\t  \" cannot be removed\\n\",\n-\t\t\t  PORT_ID(priv), rxq_ctrl->idx);\n-\tmlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);\n+\t\t\t  PORT_ID(priv), rxq->idx);\n+\tmlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);\n }\n \n /**\n@@ -793,7 +793,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)\n \tif (!tmpl) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"port %u Rx queue %u cannot allocate verbs resources\",\n-\t\t\tdev->data->port_id, rxq_ctrl->idx);\n+\t\t\tdev->data->port_id, rxq_data->idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -1104,7 +1104,7 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)\n \n \tLIST_FOREACH(rxq_ibv, 
&priv->rxqsibv, next) {\n \t\tDRV_LOG(DEBUG, \"port %u Verbs Rx queue %u still referenced\",\n-\t\t\tdev->data->port_id, rxq_ibv->rxq_ctrl->idx);\n+\t\t\tdev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);\n \t\t++ret;\n \t}\n \treturn ret;\n@@ -1470,7 +1470,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \ttmpl->rxq.port_id = dev->data->port_id;\n \ttmpl->priv = priv;\n \ttmpl->rxq.mp = mp;\n-\ttmpl->rxq.stats.idx = idx;\n \ttmpl->rxq.elts_n = log2above(desc);\n \ttmpl->rxq.rq_repl_thresh =\n \t\tMLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);\n@@ -1479,7 +1478,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n #ifndef RTE_ARCH_64\n \ttmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;\n #endif\n-\ttmpl->idx = idx;\n+\ttmpl->rxq.idx = idx;\n \trte_atomic32_inc(&tmpl->refcnt);\n \tLIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);\n \treturn tmpl;\n@@ -1592,7 +1591,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)\n \n \tLIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {\n \t\tDRV_LOG(DEBUG, \"port %u Rx Queue %u still referenced\",\n-\t\t\tdev->data->port_id, rxq_ctrl->idx);\n+\t\t\tdev->data->port_id, rxq_ctrl->rxq.idx);\n \t\t++ret;\n \t}\n \treturn ret;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex ced9945888..7b58063ceb 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -41,7 +41,6 @@\n #define MLX5_FLOW_TUNNEL 5\n \n struct mlx5_rxq_stats {\n-\tunsigned int idx; /**< Mapping index. */\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \tuint64_t ipackets; /**< Total of successfully received packets. */\n \tuint64_t ibytes; /**< Total of successfully received bytes. */\n@@ -51,7 +50,6 @@ struct mlx5_rxq_stats {\n };\n \n struct mlx5_txq_stats {\n-\tunsigned int idx; /**< Mapping index. */\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \tuint64_t opackets; /**< Total of successfully sent packets. */\n \tuint64_t obytes; /**< Total of successfully sent bytes. */\n@@ -116,6 +114,7 @@ struct mlx5_rxq_data {\n \tstruct rte_mempool *mp;\n \tstruct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */\n \tstruct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */\n+\tuint16_t idx; /* Queue index. */\n \tstruct mlx5_rxq_stats stats;\n \tuint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */\n \tstruct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */\n@@ -141,14 +140,13 @@ struct mlx5_rxq_ibv {\n \n /* RX queue control descriptor. */\n struct mlx5_rxq_ctrl {\n+\tstruct mlx5_rxq_data rxq; /* Data path structure. */\n \tLIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */\n \trte_atomic32_t refcnt; /* Reference counter. */\n \tstruct mlx5_rxq_ibv *ibv; /* Verbs elements. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n-\tstruct mlx5_rxq_data rxq; /* Data path structure. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n-\tuint16_t idx; /* Queue index. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n \tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\n };\n@@ -205,6 +203,7 @@ struct mlx5_txq_data {\n \tvolatile uint32_t *cq_db; /* Completion queue doorbell. */\n \tvolatile void *bf_reg; /* Blueflame register remapped. */\n \tstruct rte_mbuf *(*elts)[]; /* TX elements. */\n+\tuint16_t idx; /* Queue index. */\n \tstruct mlx5_txq_stats stats; /* TX queue counters. 
*/\n #ifndef RTE_ARCH_64\n \trte_spinlock_t *uar_lock;\n@@ -223,6 +222,7 @@ struct mlx5_txq_ibv {\n \n /* TX queue control descriptor. */\n struct mlx5_txq_ctrl {\n+\tstruct mlx5_txq_data txq; /* Data path structure. */\n \tLIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */\n \trte_atomic32_t refcnt; /* Reference counter. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n@@ -230,10 +230,8 @@ struct mlx5_txq_ctrl {\n \tunsigned int max_tso_header; /* Max TSO header size. */\n \tstruct mlx5_txq_ibv *ibv; /* Verbs queue object. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n-\tstruct mlx5_txq_data txq; /* Data path structure. */\n \toff_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */\n \tvolatile void *bf_reg_orig; /* Blueflame register from verbs. */\n-\tuint16_t idx; /* Queue index. */\n };\n \n /* mlx5_rxq.c */\ndiff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c\nindex 5af199d0d5..ed50667f45 100644\n--- a/drivers/net/mlx5/mlx5_stats.c\n+++ b/drivers/net/mlx5/mlx5_stats.c\n@@ -386,7 +386,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n \n \t\tif (rxq == NULL)\n \t\t\tcontinue;\n-\t\tidx = rxq->stats.idx;\n+\t\tidx = rxq->idx;\n \t\tif (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \t\t\ttmp.q_ipackets[idx] += rxq->stats.ipackets;\n@@ -407,7 +407,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n \n \t\tif (txq == NULL)\n \t\t\tcontinue;\n-\t\tidx = txq->stats.idx;\n+\t\tidx = txq->idx;\n \t\tif (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {\n #ifdef MLX5_PMD_SOFT_COUNTERS\n \t\t\ttmp.q_opackets[idx] += txq->stats.opackets;\n@@ -442,21 +442,18 @@ mlx5_stats_reset(struct rte_eth_dev *dev)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;\n \tunsigned int i;\n-\tunsigned int idx;\n \n \tfor (i = 0; (i != priv->rxqs_n); ++i) {\n \t\tif ((*priv->rxqs)[i] == NULL)\n \t\t\tcontinue;\n-\t\tidx = (*priv->rxqs)[i]->stats.idx;\n-\t\t(*priv->rxqs)[i]->stats =\n-\t\t\t(struct mlx5_rxq_stats){ .idx = idx };\n+\t\tmemset(&(*priv->rxqs)[i]->stats, 0,\n+\t\t       sizeof(struct mlx5_rxq_stats));\n \t}\n \tfor (i = 0; (i != priv->txqs_n); ++i) {\n \t\tif ((*priv->txqs)[i] == NULL)\n \t\t\tcontinue;\n-\t\tidx = (*priv->txqs)[i]->stats.idx;\n-\t\t(*priv->txqs)[i]->stats =\n-\t\t\t(struct mlx5_txq_stats){ .idx = idx };\n+\t\tmemset(&(*priv->txqs)[i]->stats, 0,\n+\t\t       sizeof(struct mlx5_txq_stats));\n \t}\n \tmlx5_read_ib_stat(priv, \"out_of_buffer\", &stats_ctrl->imissed_base);\n #ifndef MLX5_PMD_SOFT_COUNTERS\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 5b73f0ff03..7c1e5594d6 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -123,7 +123,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"port %u Rx queue %u registering\"\n \t\t\t\" mp %s having %u chunks\",\n-\t\t\tdev->data->port_id, rxq_ctrl->idx,\n+\t\t\tdev->data->port_id, rxq_ctrl->rxq.idx,\n \t\t\tmp->name, mp->nb_mem_chunks);\n \t\tmlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);\n \t\tret = rxq_alloc_elts(rxq_ctrl);\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 1b3d89f2f6..4bd08cb035 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)\n \tfor (i = 0; (i != elts_n); ++i)\n 
\t\t(*txq_ctrl->txq.elts)[i] = NULL;\n \tDRV_LOG(DEBUG, \"port %u Tx queue %u allocated and configured %u WRs\",\n-\t\tPORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);\n+\t\tPORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);\n \ttxq_ctrl->txq.elts_head = 0;\n \ttxq_ctrl->txq.elts_tail = 0;\n \ttxq_ctrl->txq.elts_comp = 0;\n@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)\n \tstruct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;\n \n \tDRV_LOG(DEBUG, \"port %u Tx queue %u freeing WRs\",\n-\t\tPORT_ID(txq_ctrl->priv), txq_ctrl->idx);\n+\t\tPORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);\n \ttxq_ctrl->txq.elts_head = 0;\n \ttxq_ctrl->txq.elts_tail = 0;\n \ttxq_ctrl->txq.elts_comp = 0;\n@@ -224,7 +224,7 @@ mlx5_tx_queue_release(void *dpdk_txq)\n \t\tif ((*priv->txqs)[i] == txq) {\n \t\t\tmlx5_txq_release(ETH_DEV(priv), i);\n \t\t\tDRV_LOG(DEBUG, \"port %u removing Tx queue %u from list\",\n-\t\t\t\tPORT_ID(priv), txq_ctrl->idx);\n+\t\t\t\tPORT_ID(priv), txq->idx);\n \t\t\tbreak;\n \t\t}\n }\n@@ -273,7 +273,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)\n \t\t\tcontinue;\n \t\ttxq = (*priv->txqs)[i];\n \t\ttxq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);\n-\t\tassert(txq_ctrl->idx == (uint16_t)i);\n+\t\tassert(txq->idx == (uint16_t)i);\n \t\t/* UAR addr form verbs used to find dup and offset in page. */\n \t\tuar_va = (uintptr_t)txq_ctrl->bf_reg_orig;\n \t\toff = uar_va & (page_size - 1); /* offset in page. */\n@@ -301,7 +301,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)\n \t\t\t\tDRV_LOG(ERR,\n \t\t\t\t\t\"port %u call to mmap failed on UAR\"\n \t\t\t\t\t\" for txq %u\",\n-\t\t\t\t\tdev->data->port_id, txq_ctrl->idx);\n+\t\t\t\t\tdev->data->port_id, txq->idx);\n \t\t\t\trte_errno = ENXIO;\n \t\t\t\treturn -rte_errno;\n \t\t\t}\n@@ -629,7 +629,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)\n \n \tLIST_FOREACH(txq_ibv, &priv->txqsibv, next) {\n \t\tDRV_LOG(DEBUG, \"port %u Verbs Tx queue %u still referenced\",\n-\t\t\tdev->data->port_id, txq_ibv->txq_ctrl->idx);\n+\t\t\tdev->data->port_id, txq_ibv->txq_ctrl->txq.idx);\n \t\t++ret;\n \t}\n \treturn ret;\n@@ -778,7 +778,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \ttmpl->priv = priv;\n \ttmpl->socket = socket;\n \ttmpl->txq.elts_n = log2above(desc);\n-\ttmpl->idx = idx;\n+\ttmpl->txq.idx = idx;\n \ttxq_set_params(tmpl);\n \tDRV_LOG(DEBUG, \"port %u device_attr.max_qp_wr is %d\",\n \t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);\n@@ -786,7 +786,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);\n \ttmpl->txq.elts =\n \t\t(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);\n-\ttmpl->txq.stats.idx = idx;\n \trte_atomic32_inc(&tmpl->refcnt);\n \tLIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);\n \treturn tmpl;\n@@ -893,12 +892,12 @@ int\n mlx5_txq_verify(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_txq_ctrl *txq;\n+\tstruct mlx5_txq_ctrl *txq_ctrl;\n \tint ret = 0;\n \n-\tLIST_FOREACH(txq, &priv->txqsctrl, next) {\n+\tLIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {\n \t\tDRV_LOG(DEBUG, \"port %u Tx queue %u still referenced\",\n-\t\t\tdev->data->port_id, txq->idx);\n+\t\t\tdev->data->port_id, txq_ctrl->txq.idx);\n \t\t++ret;\n \t}\n \treturn ret;\n",
    "prefixes": [
        "v3",
        "2/4"
    ]
}
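
The "mbox" and "series" URLs in the response above are the usual way to pull this change for local application. A small sketch, again assuming the requests library (applying the files with git am is left to the shell):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/52316/").json()

# Single patch as an mbox file, ready for "git am patch-52316.mbox".
with open("patch-52316.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# Or the whole v3 series this patch belongs to (series id 4126).
with open("series-4126.mbox", "w") as f:
    f.write(requests.get(patch["series"][0]["mbox"]).text)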