get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
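A minimal sketch of driving this endpoint from Python, assuming the third-party requests library is installed; the token header and state value in the commented PATCH call are placeholders, and write access normally requires maintainer rights on the project:

    import requests

    BASE = "https://patches.dpdk.org/api"
    patch_id = 85307

    # GET: show a patch (returns the JSON document reproduced below).
    resp = requests.get(f"{BASE}/patches/{patch_id}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partial update, e.g. changing the state. Requires an API token
    # (placeholder shown) and appropriate permissions, so it is left commented.
    # resp = requests.patch(
    #     f"{BASE}/patches/{patch_id}/",
    #     headers={"Authorization": "Token <your-token>"},
    #     json={"state": "accepted"},
    # )
    # resp.raise_for_status()

The GET request above produces the response shown next.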

GET /api/patches/85307/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 85307,
    "url": "https://patches.dpdk.org/api/patches/85307/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1608205475-20067-6-git-send-email-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1608205475-20067-6-git-send-email-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1608205475-20067-6-git-send-email-michaelba@nvidia.com",
    "date": "2020-12-17T11:44:23",
    "name": "[05/17] net/mlx5: move rearm and clock queue CQ creation to common",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e62483d1ed9d1a3a6806de31316e50fb276c8749",
    "submitter": {
        "id": 1949,
        "url": "https://patches.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1608205475-20067-6-git-send-email-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 14348,
            "url": "https://patches.dpdk.org/api/series/14348/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=14348",
            "date": "2020-12-17T11:44:23",
            "name": "common/mlx5: share DevX resources creations",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/14348/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/85307/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/85307/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 36F1BA09F6;\n\tThu, 17 Dec 2020 12:45:00 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 25B50CA0C;\n\tThu, 17 Dec 2020 12:44:58 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id 3BF5CCA08\n for <dev@dpdk.org>; Thu, 17 Dec 2020 12:44:55 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n michaelba@nvidia.com) with SMTP; 17 Dec 2020 13:44:50 +0200",
            "from nvidia.com (pegasus07.mtr.labs.mlnx [10.210.16.112])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 0BHBio2U004524;\n Thu, 17 Dec 2020 13:44:50 +0200"
        ],
        "From": "Michael Baum <michaelba@nvidia.com>",
        "To": "dev@dpdk.org",
        "Cc": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Thu, 17 Dec 2020 11:44:23 +0000",
        "Message-Id": "<1608205475-20067-6-git-send-email-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1608205475-20067-1-git-send-email-michaelba@nvidia.com>",
        "References": "<1608205475-20067-1-git-send-email-michaelba@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH 05/17] net/mlx5: move rearm and clock queue CQ\n\tcreation to common",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Using common function for CQ creation at rearm queue and clock queue.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h      |   9 +--\n drivers/net/mlx5/mlx5_rxtx.c |   2 +-\n drivers/net/mlx5/mlx5_txpp.c | 147 +++++++++++--------------------------------\n 3 files changed, 40 insertions(+), 118 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 121d726..00ccaee 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -26,6 +26,7 @@\n #include <mlx5_prm.h>\n #include <mlx5_common_mp.h>\n #include <mlx5_common_mr.h>\n+#include <mlx5_common_devx.h>\n \n #include \"mlx5_defs.h\"\n #include \"mlx5_utils.h\"\n@@ -612,13 +613,7 @@ struct mlx5_flow_id_pool {\n /* Tx pacing queue structure - for Clock and Rearm queues. */\n struct mlx5_txpp_wq {\n \t/* Completion Queue related data.*/\n-\tstruct mlx5_devx_obj *cq;\n-\tvoid *cq_umem;\n-\tunion {\n-\t\tvolatile void *cq_buf;\n-\t\tvolatile struct mlx5_cqe *cqes;\n-\t};\n-\tvolatile uint32_t *cq_dbrec;\n+\tstruct mlx5_devx_cq cq_obj;\n \tuint32_t cq_ci:24;\n \tuint32_t arm_sn:2;\n \t/* Send Queue related data.*/\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex d12d746..dad24a3 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -2277,7 +2277,7 @@ enum mlx5_txcmp_code {\n \n \tqs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);\n \tqs->max_index = rte_cpu_to_be_32(wci);\n-\tqs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);\n+\tqs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);\n \tqs->reserved0 = RTE_BE32(0);\n \tqs->reserved1 = RTE_BE32(0);\n }\ndiff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c\nindex 2438bf1..54ea572 100644\n--- a/drivers/net/mlx5/mlx5_txpp.c\n+++ b/drivers/net/mlx5/mlx5_txpp.c\n@@ -13,6 +13,7 @@\n #include <rte_eal_paging.h>\n \n #include <mlx5_malloc.h>\n+#include <mlx5_common_devx.h>\n \n #include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n@@ -126,12 +127,7 @@\n \t\tclaim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));\n \tif (wq->sq_buf)\n \t\tmlx5_free((void *)(uintptr_t)wq->sq_buf);\n-\tif (wq->cq)\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(wq->cq));\n-\tif (wq->cq_umem)\n-\t\tclaim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));\n-\tif (wq->cq_buf)\n-\t\tmlx5_free((void *)(uintptr_t)wq->cq_buf);\n+\tmlx5_devx_cq_destroy(&wq->cq_obj);\n \tmemset(wq, 0, sizeof(*wq));\n }\n \n@@ -181,19 +177,6 @@\n }\n \n static void\n-mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n-{\n-\tstruct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;\n-\tstruct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;\n-\tuint32_t i;\n-\n-\tfor (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {\n-\t\tcqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;\n-\t\t++cqe;\n-\t}\n-}\n-\n-static void\n mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n {\n \tstruct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;\n@@ -228,7 +211,8 @@\n \t\tindex = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &\n \t\t\t((1 << MLX5_CQ_INDEX_WIDTH) - 1);\n \t\tqs->max_index = rte_cpu_to_be_32(index);\n-\t\tqs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);\n+\t\tqs->qpn_cqn =\n+\t\t\t   rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);\n \t}\n }\n \n@@ -238,7 +222,11 @@\n {\n \tstruct mlx5_devx_create_sq_attr sq_attr = { 0 };\n \tstruct mlx5_devx_modify_sq_attr msq_attr = { 0 };\n-\tstruct mlx5_devx_cq_attr cq_attr = { 0 };\n+\tstruct mlx5_devx_cq_attr cq_attr = {\n+\t\t.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?\n+\t\t\t\t\t MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B,\n+\t\t.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),\n+\t};\n \tstruct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;\n \tsize_t page_size;\n \tuint32_t umem_size, umem_dbrec;\n@@ -249,50 +237,16 @@\n \t\tDRV_LOG(ERR, 
\"Failed to get mem page size\");\n \t\treturn -ENOMEM;\n \t}\n-\t/* Allocate memory buffer for CQEs and doorbell record. */\n-\tumem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;\n-\tumem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n-\tumem_size += MLX5_DBR_SIZE;\n-\twq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,\n-\t\t\t\t page_size, sh->numa_node);\n-\tif (!wq->cq_buf) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate memory for Rearm Queue.\");\n-\t\treturn -ENOMEM;\n-\t}\n-\t/* Register allocated buffer in user space with DevX. */\n-\twq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,\n-\t\t\t\t\t       (void *)(uintptr_t)wq->cq_buf,\n-\t\t\t\t\t       umem_size,\n-\t\t\t\t\t       IBV_ACCESS_LOCAL_WRITE);\n-\tif (!wq->cq_umem) {\n-\t\trte_errno = errno;\n-\t\tDRV_LOG(ERR, \"Failed to register umem for Rearm Queue.\");\n-\t\tgoto error;\n-\t}\n \t/* Create completion queue object for Rearm Queue. */\n-\tcq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?\n-\t\t\t    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;\n-\tcq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);\n-\tcq_attr.eqn = sh->eqn;\n-\tcq_attr.q_umem_valid = 1;\n-\tcq_attr.q_umem_offset = 0;\n-\tcq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);\n-\tcq_attr.db_umem_valid = 1;\n-\tcq_attr.db_umem_offset = umem_dbrec;\n-\tcq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);\n-\tcq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);\n-\tcq_attr.log_page_size = rte_log2_u32(page_size);\n-\twq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);\n-\tif (!wq->cq) {\n-\t\trte_errno = errno;\n+\tret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,\n+\t\t\t\t  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,\n+\t\t\t\t  sh->numa_node);\n+\tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create CQ for Rearm Queue.\");\n-\t\tgoto error;\n+\t\treturn ret;\n \t}\n-\twq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);\n \twq->cq_ci = 0;\n \twq->arm_sn = 0;\n-\t/* Mark all CQEs initially as invalid. */\n-\tmlx5_txpp_fill_cqe_rearm_queue(sh);\n \t/*\n \t * Allocate memory buffer for Send Queue WQEs.\n \t * There should be no WQE leftovers in the cyclic queue.\n@@ -323,7 +277,7 @@\n \tsq_attr.state = MLX5_SQC_STATE_RST;\n \tsq_attr.tis_lst_sz = 1;\n \tsq_attr.tis_num = sh->tis->id;\n-\tsq_attr.cqn = wq->cq->id;\n+\tsq_attr.cqn = wq->cq_obj.cq->id;\n \tsq_attr.cd_master = 1;\n \tsq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);\n \tsq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;\n@@ -466,7 +420,13 @@\n {\n \tstruct mlx5_devx_create_sq_attr sq_attr = { 0 };\n \tstruct mlx5_devx_modify_sq_attr msq_attr = { 0 };\n-\tstruct mlx5_devx_cq_attr cq_attr = { 0 };\n+\tstruct mlx5_devx_cq_attr cq_attr = {\n+\t\t.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?\n+\t\t\t\t\t MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B,\n+\t\t.use_first_only = 1,\n+\t\t.overrun_ignore = 1,\n+\t\t.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),\n+\t};\n \tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n \tsize_t page_size;\n \tuint32_t umem_size, umem_dbrec;\n@@ -487,48 +447,14 @@\n \t}\n \tsh->txpp.ts_p = 0;\n \tsh->txpp.ts_n = 0;\n-\t/* Allocate memory buffer for CQEs and doorbell record. 
*/\n-\tumem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;\n-\tumem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n-\tumem_size += MLX5_DBR_SIZE;\n-\twq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,\n-\t\t\t\t\tpage_size, sh->numa_node);\n-\tif (!wq->cq_buf) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate memory for Clock Queue.\");\n-\t\treturn -ENOMEM;\n-\t}\n-\t/* Register allocated buffer in user space with DevX. */\n-\twq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,\n-\t\t\t\t\t       (void *)(uintptr_t)wq->cq_buf,\n-\t\t\t\t\t       umem_size,\n-\t\t\t\t\t       IBV_ACCESS_LOCAL_WRITE);\n-\tif (!wq->cq_umem) {\n-\t\trte_errno = errno;\n-\t\tDRV_LOG(ERR, \"Failed to register umem for Clock Queue.\");\n-\t\tgoto error;\n-\t}\n \t/* Create completion queue object for Clock Queue. */\n-\tcq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?\n-\t\t\t    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;\n-\tcq_attr.use_first_only = 1;\n-\tcq_attr.overrun_ignore = 1;\n-\tcq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);\n-\tcq_attr.eqn = sh->eqn;\n-\tcq_attr.q_umem_valid = 1;\n-\tcq_attr.q_umem_offset = 0;\n-\tcq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);\n-\tcq_attr.db_umem_valid = 1;\n-\tcq_attr.db_umem_offset = umem_dbrec;\n-\tcq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);\n-\tcq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);\n-\tcq_attr.log_page_size = rte_log2_u32(page_size);\n-\twq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);\n-\tif (!wq->cq) {\n-\t\trte_errno = errno;\n+\tret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,\n+\t\t\t\t  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,\n+\t\t\t\t  sh->numa_node);\n+\tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create CQ for Clock Queue.\");\n \t\tgoto error;\n \t}\n-\twq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);\n \twq->cq_ci = 0;\n \t/* Allocate memory buffer for Send Queue WQEs. 
*/\n \tif (sh->txpp.test) {\n@@ -574,7 +500,7 @@\n \t\tsq_attr.static_sq_wq = 1;\n \t}\n \tsq_attr.state = MLX5_SQC_STATE_RST;\n-\tsq_attr.cqn = wq->cq->id;\n+\tsq_attr.cqn = wq->cq_obj.cq->id;\n \tsq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;\n \tsq_attr.wq_attr.cd_slave = 1;\n \tsq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);\n@@ -625,12 +551,13 @@\n \tstruct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;\n \tuint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;\n \tuint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;\n-\tuint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);\n+\tuint64_t db_be =\n+\t\trte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);\n \tbase_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);\n \tuint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);\n \n \trte_compiler_barrier();\n-\taq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);\n+\taq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);\n \trte_wmb();\n #ifdef RTE_ARCH_64\n \t*(uint64_t *)addr = db_be;\n@@ -728,7 +655,7 @@\n mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)\n {\n \tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n-\tstruct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;\n+\tstruct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;\n \tunion {\n \t\trte_int128_t u128;\n \t\tstruct mlx5_cqe_ts cts;\n@@ -809,7 +736,7 @@\n \tdo {\n \t\tvolatile struct mlx5_cqe *cqe;\n \n-\t\tcqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];\n+\t\tcqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];\n \t\tret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);\n \t\tswitch (ret) {\n \t\tcase MLX5_CQE_STATUS_ERR:\n@@ -841,7 +768,7 @@\n \t\t}\n \t\t/* Update doorbell record to notify hardware. */\n \t\trte_compiler_barrier();\n-\t\t*wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);\n+\t\t*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);\n \t\trte_wmb();\n \t\twq->cq_ci = cq_ci;\n \t\t/* Fire new requests to Rearm Queue. */\n@@ -936,9 +863,8 @@\n \t}\n \t/* Subscribe CQ event to the event channel controlled by the driver. */\n \tret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,\n-\t\t\t\t\t\t   sh->txpp.rearm_queue.cq->obj,\n-\t\t\t\t\t\t   sizeof(event_nums),\n-\t\t\t\t\t\t   event_nums, 0);\n+\t\t\t\t\t    sh->txpp.rearm_queue.cq_obj.cq->obj,\n+\t\t\t\t\t     sizeof(event_nums), event_nums, 0);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to subscribe CQE event.\");\n \t\trte_errno = errno;\n@@ -1140,7 +1066,8 @@\n \n \tif (sh->txpp.refcnt) {\n \t\tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n-\t\tstruct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;\n+\t\tstruct mlx5_cqe *cqe =\n+\t\t\t\t(struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;\n \t\tunion {\n \t\t\trte_int128_t u128;\n \t\t\tstruct mlx5_cqe_ts cts;\n",
    "prefixes": [
        "05/17"
    ]
}
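One common use of this record is the "mbox" field: it serves the patch as an mbox file that can be applied to a source tree. A hedged sketch, again assuming the requests library; the output file name is illustrative:

    import requests

    # URL taken verbatim from the "mbox" field of the JSON above.
    mbox_url = ("https://patches.dpdk.org/project/dpdk/patch/"
                "1608205475-20067-6-git-send-email-michaelba@nvidia.com/mbox/")
    resp = requests.get(mbox_url)
    resp.raise_for_status()
    # Hypothetical local file name for the downloaded mbox.
    with open("net-mlx5-move-rearm-and-clock-queue-cq-creation.mbox", "wb") as f:
        f.write(resp.content)
    # The saved file can then be applied to a DPDK checkout with:
    #   git am net-mlx5-move-rearm-and-clock-queue-cq-creation.mbox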