get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/123032/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 123032,
    "url": "https://patches.dpdk.org/api/patches/123032/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230203094340.8103-18-beilei.xing@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230203094340.8103-18-beilei.xing@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230203094340.8103-18-beilei.xing@intel.com",
    "date": "2023-02-03T09:43:38",
    "name": "[v6,17/19] common/idpf: refine API name for queue config module",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7ccc87e71e3cabd6f113f6d1b71f859499de5a52",
    "submitter": {
        "id": 410,
        "url": "https://patches.dpdk.org/api/people/410/?format=api",
        "name": "Xing, Beilei",
        "email": "beilei.xing@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230203094340.8103-18-beilei.xing@intel.com/mbox/",
    "series": [
        {
            "id": 26786,
            "url": "https://patches.dpdk.org/api/series/26786/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=26786",
            "date": "2023-02-03T09:43:21",
            "name": "net/idpf: introduce idpf common modle",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/26786/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/123032/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/123032/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D362D41BBB;\n\tFri,  3 Feb 2023 11:12:10 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A9AFC42FA8;\n\tFri,  3 Feb 2023 11:10:47 +0100 (CET)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n by mails.dpdk.org (Postfix) with ESMTP id A703642D79\n for <dev@dpdk.org>; Fri,  3 Feb 2023 11:10:40 +0100 (CET)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Feb 2023 02:10:40 -0800",
            "from dpdk-beileix-3.sh.intel.com ([10.67.110.253])\n by orsmga007.jf.intel.com with ESMTP; 03 Feb 2023 02:10:38 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1675419040; x=1706955040;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=ieJEdUtm3mrR2ggJ4PsecYafV5QbHJ+xFa4ZxaE65ds=;\n b=BNYCYtRQ0JVkCsHrLz+bWFKU6y0r/oHNYrn9/NjmskoHD6+ylWTRR1WP\n LPFY6rP6XY5KCds8fJehJWKPU/9fNrxJxL1WQ0TVEev94xxgB9HJnUYXF\n spFXx8XUPgIsncl3g934Zgv5A67R0Ko2Y4WKKBDu9j9tPA+5G0vPerT2/\n rx6W7pThH4YAljutEyV24RGn9ODJYm5rYnNYmll8B7CmX0c80hV97RjU6\n YECS+3plm5ebIBzwmyJnTQz1vwhUdEZLNPP6ymq7ZAiffNlJcwlFQE6fx\n PLetCtMJcedOD7aRgYcEFx4fT/vVoC+KCWxWGxwt5yJOgvvbH9XwV7NPt w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10609\"; a=\"356052860\"",
            "E=Sophos;i=\"5.97,270,1669104000\"; d=\"scan'208\";a=\"356052860\"",
            "E=McAfee;i=\"6500,9779,10609\"; a=\"659047922\"",
            "E=Sophos;i=\"5.97,270,1669104000\"; d=\"scan'208\";a=\"659047922\""
        ],
        "X-ExtLoop1": "1",
        "From": "beilei.xing@intel.com",
        "To": "jingjing.wu@intel.com",
        "Cc": "dev@dpdk.org,\n\tqi.z.zhang@intel.com,\n\tBeilei Xing <beilei.xing@intel.com>",
        "Subject": "[PATCH v6 17/19] common/idpf: refine API name for queue config module",
        "Date": "Fri,  3 Feb 2023 09:43:38 +0000",
        "Message-Id": "<20230203094340.8103-18-beilei.xing@intel.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "In-Reply-To": "<20230203094340.8103-1-beilei.xing@intel.com>",
        "References": "<20230202095357.37929-1-beilei.xing@intel.com>\n <20230203094340.8103-1-beilei.xing@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nThis patch refines API name for queue config functions.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/common/idpf/idpf_common_rxtx.c        | 42 ++++++++--------\n drivers/common/idpf/idpf_common_rxtx.h        | 38 +++++++-------\n drivers/common/idpf/idpf_common_rxtx_avx512.c |  2 +-\n drivers/common/idpf/version.map               | 37 +++++++-------\n drivers/net/idpf/idpf_rxtx.c                  | 50 +++++++++----------\n 5 files changed, 85 insertions(+), 84 deletions(-)",
    "diff": "diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c\nindex bc95fef6bc..0b87aeea73 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.c\n+++ b/drivers/common/idpf/idpf_common_rxtx.c\n@@ -11,7 +11,7 @@ int idpf_timestamp_dynfield_offset = -1;\n uint64_t idpf_timestamp_dynflag;\n \n int\n-idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh)\n {\n \t/* The following constraints must be satisfied:\n \t * thresh < rxq->nb_rx_desc\n@@ -26,8 +26,8 @@ idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n }\n \n int\n-idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n-\t\t     uint16_t tx_free_thresh)\n+idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\t\tuint16_t tx_free_thresh)\n {\n \t/* TX descriptors will have their RS bit set after tx_rs_thresh\n \t * descriptors have been used. The TX descriptor ring will be cleaned\n@@ -74,7 +74,7 @@ idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n }\n \n void\n-idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq)\n+idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq)\n {\n \tuint16_t i;\n \n@@ -90,7 +90,7 @@ idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq)\n }\n \n void\n-idpf_release_txq_mbufs(struct idpf_tx_queue *txq)\n+idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq)\n {\n \tuint16_t nb_desc, i;\n \n@@ -115,7 +115,7 @@ idpf_release_txq_mbufs(struct idpf_tx_queue *txq)\n }\n \n void\n-idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq)\n+idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq)\n {\n \tuint16_t len;\n \tuint32_t i;\n@@ -134,7 +134,7 @@ idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq)\n }\n \n void\n-idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n+idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq)\n {\n \tuint16_t len;\n \tuint32_t i;\n@@ -166,15 +166,15 @@ idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n }\n \n 
void\n-idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq)\n {\n-\tidpf_reset_split_rx_descq(rxq);\n-\tidpf_reset_split_rx_bufq(rxq->bufq1);\n-\tidpf_reset_split_rx_bufq(rxq->bufq2);\n+\tidpf_qc_split_rx_descq_reset(rxq);\n+\tidpf_qc_split_rx_bufq_reset(rxq->bufq1);\n+\tidpf_qc_split_rx_bufq_reset(rxq->bufq2);\n }\n \n void\n-idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq)\n {\n \tuint16_t len;\n \tuint32_t i;\n@@ -205,7 +205,7 @@ idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq)\n }\n \n void\n-idpf_reset_split_tx_descq(struct idpf_tx_queue *txq)\n+idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq)\n {\n \tstruct idpf_tx_entry *txe;\n \tuint32_t i, size;\n@@ -239,7 +239,7 @@ idpf_reset_split_tx_descq(struct idpf_tx_queue *txq)\n }\n \n void\n-idpf_reset_split_tx_complq(struct idpf_tx_queue *cq)\n+idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq)\n {\n \tuint32_t i, size;\n \n@@ -257,7 +257,7 @@ idpf_reset_split_tx_complq(struct idpf_tx_queue *cq)\n }\n \n void\n-idpf_reset_single_tx_queue(struct idpf_tx_queue *txq)\n+idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)\n {\n \tstruct idpf_tx_entry *txe;\n \tuint32_t i, size;\n@@ -294,7 +294,7 @@ idpf_reset_single_tx_queue(struct idpf_tx_queue *txq)\n }\n \n void\n-idpf_rx_queue_release(void *rxq)\n+idpf_qc_rx_queue_release(void *rxq)\n {\n \tstruct idpf_rx_queue *q = rxq;\n \n@@ -324,7 +324,7 @@ idpf_rx_queue_release(void *rxq)\n }\n \n void\n-idpf_tx_queue_release(void *txq)\n+idpf_qc_tx_queue_release(void *txq)\n {\n \tstruct idpf_tx_queue *q = txq;\n \n@@ -343,7 +343,7 @@ idpf_tx_queue_release(void *txq)\n }\n \n int\n-idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)\n+idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq)\n {\n \tint err;\n \tif ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {\n@@ -360,7 +360,7 @@ idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)\n }\n 
\n int\n-idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n+idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)\n {\n \tvolatile struct virtchnl2_singleq_rx_buf_desc *rxd;\n \tstruct rte_mbuf *mbuf = NULL;\n@@ -395,7 +395,7 @@ idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n }\n \n int\n-idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n+idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)\n {\n \tvolatile struct virtchnl2_splitq_rx_buf_desc *rxd;\n \tstruct rte_mbuf *mbuf = NULL;\n@@ -1451,7 +1451,7 @@ idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)\n }\n \n int __rte_cold\n-idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)\n+idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)\n {\n \trxq->ops = &def_singleq_rx_ops_vec;\n \treturn idpf_singleq_rx_vec_setup_default(rxq);\ndiff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h\nindex 6e3ee7de25..7966d15f51 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.h\n+++ b/drivers/common/idpf/idpf_common_rxtx.h\n@@ -215,38 +215,38 @@ extern int idpf_timestamp_dynfield_offset;\n extern uint64_t idpf_timestamp_dynflag;\n \n __rte_internal\n-int idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh);\n+int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);\n __rte_internal\n-int idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n-\t\t\t uint16_t tx_free_thresh);\n+int idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\t\t    uint16_t tx_free_thresh);\n __rte_internal\n-void idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq);\n+void idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq);\n __rte_internal\n-void idpf_release_txq_mbufs(struct idpf_tx_queue *txq);\n+void idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq);\n __rte_internal\n-void idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq);\n+void idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq);\n __rte_internal\n-void 
idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq);\n+void idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq);\n __rte_internal\n-void idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq);\n+void idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq);\n __rte_internal\n-void idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq);\n+void idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq);\n __rte_internal\n-void idpf_reset_split_tx_descq(struct idpf_tx_queue *txq);\n+void idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq);\n __rte_internal\n-void idpf_reset_split_tx_complq(struct idpf_tx_queue *cq);\n+void idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq);\n __rte_internal\n-void idpf_reset_single_tx_queue(struct idpf_tx_queue *txq);\n+void idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq);\n __rte_internal\n-void idpf_rx_queue_release(void *rxq);\n+void idpf_qc_rx_queue_release(void *rxq);\n __rte_internal\n-void idpf_tx_queue_release(void *txq);\n+void idpf_qc_tx_queue_release(void *txq);\n __rte_internal\n-int idpf_register_ts_mbuf(struct idpf_rx_queue *rxq);\n+int idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq);\n __rte_internal\n-int idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq);\n+int idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);\n __rte_internal\n-int idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq);\n+int idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);\n __rte_internal\n uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t       uint16_t nb_pkts);\n@@ -263,9 +263,9 @@ __rte_internal\n uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tuint16_t nb_pkts);\n __rte_internal\n-int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);\n+int idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);\n __rte_internal\n-int idpf_singleq_tx_vec_setup_avx512(struct idpf_tx_queue *txq);\n+int idpf_qc_singleq_tx_vec_avx512_setup(struct idpf_tx_queue 
*txq);\n __rte_internal\n uint16_t idpf_singleq_recv_pkts_avx512(void *rx_queue,\n \t\t\t\t       struct rte_mbuf **rx_pkts,\ndiff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c\nindex 6ae0e14d2f..d94e36b521 100644\n--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c\n+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c\n@@ -850,7 +850,7 @@ static const struct idpf_txq_ops avx512_singleq_tx_vec_ops = {\n };\n \n int __rte_cold\n-idpf_singleq_tx_vec_setup_avx512(struct idpf_tx_queue *txq)\n+idpf_qc_singleq_tx_vec_avx512_setup(struct idpf_tx_queue *txq)\n {\n \ttxq->ops = &avx512_singleq_tx_vec_ops;\n \treturn 0;\ndiff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map\nindex bd4dae503a..2ff152a353 100644\n--- a/drivers/common/idpf/version.map\n+++ b/drivers/common/idpf/version.map\n@@ -4,6 +4,25 @@ INTERNAL {\n \tidpf_adapter_deinit;\n \tidpf_adapter_init;\n \n+\tidpf_qc_rx_thresh_check;\n+\tidpf_qc_rx_queue_release;\n+\tidpf_qc_rxq_mbufs_release;\n+\tidpf_qc_single_rx_queue_reset;\n+\tidpf_qc_single_rxq_mbufs_alloc;\n+\tidpf_qc_single_tx_queue_reset;\n+\tidpf_qc_singleq_rx_vec_setup;\n+\tidpf_qc_singleq_tx_vec_avx512_setup;\n+\tidpf_qc_split_rx_bufq_reset;\n+\tidpf_qc_split_rx_descq_reset;\n+\tidpf_qc_split_rx_queue_reset;\n+\tidpf_qc_split_rxq_mbufs_alloc;\n+\tidpf_qc_split_tx_complq_reset;\n+\tidpf_qc_split_tx_descq_reset;\n+\tidpf_qc_ts_mbuf_register;\n+\tidpf_qc_tx_queue_release;\n+\tidpf_qc_tx_thresh_check;\n+\tidpf_qc_txq_mbufs_release;\n+\n \tidpf_vport_deinit;\n \tidpf_vport_info_init;\n \tidpf_vport_init;\n@@ -11,32 +30,14 @@ INTERNAL {\n \tidpf_vport_irq_unmap_config;\n \tidpf_vport_rss_config;\n \n-\tidpf_alloc_single_rxq_mbufs;\n-\tidpf_alloc_split_rxq_mbufs;\n-\tidpf_check_rx_thresh;\n-\tidpf_check_tx_thresh;\n \tidpf_execute_vc_cmd;\n 
\tidpf_prep_pkts;\n-\tidpf_register_ts_mbuf;\n-\tidpf_release_rxq_mbufs;\n-\tidpf_release_txq_mbufs;\n-\tidpf_reset_single_rx_queue;\n-\tidpf_reset_single_tx_queue;\n-\tidpf_reset_split_rx_bufq;\n-\tidpf_reset_split_rx_descq;\n-\tidpf_reset_split_rx_queue;\n-\tidpf_reset_split_tx_complq;\n-\tidpf_reset_split_tx_descq;\n-\tidpf_rx_queue_release;\n \tidpf_singleq_recv_pkts;\n \tidpf_singleq_recv_pkts_avx512;\n-\tidpf_singleq_rx_vec_setup;\n-\tidpf_singleq_tx_vec_setup_avx512;\n \tidpf_singleq_xmit_pkts;\n \tidpf_singleq_xmit_pkts_avx512;\n \tidpf_splitq_recv_pkts;\n \tidpf_splitq_xmit_pkts;\n-\tidpf_tx_queue_release;\n \tidpf_vc_alloc_vectors;\n \tidpf_vc_check_api_version;\n \tidpf_vc_config_irq_map_unmap;\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex c0c622d64b..ec75d6f69e 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -51,11 +51,11 @@ idpf_tx_offload_convert(uint64_t offload)\n }\n \n static const struct idpf_rxq_ops def_rxq_ops = {\n-\t.release_mbufs = idpf_release_rxq_mbufs,\n+\t.release_mbufs = idpf_qc_rxq_mbufs_release,\n };\n \n static const struct idpf_txq_ops def_txq_ops = {\n-\t.release_mbufs = idpf_release_txq_mbufs,\n+\t.release_mbufs = idpf_qc_txq_mbufs_release,\n };\n \n static const struct rte_memzone *\n@@ -183,7 +183,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,\n \t\tgoto err_sw_ring_alloc;\n \t}\n \n-\tidpf_reset_split_rx_bufq(bufq);\n+\tidpf_qc_split_rx_bufq_reset(bufq);\n \tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n \t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n \tbufq->ops = &def_rxq_ops;\n@@ -242,12 +242,12 @@ idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n \t\tIDPF_DEFAULT_RX_FREE_THRESH :\n \t\trx_conf->rx_free_thresh;\n-\tif (idpf_check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\tif (idpf_qc_rx_thresh_check(nb_desc, 
rx_free_thresh) != 0)\n \t\treturn -EINVAL;\n \n \t/* Free memory if needed */\n \tif (dev->data->rx_queues[queue_idx] != NULL) {\n-\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tidpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n@@ -300,12 +300,12 @@ idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tgoto err_sw_ring_alloc;\n \t\t}\n \n-\t\tidpf_reset_single_rx_queue(rxq);\n+\t\tidpf_qc_single_rx_queue_reset(rxq);\n \t\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n \t\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n \t\trxq->ops = &def_rxq_ops;\n \t} else {\n-\t\tidpf_reset_split_rx_descq(rxq);\n+\t\tidpf_qc_split_rx_descq_reset(rxq);\n \n \t\t/* Setup Rx buffer queues */\n \t\tret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,\n@@ -379,7 +379,7 @@ idpf_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n \tcq->tx_ring_phys_addr = mz->iova;\n \tcq->compl_ring = mz->addr;\n \tcq->mz = mz;\n-\tidpf_reset_split_tx_complq(cq);\n+\tidpf_qc_split_tx_complq_reset(cq);\n \n \ttxq->complq = cq;\n \n@@ -413,12 +413,12 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n \ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?\n \t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n-\tif (idpf_check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n+\tif (idpf_qc_tx_thresh_check(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n \t\treturn -EINVAL;\n \n \t/* Free memory if needed. 
*/\n \tif (dev->data->tx_queues[queue_idx] != NULL) {\n-\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tidpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n@@ -470,10 +470,10 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \tif (!is_splitq) {\n \t\ttxq->tx_ring = mz->addr;\n-\t\tidpf_reset_single_tx_queue(txq);\n+\t\tidpf_qc_single_tx_queue_reset(txq);\n \t} else {\n \t\ttxq->desc_ring = mz->addr;\n-\t\tidpf_reset_split_tx_descq(txq);\n+\t\tidpf_qc_split_tx_descq_reset(txq);\n \n \t\t/* Setup tx completion queue if split model */\n \t\tret = idpf_tx_complq_setup(dev, txq, queue_idx,\n@@ -516,7 +516,7 @@ idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\treturn -EINVAL;\n \t}\n \n-\terr = idpf_register_ts_mbuf(rxq);\n+\terr = idpf_qc_ts_mbuf_register(rxq);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"fail to residter timestamp mbuf %u\",\n \t\t\t\t\trx_queue_id);\n@@ -525,7 +525,7 @@ idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \n \tif (rxq->bufq1 == NULL) {\n \t\t/* Single queue */\n-\t\terr = idpf_alloc_single_rxq_mbufs(rxq);\n+\t\terr = idpf_qc_single_rxq_mbufs_alloc(rxq);\n \t\tif (err != 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX queue mbuf\");\n \t\t\treturn err;\n@@ -537,12 +537,12 @@ idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\tIDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n \t} else {\n \t\t/* Split queue */\n-\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq1);\n+\t\terr = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);\n \t\tif (err != 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n \t\t\treturn err;\n \t\t}\n-\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq2);\n+\t\terr = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);\n \t\tif (err != 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n \t\t\treturn err;\n@@ -664,11 
+664,11 @@ idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \trxq = dev->data->rx_queues[rx_queue_id];\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n \t\trxq->ops->release_mbufs(rxq);\n-\t\tidpf_reset_single_rx_queue(rxq);\n+\t\tidpf_qc_single_rx_queue_reset(rxq);\n \t} else {\n \t\trxq->bufq1->ops->release_mbufs(rxq->bufq1);\n \t\trxq->bufq2->ops->release_mbufs(rxq->bufq2);\n-\t\tidpf_reset_split_rx_queue(rxq);\n+\t\tidpf_qc_split_rx_queue_reset(rxq);\n \t}\n \tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n \n@@ -695,10 +695,10 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \ttxq = dev->data->tx_queues[tx_queue_id];\n \ttxq->ops->release_mbufs(txq);\n \tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n-\t\tidpf_reset_single_tx_queue(txq);\n+\t\tidpf_qc_single_tx_queue_reset(txq);\n \t} else {\n-\t\tidpf_reset_split_tx_descq(txq);\n-\t\tidpf_reset_split_tx_complq(txq->complq);\n+\t\tidpf_qc_split_tx_descq_reset(txq);\n+\t\tidpf_qc_split_tx_complq_reset(txq->complq);\n \t}\n \tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n \n@@ -708,13 +708,13 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n void\n idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_rx_queue_release(dev->data->rx_queues[qid]);\n+\tidpf_qc_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n void\n idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_tx_queue_release(dev->data->tx_queues[qid]);\n+\tidpf_qc_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n void\n@@ -776,7 +776,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)\n \t\tif (vport->rx_vec_allowed) {\n \t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\t\t\trxq = dev->data->rx_queues[i];\n-\t\t\t\t(void)idpf_singleq_rx_vec_setup(rxq);\n+\t\t\t\t(void)idpf_qc_singleq_rx_vec_setup(rxq);\n \t\t\t}\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif 
(vport->rx_use_avx512) {\n@@ -835,7 +835,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)\n \t\t\t\t\ttxq = dev->data->tx_queues[i];\n \t\t\t\t\tif (txq == NULL)\n \t\t\t\t\t\tcontinue;\n-\t\t\t\t\tidpf_singleq_tx_vec_setup_avx512(txq);\n+\t\t\t\t\tidpf_qc_singleq_tx_vec_avx512_setup(txq);\n \t\t\t\t}\n \t\t\t\tdev->tx_pkt_burst = idpf_singleq_xmit_pkts_avx512;\n \t\t\t\tdev->tx_pkt_prepare = idpf_prep_pkts;\n",
    "prefixes": [
        "v6",
        "17/19"
    ]
}