get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces the writable fields of the resource).

GET /api/patches/126345/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 126345,
    "url": "https://patches.dpdk.org/api/patches/126345/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230421065048.106899-2-beilei.xing@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230421065048.106899-2-beilei.xing@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230421065048.106899-2-beilei.xing@intel.com",
    "date": "2023-04-21T06:50:39",
    "name": "[01/10] net/cpfl: refine structures",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d84e63e502f2c1da71b6cc257141f5390af80966",
    "submitter": {
        "id": 410,
        "url": "https://patches.dpdk.org/api/people/410/?format=api",
        "name": "Xing, Beilei",
        "email": "beilei.xing@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230421065048.106899-2-beilei.xing@intel.com/mbox/",
    "series": [
        {
            "id": 27810,
            "url": "https://patches.dpdk.org/api/series/27810/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=27810",
            "date": "2023-04-21T06:50:38",
            "name": "add hairpin queue support",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/27810/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/126345/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/126345/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6B8F2429A9;\n\tFri, 21 Apr 2023 09:13:59 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9BDCB41156;\n\tFri, 21 Apr 2023 09:13:55 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by mails.dpdk.org (Postfix) with ESMTP id 39F57410DD\n for <dev@dpdk.org>; Fri, 21 Apr 2023 09:13:52 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Apr 2023 00:13:52 -0700",
            "from dpdk-beileix-3.sh.intel.com ([10.67.110.253])\n by orsmga008.jf.intel.com with ESMTP; 21 Apr 2023 00:13:50 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1682061233; x=1713597233;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=I0+3lO/1KDJs2KT/65ipDy0lXWiaUqFH8Cu73UgH4fI=;\n b=FQbUGxf0ABApYXlrSri9l6UU7nsaA4MsIvLkGkL89lqmHh3/Z2rpoOGs\n qRCHwiEvKIABm9B/sZ+w6pki6lX/8sBw+/EqLiI9F4fqMoQMWYLp+x3Ai\n DaNcFscY62oW2JRpUksIaNVJbm68WrcF0/bzhvZHAwblW6cvw4fjMwcsm\n 7mfrDRFbIAs/FpeWKn/6McwG8uDlzN63Oi8uswXKXhghfn5RPqMqE1WQi\n EspE6K0NTiT59zuZZCJr2d2stn5BSIs4VwNETsRHKZZaJaySBmoH+e13K\n MHBsss3PRmpDmrKHIIXYfk0bMXJQuO149FPmjV9W5obcNkLZ9VulkS0sd Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10686\"; a=\"326260036\"",
            "E=Sophos;i=\"5.99,214,1677571200\"; d=\"scan'208\";a=\"326260036\"",
            "E=McAfee;i=\"6600,9927,10686\"; a=\"722669099\"",
            "E=Sophos;i=\"5.99,214,1677571200\"; d=\"scan'208\";a=\"722669099\""
        ],
        "X-ExtLoop1": "1",
        "From": "beilei.xing@intel.com",
        "To": "jingjing.wu@intel.com",
        "Cc": "dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing <beilei.xing@intel.com>",
        "Subject": "[PATCH 01/10] net/cpfl: refine structures",
        "Date": "Fri, 21 Apr 2023 06:50:39 +0000",
        "Message-Id": "<20230421065048.106899-2-beilei.xing@intel.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "In-Reply-To": "<20230421065048.106899-1-beilei.xing@intel.com>",
        "References": "<20230421065048.106899-1-beilei.xing@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nThis patch refines some structures to support hairpin queue,\ncpfl_rx_queue/cpfl_tx_queue/cpfl_vport.\n\nSigned-off-by: Mingxia Liu <mingxia.liu@intel.com>\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/net/cpfl/cpfl_ethdev.c          |  85 +++++++-----\n drivers/net/cpfl/cpfl_ethdev.h          |   6 +-\n drivers/net/cpfl/cpfl_rxtx.c            | 175 +++++++++++++++++-------\n drivers/net/cpfl/cpfl_rxtx.h            |   8 ++\n drivers/net/cpfl/cpfl_rxtx_vec_common.h |  17 +--\n 5 files changed, 196 insertions(+), 95 deletions(-)",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex 306b8ad769..4a507f05d5 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -124,7 +124,8 @@ static int\n cpfl_dev_link_update(struct rte_eth_dev *dev,\n \t\t     __rte_unused int wait_to_complete)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct rte_eth_link new_link;\n \tunsigned int i;\n \n@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,\n static int\n cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \n \tdev_info->max_rx_queues = base->caps.max_rx_q;\n@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n static int\n cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \n \t/* mtu setting is forbidden if port is start */\n \tif (dev->data->dev_started) {\n@@ -256,12 +259,12 @@ static uint64_t\n cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n {\n \tuint64_t mbuf_alloc_failed = 0;\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i = 0;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tmbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tmbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,\n \t\t\t\t\t\t     __ATOMIC_RELAXED);\n \t}\n \n@@ -271,8 +274,8 
@@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n static int\n cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tint ret;\n \n@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n static void\n cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\t__atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\t__atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);\n \t}\n }\n \n static int\n cpfl_dev_stats_reset(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tint ret;\n \n@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)\n static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,\n \t\t\t       struct rte_eth_xstat *xstats, unsigned int n)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tunsigned int i;\n \tint ret;\n@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,\n \t\t     struct rte_eth_rss_reta_entry64 *reta_conf,\n \t\t     uint16_t reta_size)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct 
cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tuint16_t idx, shift;\n \tint ret = 0;\n@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,\n \t\t    struct rte_eth_rss_reta_entry64 *reta_conf,\n \t\t    uint16_t reta_size)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tuint16_t idx, shift;\n \tint ret = 0;\n@@ -536,7 +541,8 @@ static int\n cpfl_rss_hash_update(struct rte_eth_dev *dev,\n \t\t     struct rte_eth_rss_conf *rss_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tint ret = 0;\n \n@@ -601,7 +607,8 @@ static int\n cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,\n \t\t       struct rte_eth_rss_conf *rss_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tint ret = 0;\n \n@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,\n static int\n cpfl_dev_configure(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct rte_eth_conf *conf = &dev->data->dev_conf;\n \tstruct idpf_adapter *base = vport->adapter;\n \tint ret;\n@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)\n static int\n cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = 
dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tuint16_t nb_rx_queues = dev->data->nb_rx_queues;\n \n \treturn idpf_vport_irq_map_config(vport, nb_rx_queues);\n@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)\n static int\n cpfl_start_queues(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue *rxq;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint err = 0;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tif (txq == NULL || txq->tx_deferred_start)\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tif (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)\n \t\t\tcontinue;\n \t\terr = cpfl_tx_queue_start(dev, i);\n \t\tif (err != 0) {\n@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)\n \t}\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tif (rxq == NULL || rxq->rx_deferred_start)\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tif (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)\n \t\t\tcontinue;\n \t\terr = cpfl_rx_queue_start(dev, i);\n \t\tif (err != 0) {\n@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)\n static int\n cpfl_dev_start(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);\n \tuint16_t num_allocated_vectors = base->caps.num_allocated_vectors;\n@@ -815,7 +825,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)\n static int\n cpfl_dev_stop(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \n \tif (vport->stopped == 
1)\n \t\treturn 0;\n@@ -836,7 +847,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)\n static int\n cpfl_dev_close(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);\n \n \tcpfl_dev_stop(dev);\n@@ -846,7 +858,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)\n \tadapter->cur_vport_nb--;\n \tdev->data->dev_private = NULL;\n \tadapter->vports[vport->sw_idx] = NULL;\n-\trte_free(vport);\n+\trte_free(cpfl_vport);\n \n \treturn 0;\n }\n@@ -1051,7 +1063,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)\n \tint i;\n \n \tfor (i = 0; i < adapter->cur_vport_nb; i++) {\n-\t\tvport = adapter->vports[i];\n+\t\tvport = &adapter->vports[i]->base;\n \t\tif (vport->vport_id != vport_id)\n \t\t\tcontinue;\n \t\telse\n@@ -1328,7 +1340,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)\n static int\n cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct cpfl_vport_param *param = init_params;\n \tstruct cpfl_adapter_ext *adapter = param->adapter;\n \t/* for sending create vport virtchnl msg prepare */\n@@ -1354,7 +1367,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n \t\tgoto err;\n \t}\n \n-\tadapter->vports[param->idx] = vport;\n+\tadapter->vports[param->idx] = cpfl_vport;\n \tadapter->cur_vports |= RTE_BIT32(param->devarg_id);\n \tadapter->cur_vport_nb++;\n \n@@ -1470,7 +1483,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t\tsnprintf(name, sizeof(name), \"cpfl_%s_vport_0\",\n \t\t\t pci_dev->device.name);\n \t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n-\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t 
   sizeof(struct cpfl_vport),\n \t\t\t\t\t    NULL, NULL, cpfl_dev_vport_init,\n \t\t\t\t\t    &vport_param);\n \t\tif (retval != 0)\n@@ -1488,7 +1501,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t\t\t\t pci_dev->device.name,\n \t\t\t\t devargs.req_vports[i]);\n \t\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n-\t\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t\t    sizeof(struct cpfl_vport),\n \t\t\t\t\t\t    NULL, NULL, cpfl_dev_vport_init,\n \t\t\t\t\t\t    &vport_param);\n \t\t\tif (retval != 0)\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h\nindex 200dfcac02..81fe9ac4c3 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.h\n+++ b/drivers/net/cpfl/cpfl_ethdev.h\n@@ -69,13 +69,17 @@ struct cpfl_devargs {\n \tuint16_t req_vport_nb;\n };\n \n+struct cpfl_vport {\n+\tstruct idpf_vport base;\n+};\n+\n struct cpfl_adapter_ext {\n \tTAILQ_ENTRY(cpfl_adapter_ext) next;\n \tstruct idpf_adapter base;\n \n \tchar name[CPFL_ADAPTER_NAME_LEN];\n \n-\tstruct idpf_vport **vports;\n+\tstruct cpfl_vport **vports;\n \tuint16_t max_vport_nb;\n \n \tuint16_t cur_vports; /* bit mask of created vport */\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nindex de59b31b3d..a441e2ffbe 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.c\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,\n \t\t\t uint16_t nb_desc, unsigned int socket_id,\n \t\t\t struct rte_mempool *mp, uint8_t bufq_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tstruct idpf_hw *hw = &base->hw;\n \tconst struct rte_memzone *mz;\n@@ -219,15 +220,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)\n \trte_free(bufq);\n }\n \n+static void\n+cpfl_rx_queue_release(void 
*rxq)\n+{\n+\tstruct cpfl_rx_queue *cpfl_rxq = rxq;\n+\tstruct idpf_rx_queue *q = NULL;\n+\n+\tif (cpfl_rxq == NULL)\n+\t\treturn;\n+\n+\tq = &cpfl_rxq->base;\n+\n+\t/* Split queue */\n+\tif (!q->adapter->is_rx_singleq) {\n+\t\tif (q->bufq2)\n+\t\t\tcpfl_rx_split_bufq_release(q->bufq2);\n+\n+\t\tif (q->bufq1)\n+\t\t\tcpfl_rx_split_bufq_release(q->bufq1);\n+\n+\t\trte_free(cpfl_rxq);\n+\t\treturn;\n+\t}\n+\n+\t/* Single queue */\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(cpfl_rxq);\n+}\n+\n+static void\n+cpfl_tx_queue_release(void *txq)\n+{\n+\tstruct cpfl_tx_queue *cpfl_txq = txq;\n+\tstruct idpf_tx_queue *q = NULL;\n+\n+\tif (cpfl_txq == NULL)\n+\t\treturn;\n+\n+\tq = &cpfl_txq->base;\n+\n+\tif (q->complq) {\n+\t\trte_memzone_free(q->complq->mz);\n+\t\trte_free(q->complq);\n+\t}\n+\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(cpfl_txq);\n+}\n+\n int\n cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t    uint16_t nb_desc, unsigned int socket_id,\n \t\t    const struct rte_eth_rxconf *rx_conf,\n \t\t    struct rte_mempool *mp)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tstruct idpf_hw *hw = &base->hw;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_rx_queue *rxq;\n \tuint16_t rx_free_thresh;\n@@ -247,21 +302,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \t/* Free memory if needed */\n \tif (dev->data->rx_queues[queue_idx] != NULL) {\n-\t\tidpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tcpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n \t/* Setup Rx queue */\n-\trxq = rte_zmalloc_socket(\"cpfl rxq\",\n-\t\t\t\t 
sizeof(struct idpf_rx_queue),\n+\tcpfl_rxq = rte_zmalloc_socket(\"cpfl rxq\",\n+\t\t\t\t sizeof(struct cpfl_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n-\tif (rxq == NULL) {\n+\tif (cpfl_rxq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n \t\tret = -ENOMEM;\n \t\tgoto err_rxq_alloc;\n \t}\n \n+\trxq = &cpfl_rxq->base;\n+\n \tis_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n \n \trxq->mp = mp;\n@@ -328,7 +385,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t}\n \n \trxq->q_set = true;\n-\tdev->data->rx_queues[queue_idx] = rxq;\n+\tdev->data->rx_queues[queue_idx] = cpfl_rxq;\n \n \treturn 0;\n \n@@ -348,7 +405,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n \t\t     uint16_t queue_idx, uint16_t nb_desc,\n \t\t     unsigned int socket_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_tx_queue *cq;\n \tint ret;\n@@ -396,9 +454,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t    uint16_t nb_desc, unsigned int socket_id,\n \t\t    const struct rte_eth_txconf *tx_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n \tstruct idpf_adapter *base = vport->adapter;\n \tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tstruct idpf_hw *hw = &base->hw;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_tx_queue *txq;\n@@ -418,21 +478,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \t/* Free memory if needed. 
*/\n \tif (dev->data->tx_queues[queue_idx] != NULL) {\n-\t\tidpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tcpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n \t/* Allocate the TX queue data structure. */\n-\ttxq = rte_zmalloc_socket(\"cpfl txq\",\n-\t\t\t\t sizeof(struct idpf_tx_queue),\n+\tcpfl_txq = rte_zmalloc_socket(\"cpfl txq\",\n+\t\t\t\t sizeof(struct cpfl_tx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n-\tif (txq == NULL) {\n+\tif (cpfl_txq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n \t\tret = -ENOMEM;\n \t\tgoto err_txq_alloc;\n \t}\n \n+\ttxq = &cpfl_txq->base;\n+\n \tis_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n \n \ttxq->nb_tx_desc = nb_desc;\n@@ -486,7 +548,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n \ttxq->ops = &def_txq_ops;\n \ttxq->q_set = true;\n-\tdev->data->tx_queues[queue_idx] = txq;\n+\tdev->data->tx_queues[queue_idx] = cpfl_txq;\n \n \treturn 0;\n \n@@ -502,6 +564,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n int\n cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tstruct idpf_rx_queue *rxq;\n \tuint16_t max_pkt_len;\n \tuint32_t frame_size;\n@@ -510,7 +573,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tif (rx_queue_id >= dev->data->nb_rx_queues)\n \t\treturn -EINVAL;\n \n-\trxq = dev->data->rx_queues[rx_queue_id];\n+\tcpfl_rxq = dev->data->rx_queues[rx_queue_id];\n+\trxq = &cpfl_rxq->base;\n \n \tif (rxq == NULL || !rxq->q_set) {\n \t\tPMD_DRV_LOG(ERR, \"RX queue %u not available or setup\",\n@@ -574,9 +638,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct idpf_vport 
*vport = dev->data->dev_private;\n-\tstruct idpf_rx_queue *rxq =\n-\t\tdev->data->rx_queues[rx_queue_id];\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n+\tstruct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];\n+\tstruct idpf_rx_queue *rxq = &cpfl_rxq->base;\n \tint err = 0;\n \n \terr = idpf_vc_rxq_config(vport, rxq);\n@@ -609,15 +674,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \n \tif (tx_queue_id >= dev->data->nb_tx_queues)\n \t\treturn -EINVAL;\n \n-\ttxq = dev->data->tx_queues[tx_queue_id];\n+\tcpfl_txq = dev->data->tx_queues[tx_queue_id];\n \n \t/* Init the RX tail register. */\n-\tIDPF_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\tIDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);\n \n \treturn 0;\n }\n@@ -625,12 +690,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n int\n cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_tx_queue *txq =\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n+\tstruct cpfl_tx_queue *cpfl_txq =\n \t\tdev->data->tx_queues[tx_queue_id];\n \tint err = 0;\n \n-\terr = idpf_vc_txq_config(vport, txq);\n+\terr = idpf_vc_txq_config(vport, &cpfl_txq->base);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Fail to configure Tx queue %u\", tx_queue_id);\n \t\treturn err;\n@@ -649,7 +715,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n \t\t\t    tx_queue_id);\n \t} else {\n-\t\ttxq->q_started = true;\n+\t\tcpfl_txq->base.q_started = true;\n \t\tdev->data->tx_queue_state[tx_queue_id] =\n \t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n \t}\n@@ -660,13 +726,16 @@ 
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n int\n cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tstruct idpf_rx_queue *rxq;\n \tint err;\n \n \tif (rx_queue_id >= dev->data->nb_rx_queues)\n \t\treturn -EINVAL;\n \n+\tcpfl_rxq = dev->data->rx_queues[rx_queue_id];\n \terr = idpf_vc_queue_switch(vport, rx_queue_id, true, false);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n@@ -674,7 +743,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\treturn err;\n \t}\n \n-\trxq = dev->data->rx_queues[rx_queue_id];\n+\trxq = &cpfl_rxq->base;\n \trxq->q_started = false;\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n \t\trxq->ops->release_mbufs(rxq);\n@@ -692,13 +761,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tstruct idpf_tx_queue *txq;\n \tint err;\n \n \tif (tx_queue_id >= dev->data->nb_tx_queues)\n \t\treturn -EINVAL;\n \n+\tcpfl_txq = dev->data->tx_queues[tx_queue_id];\n+\n \terr = idpf_vc_queue_switch(vport, tx_queue_id, false, false);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u off\",\n@@ -706,7 +779,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \t\treturn err;\n \t}\n \n-\ttxq = dev->data->tx_queues[tx_queue_id];\n+\ttxq = &cpfl_txq->base;\n \ttxq->q_started = false;\n \ttxq->ops->release_mbufs(txq);\n \tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n@@ -723,25 +796,25 @@ 
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n void\n cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_qc_rx_queue_release(dev->data->rx_queues[qid]);\n+\tcpfl_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n void\n cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_qc_tx_queue_release(dev->data->tx_queues[qid]);\n+\tcpfl_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n void\n cpfl_stop_queues(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue *rxq;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tif (rxq == NULL)\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tif (cpfl_rxq == NULL)\n \t\t\tcontinue;\n \n \t\tif (cpfl_rx_queue_stop(dev, i) != 0)\n@@ -749,8 +822,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)\n \t}\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tif (txq == NULL)\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tif (cpfl_txq == NULL)\n \t\t\tcontinue;\n \n \t\tif (cpfl_tx_queue_stop(dev, i) != 0)\n@@ -761,9 +834,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)\n void\n cpfl_set_rx_function(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n #ifdef RTE_ARCH_X86\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i;\n \n \tif (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&\n@@ -789,8 +863,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n \t\tif (vport->rx_vec_allowed) {\n \t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\t\t\trxq = 
dev->data->rx_queues[i];\n-\t\t\t\t(void)idpf_qc_splitq_rx_vec_setup(rxq);\n+\t\t\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\t\t\t(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);\n \t\t\t}\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->rx_use_avx512) {\n@@ -809,8 +883,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n \t} else {\n \t\tif (vport->rx_vec_allowed) {\n \t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\t\t\trxq = dev->data->rx_queues[i];\n-\t\t\t\t(void)idpf_qc_singleq_rx_vec_setup(rxq);\n+\t\t\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\t\t\t(void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);\n \t\t\t}\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->rx_use_avx512) {\n@@ -859,10 +933,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n void\n cpfl_set_tx_function(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n #ifdef RTE_ARCH_X86\n #ifdef CC_AVX512_SUPPORT\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint i;\n #endif /* CC_AVX512_SUPPORT */\n \n@@ -877,8 +952,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)\n \t\t\t\tvport->tx_use_avx512 = true;\n \t\t\tif (vport->tx_use_avx512) {\n \t\t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\t\t\t\ttxq = dev->data->tx_queues[i];\n-\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(txq);\n+\t\t\t\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);\n \t\t\t\t}\n \t\t\t}\n \t\t}\n@@ -915,10 +990,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->tx_use_avx512) {\n \t\t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\t\t\t\ttxq = dev->data->tx_queues[i];\n-\t\t\t\t\tif (txq == NULL)\n+\t\t\t\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\t\t\t\tif (cpfl_txq == NULL)\n 
\t\t\t\t\t\tcontinue;\n-\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(txq);\n+\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);\n \t\t\t\t}\n \t\t\t\tPMD_DRV_LOG(NOTICE,\n \t\t\t\t\t    \"Using Single AVX512 Vector Tx (port %d).\",\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h\nindex fb267d38c8..bfb9ad97bd 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.h\n+++ b/drivers/net/cpfl/cpfl_rxtx.h\n@@ -23,6 +23,14 @@\n \n #define CPFL_SUPPORT_CHAIN_NUM 5\n \n+struct cpfl_rx_queue {\n+\tstruct idpf_rx_queue base;\n+};\n+\n+struct cpfl_tx_queue {\n+\tstruct idpf_tx_queue base;\n+};\n+\n int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\ndiff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h\nindex 665418d27d..5690b17911 100644\n--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h\n+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h\n@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)\n static inline int\n cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_vport *cpfl_vport = dev->data->dev_private;\n+\tstruct idpf_vport *vport = &cpfl_vport->base;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tdefault_ret = cpfl_rx_vec_queue_default(rxq);\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tdefault_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);\n \t\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n-\t\t\tsplitq_ret = cpfl_rx_splitq_vec_default(rxq);\n+\t\t\tsplitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);\n \t\t\tret = splitq_ret && default_ret;\n \t\t} else {\n \t\t\tret = default_ret;\n@@ -100,12 +101,12 @@ static inline 
int\n cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)\n {\n \tint i;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint ret = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tret = cpfl_tx_vec_queue_default(txq);\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tret = cpfl_tx_vec_queue_default(&cpfl_txq->base);\n \t\tif (ret == CPFL_SCALAR_PATH)\n \t\t\treturn CPFL_SCALAR_PATH;\n \t}\n",
    "prefixes": [
        "01/10"
    ]
}