get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full replacement of writable fields).

GET /api/patches/123882/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 123882,
    "url": "https://patches.dpdk.org/api/patches/123882/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230214113852.3341607-2-mingxia.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230214113852.3341607-2-mingxia.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230214113852.3341607-2-mingxia.liu@intel.com",
    "date": "2023-02-14T11:38:48",
    "name": "[v2,1/5] net/cpfl: add some structure for hairpin queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e9fde8f2d16d2810e655b4f6d6164805f6e42aaa",
    "submitter": {
        "id": 2514,
        "url": "https://patches.dpdk.org/api/people/2514/?format=api",
        "name": "Liu, Mingxia",
        "email": "mingxia.liu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230214113852.3341607-2-mingxia.liu@intel.com/mbox/",
    "series": [
        {
            "id": 27002,
            "url": "https://patches.dpdk.org/api/series/27002/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=27002",
            "date": "2023-02-14T11:38:47",
            "name": "add port to port feature",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/27002/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/123882/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/123882/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 2640841C49;\n\tTue, 14 Feb 2023 13:36:35 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B25BA42D67;\n\tTue, 14 Feb 2023 13:36:31 +0100 (CET)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id 32E9B42D47\n for <dev@dpdk.org>; Tue, 14 Feb 2023 13:36:29 +0100 (CET)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 14 Feb 2023 04:36:28 -0800",
            "from dpdk-mingxial-01.sh.intel.com ([10.67.119.167])\n by orsmga008.jf.intel.com with ESMTP; 14 Feb 2023 04:36:26 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1676378190; x=1707914190;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=UiTdEbiQhJmnijRSWL7U3Itx+xxqE5ipmenfVS4z6lc=;\n b=n+NU3RL+Dr229zrn4F7KPq+I2NxhfdzMA0ZYNBXbvX2XkiZinSGHTVQ3\n mWBckkNl0ZZHz5qlgRmdr4hu2BK7K5KkPHb/D3PTC8LhPB36DSm9VtLTc\n Z8sWSRllZqxwAeoztXt6nspnEox/YCAuXpCPl9VH7FgX0HbtICacpC5R6\n sOcWSjFh3T0blY/GXiLkViZmti+1/QS+KqwC2FHFeRiWwWdx9ZkoCELBp\n hId03Fp/lhR/Jq/yQbzNwE4VVMzWoBb9zU/JVibhcJ9RqDotfuRYbCRJ6\n IefGOz8vanrSrO7/P6/fm68mIS+uqq16PS5sJ7prgapWoJdzHKjs4SD/w A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10620\"; a=\"314793173\"",
            "E=Sophos;i=\"5.97,296,1669104000\"; d=\"scan'208\";a=\"314793173\"",
            "E=McAfee;i=\"6500,9779,10620\"; a=\"699528515\"",
            "E=Sophos;i=\"5.97,296,1669104000\"; d=\"scan'208\";a=\"699528515\""
        ],
        "X-ExtLoop1": "1",
        "From": "Mingxia Liu <mingxia.liu@intel.com>",
        "To": "dev@dpdk.org,\n\tbeilei.xing@intel.com,\n\tyuying.zhang@intel.com",
        "Cc": "Mingxia Liu <mingxia.liu@intel.com>",
        "Subject": "[PATCH v2 1/5] net/cpfl: add some structure for hairpin queue",
        "Date": "Tue, 14 Feb 2023 11:38:48 +0000",
        "Message-Id": "<20230214113852.3341607-2-mingxia.liu@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230214113852.3341607-1-mingxia.liu@intel.com>",
        "References": "<20230118130659.976873-1-mingxia.liu@intel.com>\n <20230214113852.3341607-1-mingxia.liu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch add some structure for hairpin queue,\ncpfl_rx_queue/cpfl_tx_queue/cpfl_vport.\n\nSigned-off-by: Mingxia Liu <mingxia.liu@intel.com>\n---\n drivers/net/cpfl/cpfl_ethdev.c          | 102 +++++++-----\n drivers/net/cpfl/cpfl_ethdev.h          |   8 +-\n drivers/net/cpfl/cpfl_rxtx.c            | 196 +++++++++++++++++-------\n drivers/net/cpfl/cpfl_rxtx.h            |  28 ++++\n drivers/net/cpfl/cpfl_rxtx_vec_common.h |  18 ++-\n 5 files changed, 255 insertions(+), 97 deletions(-)",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex 543dbd60f0..f799707ea7 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -108,7 +108,9 @@ static int\n cpfl_dev_link_update(struct rte_eth_dev *dev,\n \t\t     __rte_unused int wait_to_complete)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct rte_eth_link new_link;\n \n \tmemset(&new_link, 0, sizeof(new_link));\n@@ -160,7 +162,9 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,\n static int\n cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \n \tdev_info->max_rx_queues = adapter->caps.max_rx_q;\n@@ -220,7 +224,9 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n static int\n cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \n \t/* mtu setting is forbidden if port is start */\n \tif (dev->data->dev_started) {\n@@ -260,12 +266,12 @@ static uint64_t\n cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n {\n \tuint64_t mbuf_alloc_failed = 0;\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i = 0;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tmbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tmbuf_alloc_failed += 
__atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,\n \t\t\t\t\t\t     __ATOMIC_RELAXED);\n \t}\n \n@@ -275,8 +281,9 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n static int\n cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tint ret;\n \n@@ -308,20 +315,21 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n static void\n cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\t__atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\t__atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);\n \t}\n }\n \n static int\n cpfl_dev_stats_reset(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tint ret;\n \n@@ -346,8 +354,9 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)\n static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,\n \t\t\t       struct rte_eth_xstat *xstats, unsigned int n)\n {\n-\tstruct idpf_vport *vport =\n-\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct virtchnl2_vport_stats *pstats = NULL;\n \tunsigned int i;\n \tint ret;\n@@ 
-461,7 +470,9 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,\n \t\t     struct rte_eth_rss_reta_entry64 *reta_conf,\n \t\t     uint16_t reta_size)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tuint16_t idx, shift;\n \tint ret = 0;\n@@ -500,7 +511,9 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,\n \t\t    struct rte_eth_rss_reta_entry64 *reta_conf,\n \t\t    uint16_t reta_size)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tuint16_t idx, shift;\n \tint ret = 0;\n@@ -538,7 +551,9 @@ static int\n cpfl_rss_hash_update(struct rte_eth_dev *dev,\n \t\t     struct rte_eth_rss_conf *rss_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tint ret = 0;\n \n@@ -603,7 +618,9 @@ static int\n cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,\n \t\t       struct rte_eth_rss_conf *rss_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tint ret = 0;\n \n@@ -640,7 +657,9 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,\n static int\n cpfl_dev_configure(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = 
&(cpfl_vport->base);\n \tstruct rte_eth_conf *conf = &dev->data->dev_conf;\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tint ret;\n@@ -703,7 +722,9 @@ cpfl_dev_configure(struct rte_eth_dev *dev)\n static int\n cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tuint16_t nb_rx_queues = dev->data->nb_rx_queues;\n \n \treturn idpf_vport_irq_map_config(vport, nb_rx_queues);\n@@ -712,15 +733,16 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)\n static int\n cpfl_start_queues(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue *rxq;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint err = 0;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tif (txq == NULL || txq->tx_deferred_start)\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tif (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)\n \t\t\tcontinue;\n+\n \t\terr = cpfl_tx_queue_start(dev, i);\n \t\tif (err != 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"Fail to start Tx queue %u\", i);\n@@ -729,8 +751,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)\n \t}\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tif (rxq == NULL || rxq->rx_deferred_start)\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tif (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)\n \t\t\tcontinue;\n \t\terr = cpfl_rx_queue_start(dev, i);\n \t\tif (err != 0) {\n@@ -745,7 +767,9 @@ cpfl_start_queues(struct rte_eth_dev *dev)\n static int\n cpfl_dev_start(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n 
\tstruct idpf_adapter *base = vport->adapter;\n \tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);\n \tuint16_t num_allocated_vectors = base->caps.num_allocated_vectors;\n@@ -808,7 +832,9 @@ cpfl_dev_start(struct rte_eth_dev *dev)\n static int\n cpfl_dev_stop(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \n \tif (vport->stopped == 1)\n \t\treturn 0;\n@@ -829,7 +855,9 @@ cpfl_dev_stop(struct rte_eth_dev *dev)\n static int\n cpfl_dev_close(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);\n \n \tcpfl_dev_stop(dev);\n@@ -839,7 +867,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)\n \tadapter->cur_vport_nb--;\n \tdev->data->dev_private = NULL;\n \tadapter->vports[vport->sw_idx] = NULL;\n-\trte_free(vport);\n+\trte_free(cpfl_vport);\n \n \treturn 0;\n }\n@@ -1012,7 +1040,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)\n \tint i;\n \n \tfor (i = 0; i < adapter->cur_vport_nb; i++) {\n-\t\tvport = adapter->vports[i];\n+\t\tvport = &(adapter->vports[i]->base);\n \t\tif (vport->vport_id != vport_id)\n \t\t\tcontinue;\n \t\telse\n@@ -1225,7 +1253,9 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *ad)\n static int\n cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct cpfl_vport_param *param = init_params;\n \tstruct cpfl_adapter_ext *adapter = param->adapter;\n \t/* for sending create vport virtchnl 
msg prepare */\n@@ -1251,7 +1281,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)\n \t\tgoto err;\n \t}\n \n-\tadapter->vports[param->idx] = vport;\n+\tadapter->vports[param->idx] = cpfl_vport;\n \tadapter->cur_vports |= RTE_BIT32(param->devarg_id);\n \tadapter->cur_vport_nb++;\n \n@@ -1369,7 +1399,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t\tsnprintf(name, sizeof(name), \"cpfl_%s_vport_0\",\n \t\t\t pci_dev->device.name);\n \t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n-\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t    sizeof(struct cpfl_vport),\n \t\t\t\t\t    NULL, NULL, cpfl_dev_vport_init,\n \t\t\t\t\t    &vport_param);\n \t\tif (retval != 0)\n@@ -1387,7 +1417,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t\t\t\t pci_dev->device.name,\n \t\t\t\t devargs.req_vports[i]);\n \t\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n-\t\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t\t    sizeof(struct cpfl_vport),\n \t\t\t\t\t\t    NULL, NULL, cpfl_dev_vport_init,\n \t\t\t\t\t\t    &vport_param);\n \t\t\tif (retval != 0)\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h\nindex e00dff4bf0..ef3225878b 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.h\n+++ b/drivers/net/cpfl/cpfl_ethdev.h\n@@ -70,13 +70,19 @@ struct cpfl_devargs {\n \tuint16_t req_vport_nb;\n };\n \n+struct cpfl_vport {\n+\t/* p2p mbuf pool */\n+\tstruct rte_mempool *p2p_mp;\n+\tstruct idpf_vport base;\n+};\n+\n struct cpfl_adapter_ext {\n \tTAILQ_ENTRY(cpfl_adapter_ext) next;\n \tstruct idpf_adapter base;\n \n \tchar name[CPFL_ADAPTER_NAME_LEN];\n \n-\tstruct idpf_vport **vports;\n+\tstruct cpfl_vport **vports;\n \tuint16_t max_vport_nb;\n \n \tuint16_t cur_vports; /* bit mask of created vport */\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nindex 6226b02301..c7e5487366 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.c\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -10,6 
+10,11 @@\n #include \"cpfl_rxtx.h\"\n #include \"cpfl_rxtx_vec_common.h\"\n \n+static void\n+cpfl_tx_queue_release(void *txq);\n+static void\n+cpfl_rx_queue_release(void *txq);\n+\n static uint64_t\n cpfl_rx_offload_convert(uint64_t offload)\n {\n@@ -128,7 +133,9 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,\n \t\t\t uint16_t nb_desc, unsigned int socket_id,\n \t\t\t struct rte_mempool *mp, uint8_t bufq_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tstruct idpf_hw *hw = &adapter->hw;\n \tconst struct rte_memzone *mz;\n@@ -225,9 +232,12 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t    const struct rte_eth_rxconf *rx_conf,\n \t\t    struct rte_mempool *mp)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_rx_queue *rxq;\n \tuint16_t rx_free_thresh;\n@@ -247,21 +257,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \t/* Free memory if needed */\n \tif (dev->data->rx_queues[queue_idx] != NULL) {\n-\t\tidpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tcpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n \t/* Setup Rx queue */\n-\trxq = rte_zmalloc_socket(\"cpfl rxq\",\n-\t\t\t\t sizeof(struct idpf_rx_queue),\n+\tcpfl_rxq = rte_zmalloc_socket(\"cpfl rxq\",\n+\t\t\t\t sizeof(struct cpfl_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n-\tif (rxq == NULL) {\n+\tif 
(cpfl_rxq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n \t\tret = -ENOMEM;\n \t\tgoto err_rxq_alloc;\n \t}\n \n+\trxq = &(cpfl_rxq->base);\n+\n \tis_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n \n \trxq->mp = mp;\n@@ -328,7 +340,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t}\n \n \trxq->q_set = true;\n-\tdev->data->rx_queues[queue_idx] = rxq;\n+\tdev->data->rx_queues[queue_idx] = cpfl_rxq;\n \n \treturn 0;\n \n@@ -348,7 +360,9 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n \t\t     uint16_t queue_idx, uint16_t nb_desc,\n \t\t     unsigned int socket_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tconst struct rte_memzone *mz;\n \tstruct idpf_tx_queue *cq;\n \tint ret;\n@@ -396,15 +410,18 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t    uint16_t nb_desc, unsigned int socket_id,\n \t\t    const struct rte_eth_txconf *tx_conf)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tuint16_t tx_rs_thresh, tx_free_thresh;\n \tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_tx_queue *txq;\n \tuint64_t offloads;\n-\tuint16_t len;\n \tbool is_splitq;\n+\tuint16_t len;\n \tint ret;\n \n \toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n@@ -418,21 +435,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \t/* Free memory if needed. 
*/\n \tif (dev->data->tx_queues[queue_idx] != NULL) {\n-\t\tidpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tcpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n \t/* Allocate the TX queue data structure. */\n-\ttxq = rte_zmalloc_socket(\"cpfl txq\",\n-\t\t\t\t sizeof(struct idpf_tx_queue),\n+\tcpfl_txq = rte_zmalloc_socket(\"cpfl txq\",\n+\t\t\t\t sizeof(struct cpfl_tx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n-\tif (txq == NULL) {\n+\tif (cpfl_txq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n \t\tret = -ENOMEM;\n \t\tgoto err_txq_alloc;\n \t}\n \n+\ttxq = &(cpfl_txq->base);\n+\n \tis_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n \n \ttxq->nb_tx_desc = nb_desc;\n@@ -486,7 +505,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n \ttxq->ops = &def_txq_ops;\n \ttxq->q_set = true;\n-\tdev->data->tx_queues[queue_idx] = txq;\n+\tdev->data->tx_queues[queue_idx] = cpfl_txq;\n \n \treturn 0;\n \n@@ -502,6 +521,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n int\n cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tstruct idpf_rx_queue *rxq;\n \tuint16_t max_pkt_len;\n \tuint32_t frame_size;\n@@ -510,7 +530,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tif (rx_queue_id >= dev->data->nb_rx_queues)\n \t\treturn -EINVAL;\n \n-\trxq = dev->data->rx_queues[rx_queue_id];\n+\tcpfl_rxq = dev->data->rx_queues[rx_queue_id];\n+\trxq = &(cpfl_rxq->base);\n \n \tif (rxq == NULL || !rxq->q_set) {\n \t\tPMD_DRV_LOG(ERR, \"RX queue %u not available or setup\",\n@@ -574,9 +595,11 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct idpf_vport 
*vport = dev->data->dev_private;\n-\tstruct idpf_rx_queue *rxq =\n-\t\tdev->data->rx_queues[rx_queue_id];\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];\n+\tstruct idpf_rx_queue *rxq = &(cpfl_rxq->base);\n \tint err = 0;\n \n \terr = idpf_vc_rxq_config(vport, rxq);\n@@ -609,15 +632,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \n \tif (tx_queue_id >= dev->data->nb_tx_queues)\n \t\treturn -EINVAL;\n \n-\ttxq = dev->data->tx_queues[tx_queue_id];\n+\tcpfl_txq = dev->data->tx_queues[tx_queue_id];\n \n \t/* Init the RX tail register. */\n-\tIDPF_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\tIDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);\n \n \treturn 0;\n }\n@@ -625,12 +648,14 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n int\n cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_tx_queue *txq =\n-\t\tdev->data->tx_queues[tx_queue_id];\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct cpfl_tx_queue *cpfl_txq =\n+\t    dev->data->tx_queues[tx_queue_id];\n \tint err = 0;\n \n-\terr = idpf_vc_txq_config(vport, txq);\n+\terr = idpf_vc_txq_config(vport, &(cpfl_txq->base));\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Fail to configure Tx queue %u\", tx_queue_id);\n \t\treturn err;\n@@ -649,7 +674,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n \t\t\t    tx_queue_id);\n \t} else {\n-\t\ttxq->q_started = true;\n+\t\tcpfl_txq->base.q_started = true;\n 
\t\tdev->data->tx_queue_state[tx_queue_id] =\n \t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n \t}\n@@ -660,13 +685,17 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n int\n cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tstruct idpf_rx_queue *rxq;\n \tint err;\n \n \tif (rx_queue_id >= dev->data->nb_rx_queues)\n \t\treturn -EINVAL;\n \n+\tcpfl_rxq = dev->data->rx_queues[rx_queue_id];\n \terr = idpf_vc_queue_switch(vport, rx_queue_id, true, false);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n@@ -674,7 +703,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\treturn err;\n \t}\n \n-\trxq = dev->data->rx_queues[rx_queue_id];\n+\trxq = &(cpfl_rxq->base);\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n \t\trxq->ops->release_mbufs(rxq);\n \t\tidpf_qc_single_rx_queue_reset(rxq);\n@@ -691,13 +720,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tstruct idpf_tx_queue *txq;\n \tint err;\n \n \tif (tx_queue_id >= dev->data->nb_tx_queues)\n \t\treturn -EINVAL;\n \n+\tcpfl_txq = dev->data->tx_queues[tx_queue_id];\n \terr = idpf_vc_queue_switch(vport, tx_queue_id, false, false);\n \tif (err != 0) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u off\",\n@@ -705,7 +738,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \t\treturn err;\n \t}\n \n-\ttxq = 
dev->data->tx_queues[tx_queue_id];\n+\ttxq = &(cpfl_txq->base);\n \ttxq->ops->release_mbufs(txq);\n \tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n \t\tidpf_qc_single_tx_queue_reset(txq);\n@@ -718,28 +751,83 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \treturn 0;\n }\n \n+static void\n+cpfl_rx_queue_release(void *rxq)\n+{\n+\tstruct cpfl_rx_queue *cpfl_rxq = rxq;\n+\tstruct idpf_rx_queue *q = NULL;\n+\n+\tif (cpfl_rxq == NULL)\n+\t\treturn;\n+\n+\tq = &(cpfl_rxq->base);\n+\n+\t/* Split queue */\n+\tif (q->bufq1 != NULL && q->bufq2 != NULL) {\n+\t\tq->bufq1->ops->release_mbufs(q->bufq1);\n+\t\trte_free(q->bufq1->sw_ring);\n+\t\trte_memzone_free(q->bufq1->mz);\n+\t\trte_free(q->bufq1);\n+\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n+\t\trte_free(q->bufq2->sw_ring);\n+\t\trte_memzone_free(q->bufq2->mz);\n+\t\trte_free(q->bufq2);\n+\t\trte_memzone_free(q->mz);\n+\t\trte_free(cpfl_rxq);\n+\t\treturn;\n+\t}\n+\n+\t/* Single queue */\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(cpfl_rxq);\n+}\n+\n void\n cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_qc_rx_queue_release(dev->data->rx_queues[qid]);\n+\tcpfl_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+static void\n+cpfl_tx_queue_release(void *txq)\n+{\n+\tstruct cpfl_tx_queue *cpfl_txq = txq;\n+\tstruct idpf_tx_queue *q = NULL;\n+\n+\tif (cpfl_txq == NULL)\n+\t\treturn;\n+\n+\tq = &(cpfl_txq->base);\n+\n+\tif (q->complq) {\n+\t\trte_memzone_free(q->complq->mz);\n+\t\trte_free(q->complq);\n+\t}\n+\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(cpfl_txq);\n }\n \n void\n cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tidpf_qc_tx_queue_release(dev->data->tx_queues[qid]);\n+\tcpfl_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n void\n cpfl_stop_queues(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_rx_queue 
*rxq;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tif (rxq == NULL)\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tif (cpfl_rxq == NULL)\n \t\t\tcontinue;\n \n \t\tif (cpfl_rx_queue_stop(dev, i) != 0)\n@@ -747,8 +835,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)\n \t}\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tif (txq == NULL)\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tif (cpfl_txq == NULL)\n \t\t\tcontinue;\n \n \t\tif (cpfl_tx_queue_stop(dev, i) != 0)\n@@ -760,9 +848,11 @@ cpfl_stop_queues(struct rte_eth_dev *dev)\n void\n cpfl_set_rx_function(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n #ifdef RTE_ARCH_X86\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i;\n \n \tif (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&\n@@ -788,8 +878,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n \t\tif (vport->rx_vec_allowed) {\n \t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\t\t\trxq = dev->data->rx_queues[i];\n-\t\t\t\t(void)idpf_qc_splitq_rx_vec_setup(rxq);\n+\t\t\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\t\t\t(void)idpf_qc_splitq_rx_vec_setup(&(cpfl_rxq->base));\n \t\t\t}\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->rx_use_avx512) {\n@@ -808,8 +898,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n \t} else {\n \t\tif (vport->rx_vec_allowed) {\n \t\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\t\t\trxq = dev->data->rx_queues[i];\n-\t\t\t\t(void)idpf_qc_singleq_rx_vec_setup(rxq);\n+\t\t\t\tcpfl_rxq = 
dev->data->rx_queues[i];\n+\t\t\t\t(void)idpf_qc_singleq_rx_vec_setup(&(cpfl_rxq->base));\n \t\t\t}\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->rx_use_avx512) {\n@@ -858,10 +948,12 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)\n void\n cpfl_set_tx_function(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n #ifdef RTE_ARCH_X86\n #ifdef CC_AVX512_SUPPORT\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint i;\n #endif /* CC_AVX512_SUPPORT */\n \n@@ -876,8 +968,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)\n \t\t\t\tvport->tx_use_avx512 = true;\n \t\t\tif (vport->tx_use_avx512) {\n \t\t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\t\t\t\ttxq = dev->data->tx_queues[i];\n-\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(txq);\n+\t\t\t\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base));\n \t\t\t\t}\n \t\t\t}\n \t\t}\n@@ -914,10 +1006,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)\n #ifdef CC_AVX512_SUPPORT\n \t\t\tif (vport->tx_use_avx512) {\n \t\t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\t\t\t\ttxq = dev->data->tx_queues[i];\n-\t\t\t\t\tif (txq == NULL)\n+\t\t\t\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\t\t\t\tif (cpfl_txq == NULL)\n \t\t\t\t\t\tcontinue;\n-\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(txq);\n+\t\t\t\t\tidpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base));\n \t\t\t\t}\n \t\t\t\tPMD_DRV_LOG(NOTICE,\n \t\t\t\t\t    \"Using Single AVX512 Vector Tx (port %d).\",\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h\nindex fb267d38c8..e241afece9 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.h\n+++ b/drivers/net/cpfl/cpfl_rxtx.h\n@@ -23,6 +23,34 @@\n \n #define CPFL_SUPPORT_CHAIN_NUM 5\n \n+struct cpfl_rxq_hairpin_info {\n+\tbool hairpin_q;\t\t/* if rx queue is a hairpin queue */\n+\t/* 
only valid if the hairpin queue pair crosses vport */\n+\tbool hairpin_cv;\n+\tuint16_t peer_txp;\n+};\n+\n+struct cpfl_rx_queue {\n+\tstruct idpf_rx_queue base;\n+\tstruct cpfl_rxq_hairpin_info hairpin_info;\n+};\n+\n+struct cpfl_txq_hairpin_info {\n+\t/* only valid for hairpin queue */\n+\tbool hairpin_q;\n+\t/* only valid if the hairpin queue pair crosses vport */\n+\tbool hairpin_cv;\n+\tuint16_t peer_rxq_id;\n+\tuint16_t peer_rxp;\n+\tbool bound;\n+\tuint16_t complq_peer_rxq_id;\n+};\n+\n+struct cpfl_tx_queue {\n+\tstruct idpf_tx_queue base;\n+\tstruct cpfl_txq_hairpin_info hairpin_info;\n+};\n+\n int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\ndiff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h\nindex 665418d27d..8d0b825f95 100644\n--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h\n+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h\n@@ -76,15 +76,17 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)\n static inline int\n cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)\n {\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_rx_queue *rxq;\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n \tint i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\trxq = dev->data->rx_queues[i];\n-\t\tdefault_ret = cpfl_rx_vec_queue_default(rxq);\n+\t\tcpfl_rxq = dev->data->rx_queues[i];\n+\t\tdefault_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);\n \t\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n-\t\t\tsplitq_ret = cpfl_rx_splitq_vec_default(rxq);\n+\t\t\tsplitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);\n \t\t\tret = splitq_ret && default_ret;\n \t\t} else {\n \t\t\tret = default_ret;\n@@ 
-100,12 +102,12 @@ static inline int\n cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)\n {\n \tint i;\n-\tstruct idpf_tx_queue *txq;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n \tint ret = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\ttxq = dev->data->tx_queues[i];\n-\t\tret = cpfl_tx_vec_queue_default(txq);\n+\t\tcpfl_txq = dev->data->tx_queues[i];\n+\t\tret = cpfl_tx_vec_queue_default(&cpfl_txq->base);\n \t\tif (ret == CPFL_SCALAR_PATH)\n \t\t\treturn CPFL_SCALAR_PATH;\n \t}\n",
    "prefixes": [
        "v2",
        "1/5"
    ]
}