get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

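The patch shown on this page can also be fetched programmatically. Below is a minimal sketch, assuming the Python requests library, of how the GET operation described above could be issued against the URL from this response; the printed field names are taken from the JSON sample further down.

import requests

# URL taken from the "url" field of the response below; format=json asks the
# API to return plain JSON instead of the browsable HTML view.
PATCH_URL = "http://patches.dpdk.org/api/patches/132021/"

resp = requests.get(PATCH_URL, params={"format": "json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the sample response below.
print(patch["name"])                # "[v8,4/9] net/cpfl: set up control path"
print(patch["state"])               # "superseded"
print(patch["submitter"]["email"])  # "yuying.zhang@intel.com"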
GET /api/patches/132021/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132021,
    "url": "http://patches.dpdk.org/api/patches/132021/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230927125416.2308974-5-yuying.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230927125416.2308974-5-yuying.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230927125416.2308974-5-yuying.zhang@intel.com",
    "date": "2023-09-27T12:54:11",
    "name": "[v8,4/9] net/cpfl: set up control path",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "51428a9e86494f6eb6c2bda09619bd40e975bf37",
    "submitter": {
        "id": 1844,
        "url": "http://patches.dpdk.org/api/people/1844/?format=api",
        "name": "Zhang, Yuying",
        "email": "yuying.zhang@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230927125416.2308974-5-yuying.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 29658,
            "url": "http://patches.dpdk.org/api/series/29658/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29658",
            "date": "2023-09-27T12:54:07",
            "name": "add rte flow support for cpfl",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/29658/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132021/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132021/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C598A42651;\n\tWed, 27 Sep 2023 14:55:06 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8B34540E40;\n\tWed, 27 Sep 2023 14:54:51 +0200 (CEST)",
            "from mgamail.intel.com (mgamail.intel.com [192.55.52.136])\n by mails.dpdk.org (Postfix) with ESMTP id F1B6D40E09\n for <dev@dpdk.org>; Wed, 27 Sep 2023 14:54:47 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 27 Sep 2023 05:54:47 -0700",
            "from dpdk-wenjing-02.sh.intel.com ([10.67.119.3])\n by orsmga004.jf.intel.com with ESMTP; 27 Sep 2023 05:54:44 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1695819288; x=1727355288;\n h=from:to:subject:date:message-id:in-reply-to:references:\n mime-version:content-transfer-encoding;\n bh=A5fgXO+QIS5q9dcyimshGbuKlyJUwL0GwM2onpU+qqo=;\n b=RY10GFQRgEWSDv3y2XT/mTjczAEWe4zwZLULkPRAbVgbQIWmDh/GBqXq\n Cl4punw7KEDZPblSCjc9Z1FQ/b7sJnwEqbT1Fbm9T3uYAShfGvAbQfXOq\n 6bhoWjV3PPfFC4MFeav1Xi6LEjsyOUCNqBlk72Pq3WBrW+Wxah/0ZyeXi\n DcuJChM+Z+1mONDg6bWa9dP08qLUfY9DTpxFSoj8W9ZKihU24aN1ol+sD\n xj1NDBMPLjeKbmsaz0Fvbox8B0Wvy1UnsQPzvikFP+qKoiY5jpG28KSLz\n dxXnr9dLRzsXkCuE1HVIqXvrNoXFzxXyoCwcLjbafenzzl69JbtUmJTFi g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10846\"; a=\"361204055\"",
            "E=Sophos;i=\"6.03,181,1694761200\"; d=\"scan'208\";a=\"361204055\"",
            "E=McAfee;i=\"6600,9927,10846\"; a=\"872873898\"",
            "E=Sophos;i=\"6.03,181,1694761200\"; d=\"scan'208\";a=\"872873898\""
        ],
        "X-ExtLoop1": "1",
        "From": "yuying.zhang@intel.com",
        "To": "yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com,\n jingjing.wu@intel.com, beilei.xing@intel.com",
        "Subject": "[PATCH v8 4/9] net/cpfl: set up control path",
        "Date": "Wed, 27 Sep 2023 12:54:11 +0000",
        "Message-Id": "<20230927125416.2308974-5-yuying.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20230927125416.2308974-1-yuying.zhang@intel.com>",
        "References": "<20230926181703.2268199-1-yuying.zhang@intel.com>\n <20230927125416.2308974-1-yuying.zhang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Yuying Zhang <yuying.zhang@intel.com>\n\nSet up control vport and control queue for flow offloading.\n\nSigned-off-by: Yuying Zhang <yuying.zhang@intel.com>\n---\n drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++\n drivers/net/cpfl/cpfl_controlq.h |  75 +++\n drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++\n drivers/net/cpfl/cpfl_ethdev.h   |  14 +\n drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++\n drivers/net/cpfl/meson.build     |   1 +\n 6 files changed, 1305 insertions(+)\n create mode 100644 drivers/net/cpfl/cpfl_controlq.c\n create mode 100644 drivers/net/cpfl/cpfl_controlq.h",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c\nnew file mode 100644\nindex 0000000000..4a925bc338\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_controlq.c\n@@ -0,0 +1,801 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2023 Intel Corporation\n+ */\n+\n+#include \"cpfl_controlq.h\"\n+#include \"base/idpf_controlq.h\"\n+#include \"rte_common.h\"\n+\n+/**\n+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP\n+ * @qinfo: pointer to create control queue info struct\n+ *\n+ * Verify that DMA parameter of each DMA memory struct is present and\n+ * consistent with control queue parameters\n+ */\n+static inline int\n+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)\n+{\n+\tstruct idpf_dma_mem *ring = &qinfo->ring_mem;\n+\tstruct idpf_dma_mem *buf = &qinfo->buf_mem;\n+\n+\tif (!ring->va || !ring->size)\n+\t\treturn -EINVAL;\n+\n+\tif (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))\n+\t\treturn -EINVAL;\n+\n+\t/* no need for buffer checks for TX queues */\n+\tif (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||\n+\t    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||\n+\t    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)\n+\t\treturn 0;\n+\n+\tif (!buf->va || !buf->size)\n+\t\treturn -EINVAL;\n+\n+\t/* accommodate different types of rx ring buffer sizes */\n+\tif ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&\n+\t     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||\n+\t    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&\n+\t     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))\n+\t\treturn -EINVAL;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to control queue struct\n+ * @qinfo: pointer to create queue info struct\n+ *\n+ * The CP takes care of all DMA memory allocations. Store the allocated memory\n+ * information for the descriptor ring and buffers. If the memory for either the\n+ * descriptor ring or the buffers is not allocated properly and/or inconsistent\n+ * with the control queue parameters, this routine will free the memory for\n+ * both the descriptors and the buffers\n+ */\n+int\n+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,\n+\t\t\t struct cpfl_ctlq_create_info *qinfo)\n+{\n+\tint ret_code = 0;\n+\tunsigned int elem_size;\n+\tint i = 0;\n+\n+\tret_code = cpfl_check_dma_mem_parameters(qinfo);\n+\tif (ret_code)\n+\t\t/* TODO: Log an error message per CP */\n+\t\tgoto err;\n+\n+\tcq->desc_ring.va = qinfo->ring_mem.va;\n+\tcq->desc_ring.pa = qinfo->ring_mem.pa;\n+\tcq->desc_ring.size = qinfo->ring_mem.size;\n+\n+\tswitch (cq->cq_type) {\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n+\tcase IDPF_CTLQ_TYPE_CONFIG_RX:\n+\tcase IDPF_CTLQ_TYPE_EVENT_RX:\n+\tcase IDPF_CTLQ_TYPE_RDMA_RX:\n+\t\t/* Only receive queues will have allocated buffers\n+\t\t * during init.  
CP allocates one big chunk of DMA\n+\t\t * region who size is equal to ring_len * buff_size.\n+\t\t * In CPFLib, the block gets broken down to multiple\n+\t\t * smaller blocks that actually gets programmed in the hardware.\n+\t\t */\n+\n+\t\tcq->bi.rx_buff = (struct idpf_dma_mem **)\n+\t\t\tidpf_calloc(hw, cq->ring_size,\n+\t\t\t\t    sizeof(struct idpf_dma_mem *));\n+\t\tif (!cq->bi.rx_buff) {\n+\t\t\tret_code = -ENOMEM;\n+\t\t\t/* TODO: Log an error message per CP */\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\telem_size = qinfo->buf_size;\n+\t\tfor (i = 0; i < cq->ring_size; i++) {\n+\t\t\tcq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc\n+\t\t\t\t\t    (hw, 1,\n+\t\t\t\t\t     sizeof(struct idpf_dma_mem));\n+\t\t\tif (!cq->bi.rx_buff[i]) {\n+\t\t\t\tret_code = -ENOMEM;\n+\t\t\t\tgoto free_rx_buffs;\n+\t\t\t}\n+\t\t\tcq->bi.rx_buff[i]->va =\n+\t\t\t    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));\n+\t\t\tcq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +\n+\t\t\t\t\t       (i * elem_size);\n+\t\t\tcq->bi.rx_buff[i]->size = elem_size;\n+\t\t}\n+\t\tbreak;\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n+\tcase IDPF_CTLQ_TYPE_CONFIG_TX:\n+\tcase IDPF_CTLQ_TYPE_RDMA_TX:\n+\tcase IDPF_CTLQ_TYPE_RDMA_COMPL:\n+\t\tbreak;\n+\tdefault:\n+\t\tret_code = -EINVAL;\n+\t}\n+\n+\treturn ret_code;\n+\n+free_rx_buffs:\n+\ti--;\n+\tfor (; i >= 0; i--)\n+\t\tidpf_free(hw, cq->bi.rx_buff[i]);\n+\n+\tif (!cq->bi.rx_buff)\n+\t\tidpf_free(hw, cq->bi.rx_buff);\n+\n+err:\n+\treturn ret_code;\n+}\n+\n+/**\n+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Record the address of the receive queue DMA buffers in the descriptors.\n+ * The buffers must have been previously allocated.\n+ */\n+static void\n+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)\n+{\n+\tint i = 0;\n+\n+\tfor (i = 0; i < cq->ring_size; i++) {\n+\t\tstruct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);\n+\t\tstruct idpf_dma_mem *bi = cq->bi.rx_buff[i];\n+\n+\t\t/* No buffer to post to descriptor, continue */\n+\t\tif (!bi)\n+\t\t\tcontinue;\n+\n+\t\tdesc->flags =\n+\t\t\tCPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n+\t\tdesc->opcode = 0;\n+\t\tdesc->datalen = CPU_TO_LE16(bi->size);\n+\t\tdesc->ret_val = 0;\n+\t\tdesc->cookie_high = 0;\n+\t\tdesc->cookie_low = 0;\n+\t\tdesc->params.indirect.addr_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(bi->pa));\n+\t\tdesc->params.indirect.addr_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(bi->pa));\n+\t\tdesc->params.indirect.param0 = 0;\n+\t\tdesc->params.indirect.param1 = 0;\n+\t}\n+}\n+\n+/**\n+ * cpfl_ctlq_setup_regs - initialize control queue registers\n+ * @cq: pointer to the specific control queue\n+ * @q_create_info: structs containing info for each queue to be initialized\n+ */\n+static void\n+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)\n+{\n+\t/* set control queue registers in our local struct */\n+\tcq->reg.head = q_create_info->reg.head;\n+\tcq->reg.tail = q_create_info->reg.tail;\n+\tcq->reg.len = q_create_info->reg.len;\n+\tcq->reg.bah = q_create_info->reg.bah;\n+\tcq->reg.bal = q_create_info->reg.bal;\n+\tcq->reg.len_mask = q_create_info->reg.len_mask;\n+\tcq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;\n+\tcq->reg.head_mask = q_create_info->reg.head_mask;\n+}\n+\n+/**\n+ * cpfl_ctlq_init_regs - Initialize control queue registers\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ * @is_rxq: true if receive control queue, false 
otherwise\n+ *\n+ * Initialize registers. The caller is expected to have already initialized the\n+ * descriptor ring memory and buffer memory\n+ */\n+static void\n+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)\n+{\n+\t/* Update tail to post pre-allocated buffers for rx queues */\n+\tif (is_rxq)\n+\t\twr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));\n+\n+\t/* For non-Mailbox control queues only TAIL need to be set */\n+\tif (cq->q_id != -1)\n+\t\treturn;\n+\n+\t/* Clear Head for both send or receive */\n+\twr32(hw, cq->reg.head, 0);\n+\n+\t/* set starting point */\n+\twr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));\n+\twr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));\n+\twr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));\n+}\n+\n+/**\n+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure\n+ * @hw: context info for the callback\n+ * @cq: pointer to the specific control queue\n+ *\n+ * DMA buffers are released by the CP itself\n+ */\n+static void\n+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)\n+{\n+\tint i;\n+\n+\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||\n+\t    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {\n+\t\tfor (i = 0; i < cq->ring_size; i++)\n+\t\t\tidpf_free(hw, cq->bi.rx_buff[i]);\n+\t\t/* free the buffer header */\n+\t\tidpf_free(hw, cq->bi.rx_buff);\n+\t} else {\n+\t\tidpf_free(hw, cq->bi.tx_msg);\n+\t}\n+}\n+\n+/**\n+ * cpfl_ctlq_add - add one control queue\n+ * @hw: pointer to hardware struct\n+ * @qinfo: info for queue to be created\n+ * @cq_out: (output) double pointer to control queue to be created\n+ *\n+ * Allocate and initialize a control queue and add it to the control queue list.\n+ * The cq parameter will be allocated/initialized and passed back to the caller\n+ * if no errors occur.\n+ */\n+int\n+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,\n+\t      struct idpf_ctlq_info **cq_out)\n+{\n+\tstruct idpf_ctlq_info *cq;\n+\tbool is_rxq = false;\n+\tint status = 0;\n+\n+\tif (!qinfo->len || !qinfo->buf_size ||\n+\t    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||\n+\t    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)\n+\t\treturn -EINVAL;\n+\n+\tcq = (struct idpf_ctlq_info *)\n+\t     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));\n+\n+\tif (!cq)\n+\t\treturn -ENOMEM;\n+\n+\tcq->cq_type = qinfo->type;\n+\tcq->q_id = qinfo->id;\n+\tcq->buf_size = qinfo->buf_size;\n+\tcq->ring_size = qinfo->len;\n+\n+\tcq->next_to_use = 0;\n+\tcq->next_to_clean = 0;\n+\tcq->next_to_post = cq->ring_size - 1;\n+\n+\tswitch (qinfo->type) {\n+\tcase IDPF_CTLQ_TYPE_EVENT_RX:\n+\tcase IDPF_CTLQ_TYPE_CONFIG_RX:\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n+\t\tis_rxq = true;\n+\t\t/* fallthrough */\n+\tcase IDPF_CTLQ_TYPE_CONFIG_TX:\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n+\t\tstatus = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tstatus = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\tif (status)\n+\t\tgoto init_free_q;\n+\n+\tif (is_rxq) {\n+\t\tcpfl_ctlq_init_rxq_bufs(cq);\n+\t} else {\n+\t\t/* Allocate the array of msg pointers for TX queues */\n+\t\tcq->bi.tx_msg = (struct idpf_ctlq_msg **)\n+\t\t\tidpf_calloc(hw, qinfo->len,\n+\t\t\t\t    sizeof(struct idpf_ctlq_msg *));\n+\t\tif (!cq->bi.tx_msg) {\n+\t\t\tstatus = -ENOMEM;\n+\t\t\tgoto init_dealloc_q_mem;\n+\t\t}\n+\t}\n+\n+\tcpfl_ctlq_setup_regs(cq, qinfo);\n+\n+\tcpfl_ctlq_init_regs(hw, cq, is_rxq);\n+\n+\tidpf_init_lock(&cq->cq_lock);\n+\n+\tLIST_INSERT_HEAD(&hw->cq_list_head, cq, 
cq_list);\n+\n+\t*cq_out = cq;\n+\treturn status;\n+\n+init_dealloc_q_mem:\n+\t/* free ring buffers and the ring itself */\n+\tcpfl_ctlq_dealloc_ring_res(hw, cq);\n+init_free_q:\n+\tidpf_free(hw, cq);\n+\tcq = NULL;\n+\n+\treturn status;\n+}\n+\n+/**\n+ * cpfl_ctlq_send - send command to Control Queue (CTQ)\n+ * @hw: pointer to hw struct\n+ * @cq: handle to control queue struct to send on\n+ * @num_q_msg: number of messages to send on control queue\n+ * @q_msg: pointer to array of queue messages to be sent\n+ *\n+ * The caller is expected to allocate DMAable buffers and pass them to the\n+ * send routine via the q_msg struct / control queue specific data struct.\n+ * The control queue will hold a reference to each send message until\n+ * the completion for that message has been cleaned.\n+ */\n+int\n+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tint num_desc_avail = 0;\n+\tint status = 0;\n+\tint i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\t/* Ensure there are enough descriptors to send all messages */\n+\tnum_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);\n+\tif (num_desc_avail == 0 || num_desc_avail < num_q_msg) {\n+\t\tstatus = -ENOSPC;\n+\t\tgoto sq_send_command_out;\n+\t}\n+\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tstruct idpf_ctlq_msg *msg = &q_msg[i];\n+\n+\t\tdesc = IDPF_CTLQ_DESC(cq, cq->next_to_use);\n+\t\tdesc->opcode = CPU_TO_LE16(msg->opcode);\n+\t\tdesc->pfid_vfid = CPU_TO_LE16(msg->func_id);\n+\t\tdesc->cookie_high =\n+\t\t\tCPU_TO_LE32(msg->cookie.mbx.chnl_opcode);\n+\t\tdesc->cookie_low =\n+\t\t\tCPU_TO_LE32(msg->cookie.mbx.chnl_retval);\n+\t\tdesc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<\n+\t\t\t\tIDPF_CTLQ_FLAG_HOST_ID_S);\n+\t\tif (msg->data_len) {\n+\t\t\tstruct idpf_dma_mem *buff = msg->ctx.indirect.payload;\n+\n+\t\t\tdesc->datalen |= CPU_TO_LE16(msg->data_len);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);\n+\t\t\t/* Update the address values in the desc with the pa\n+\t\t\t * value for respective buffer\n+\t\t\t */\n+\t\t\tdesc->params.indirect.addr_high =\n+\t\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(buff->pa));\n+\t\t\tdesc->params.indirect.addr_low =\n+\t\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(buff->pa));\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.indirect.context,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+\t\t} else {\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.direct,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+\t\t}\n+\n+\t\t/* Store buffer info */\n+\t\tcq->bi.tx_msg[cq->next_to_use] = msg;\n+\t\t(cq->next_to_use)++;\n+\t\tif (cq->next_to_use == cq->ring_size)\n+\t\t\tcq->next_to_use = 0;\n+\t}\n+\n+\t/* Force memory write to complete before letting hardware\n+\t * know that there are new descriptors to fetch.\n+\t */\n+\tidpf_wmb();\n+\twr32(hw, cq->reg.tail, cq->next_to_use);\n+\n+sq_send_command_out:\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write\n+ * back for the requested queue\n+ * @cq: pointer to the specific Control queue\n+ * @clean_count: (input|output) number of descriptors to clean as input, and\n+ * number of descriptors actually cleaned as output\n+ * @msg_status: (output) pointer to msg pointer array to be populated; needs\n+ * to be allocated by caller\n+ * @force: 
(input) clean descriptors which were not done yet. Use with caution\n+ * in kernel mode only\n+ *\n+ * Returns an array of message pointers associated with the cleaned\n+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n+ * descriptors.  The status will be returned for each; any messages that failed\n+ * to send will have a non-zero status. The caller is expected to free original\n+ * ctlq_msgs and free or reuse the DMA buffers.\n+ */\n+static int\n+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t     struct idpf_ctlq_msg *msg_status[], bool force)\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tuint16_t i = 0, num_to_clean;\n+\tuint16_t ntc, desc_err;\n+\tint ret = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tif (*clean_count == 0)\n+\t\treturn 0;\n+\tif (*clean_count > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tntc = cq->next_to_clean;\n+\tnum_to_clean = *clean_count;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tif (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tdesc_err = LE16_TO_CPU(desc->ret_val);\n+\t\tif (desc_err) {\n+\t\t\t/* strip off FW internal code */\n+\t\t\tdesc_err &= 0xff;\n+\t\t}\n+\n+\t\tmsg_status[i] = cq->bi.tx_msg[ntc];\n+\t\tif (!msg_status[i])\n+\t\t\tbreak;\n+\t\tmsg_status[i]->status = desc_err;\n+\t\tcq->bi.tx_msg[ntc] = NULL;\n+\t\t/* Zero out any stale data */\n+\t\tidpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t}\n+\n+\tcq->next_to_clean = ntc;\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\t/* Return number of descriptors actually cleaned */\n+\t*clean_count = i;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the\n+ * requested queue\n+ * @cq: pointer to the specific Control queue\n+ * @clean_count: (input|output) number of descriptors to clean as input, and\n+ * number of descriptors actually cleaned as output\n+ * @msg_status: (output) pointer to msg pointer array to be populated; needs\n+ * to be allocated by caller\n+ *\n+ * Returns an array of message pointers associated with the cleaned\n+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n+ * descriptors.  The status will be returned for each; any messages that failed\n+ * to send will have a non-zero status. The caller is expected to free original\n+ * ctlq_msgs and free or reuse the DMA buffers.\n+ */\n+int\n+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t   struct idpf_ctlq_msg *msg_status[])\n+{\n+\treturn __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);\n+}\n+\n+/**\n+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to control queue handle\n+ * @buff_count: (input|output) input is number of buffers caller is trying to\n+ * return; output is number of buffers that were not posted\n+ * @buffs: array of pointers to dma mem structs to be given to hardware\n+ *\n+ * Caller uses this function to return DMA buffers to the descriptor ring after\n+ * consuming them; buff_count will be the number of buffers.\n+ *\n+ * Note: this function needs to be called after a receive call even\n+ * if there are no DMA buffers to be returned, i.e. 
buff_count = 0,\n+ * buffs = NULL to support direct commands\n+ */\n+int\n+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\tuint16_t *buff_count, struct idpf_dma_mem **buffs)\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tuint16_t ntp = cq->next_to_post;\n+\tbool buffs_avail = false;\n+\tuint16_t tbp = ntp + 1;\n+\tint status = 0;\n+\tint i = 0;\n+\n+\tif (*buff_count > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\tif (*buff_count > 0)\n+\t\tbuffs_avail = true;\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tif (tbp >= cq->ring_size)\n+\t\ttbp = 0;\n+\n+\tif (tbp == cq->next_to_clean)\n+\t\t/* Nothing to do */\n+\t\tgoto post_buffs_out;\n+\n+\t/* Post buffers for as many as provided or up until the last one used */\n+\twhile (ntp != cq->next_to_clean) {\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntp);\n+\t\tif (cq->bi.rx_buff[ntp])\n+\t\t\tgoto fill_desc;\n+\t\tif (!buffs_avail) {\n+\t\t\t/* If the caller hasn't given us any buffers or\n+\t\t\t * there are none left, search the ring itself\n+\t\t\t * for an available buffer to move to this\n+\t\t\t * entry starting at the next entry in the ring\n+\t\t\t */\n+\t\t\ttbp = ntp + 1;\n+\t\t\t/* Wrap ring if necessary */\n+\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\ttbp = 0;\n+\n+\t\t\twhile (tbp != cq->next_to_clean) {\n+\t\t\t\tif (cq->bi.rx_buff[tbp]) {\n+\t\t\t\t\tcq->bi.rx_buff[ntp] =\n+\t\t\t\t\t\tcq->bi.rx_buff[tbp];\n+\t\t\t\t\tcq->bi.rx_buff[tbp] = NULL;\n+\n+\t\t\t\t\t/* Found a buffer, no need to\n+\t\t\t\t\t * search anymore\n+\t\t\t\t\t */\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\t/* Wrap ring if necessary */\n+\t\t\t\ttbp++;\n+\t\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\t\ttbp = 0;\n+\t\t\t}\n+\n+\t\t\tif (tbp == cq->next_to_clean)\n+\t\t\t\tgoto post_buffs_out;\n+\t\t} else {\n+\t\t\t/* Give back pointer to DMA buffer */\n+\t\t\tcq->bi.rx_buff[ntp] = buffs[i];\n+\t\t\ti++;\n+\n+\t\t\tif (i >= *buff_count)\n+\t\t\t\tbuffs_avail = false;\n+\t\t}\n+\n+fill_desc:\n+\t\tdesc->flags =\n+\t\t\tCPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n+\n+\t\t/* Post buffers to descriptor */\n+\t\tdesc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);\n+\t\tdesc->params.indirect.addr_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\t\tdesc->params.indirect.addr_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\n+\t\tntp++;\n+\t\tif (ntp == cq->ring_size)\n+\t\t\tntp = 0;\n+\t}\n+\n+post_buffs_out:\n+\t/* Only update tail if buffers were actually posted */\n+\tif (cq->next_to_post != ntp) {\n+\t\tif (ntp)\n+\t\t\t/* Update next_to_post to ntp - 1 since current ntp\n+\t\t\t * will not have a buffer\n+\t\t\t */\n+\t\t\tcq->next_to_post = ntp - 1;\n+\t\telse\n+\t\t\t/* Wrap to end of end ring since current ntp is 0 */\n+\t\t\tcq->next_to_post = cq->ring_size - 1;\n+\n+\t\twr32(hw, cq->reg.tail, cq->next_to_post);\n+\t}\n+\n+\tidpf_release_lock(&cq->cq_lock);\n+\t/* return the number of buffers that were not posted */\n+\t*buff_count = *buff_count - i;\n+\n+\treturn status;\n+}\n+\n+/**\n+ * cpfl_ctlq_recv - receive control queue message call back\n+ * @cq: pointer to control queue handle to receive on\n+ * @num_q_msg: (input|output) input number of messages that should be received;\n+ * output number of messages actually received\n+ * @q_msg: (output) array of received control queue messages on this q;\n+ * needs to be pre-allocated by caller for as many messages as requested\n+ *\n+ * Called by interrupt handler or polling mechanism. 
Caller is expected\n+ * to free buffers\n+ */\n+int\n+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,\n+\t       struct idpf_ctlq_msg *q_msg)\n+{\n+\tuint16_t num_to_clean, ntc, ret_val, flags;\n+\tstruct idpf_ctlq_desc *desc;\n+\tint ret_code = 0;\n+\tuint16_t i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn -ENOBUFS;\n+\n+\tif (*num_q_msg == 0)\n+\t\treturn 0;\n+\telse if (*num_q_msg > cq->ring_size)\n+\t\treturn -EINVAL;\n+\n+\t/* take the lock before we start messing with the ring */\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\tntc = cq->next_to_clean;\n+\tnum_to_clean = *num_q_msg;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tflags = LE16_TO_CPU(desc->flags);\n+\t\tif (!(flags & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tret_val = LE16_TO_CPU(desc->ret_val);\n+\t\tq_msg[i].vmvf_type = (flags &\n+\t\t\t\t     (IDPF_CTLQ_FLAG_FTYPE_VM |\n+\t\t\t\t      IDPF_CTLQ_FLAG_FTYPE_PF)) >>\n+\t\t\t\t      IDPF_CTLQ_FLAG_FTYPE_S;\n+\n+\t\tif (flags & IDPF_CTLQ_FLAG_ERR)\n+\t\t\tret_code = -EBADMSG;\n+\n+\t\tq_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);\n+\t\tq_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);\n+\t\tq_msg[i].opcode = LE16_TO_CPU(desc->opcode);\n+\t\tq_msg[i].data_len = LE16_TO_CPU(desc->datalen);\n+\t\tq_msg[i].status = ret_val;\n+\n+\t\tif (desc->datalen) {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.indirect.context,\n+\t\t\t\t    &desc->params.indirect,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\n+\t\t\t/* Assign pointer to dma buffer to ctlq_msg array\n+\t\t\t * to be given to upper layer\n+\t\t\t */\n+\t\t\tq_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];\n+\n+\t\t\t/* Zero out pointer to DMA buffer info;\n+\t\t\t * will be repopulated by post buffers API\n+\t\t\t */\n+\t\t\tcq->bi.rx_buff[ntc] = NULL;\n+\t\t} else {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.direct,\n+\t\t\t\t    desc->params.raw,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\t\t}\n+\n+\t\t/* Zero out stale data in descriptor */\n+\t\tidpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),\n+\t\t\t    IDPF_DMA_MEM);\n+\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t};\n+\n+\tcq->next_to_clean = ntc;\n+\tidpf_release_lock(&cq->cq_lock);\n+\t*num_q_msg = i;\n+\tif (*num_q_msg == 0)\n+\t\tret_code = -ENOMSG;\n+\n+\treturn ret_code;\n+}\n+\n+int\n+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,\n+\t\t    struct idpf_ctlq_info **cq)\n+{\n+\treturn cpfl_ctlq_add(hw, qinfo, cq);\n+}\n+\n+/**\n+ * cpfl_ctlq_shutdown - shutdown the CQ\n+ * The main shutdown routine for any controq queue\n+ */\n+static void\n+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\tif (!cq->ring_size)\n+\t\tgoto shutdown_sq_out;\n+\n+\t/* free ring buffers and the ring itself */\n+\tcpfl_ctlq_dealloc_ring_res(hw, cq);\n+\n+\t/* Set ring_size to 0 to indicate uninitialized queue */\n+\tcq->ring_size = 0;\n+\n+shutdown_sq_out:\n+\tidpf_release_lock(&cq->cq_lock);\n+\tidpf_destroy_lock(&cq->cq_lock);\n+}\n+\n+/**\n+ * cpfl_ctlq_remove - deallocate and remove specified control queue\n+ */\n+static void\n+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\tLIST_REMOVE(cq, cq_list);\n+\tcpfl_ctlq_shutdown(hw, cq);\n+\tidpf_free(hw, cq);\n+}\n+\n+void\n+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info 
*cq)\n+{\n+\tcpfl_ctlq_remove(hw, cq);\n+}\n+\n+int\n+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])\n+{\n+\treturn cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);\n+}\n+\n+int\n+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,\n+\t\t     struct idpf_ctlq_msg q_msg[])\n+{\n+\treturn cpfl_ctlq_recv(cq, num_q_msg, q_msg);\n+}\n+\n+int\n+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t      uint16_t *buff_count, struct idpf_dma_mem **buffs)\n+{\n+\treturn cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);\n+}\n+\n+int\n+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,\n+\t\t\t struct idpf_ctlq_msg *msg_status[])\n+{\n+\treturn cpfl_ctlq_clean_sq(cq, clean_count, msg_status);\n+}\ndiff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h\nnew file mode 100644\nindex 0000000000..740ae6522c\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_controlq.h\n@@ -0,0 +1,75 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2023 Intel Corporation\n+ */\n+\n+#ifndef _CPFL_CONTROLQ_H_\n+#define _CPFL_CONTROLQ_H_\n+\n+#include \"base/idpf_osdep.h\"\n+#include \"base/idpf_controlq_api.h\"\n+\n+#define CPFL_CTLQ_DESCRIPTOR_SIZE\t32\n+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE\t4096\n+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE\t256\n+#define CPFL_DFLT_MBX_RING_LEN\t\t512\n+#define CPFL_CFGQ_RING_LEN\t\t512\n+\n+/* CRQ/CSQ specific error codes */\n+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */\n+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */\n+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */\n+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */\n+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */\n+\n+/* Generic queue info structures */\n+/* MB, CONFIG and EVENT q do not have extended info */\n+struct cpfl_ctlq_create_info {\n+\tenum idpf_ctlq_type type;\n+\tint id; /* absolute queue offset passed as input\n+\t\t * -1 for default mailbox if present\n+\t\t */\n+\tuint16_t len; /* Queue length passed as input */\n+\tuint16_t buf_size; /* buffer size passed as input */\n+\tuint64_t base_address; /* output, HPA of the Queue start  */\n+\tstruct idpf_ctlq_reg reg; /* registers accessed by ctlqs */\n+\t/* Pass down previously allocated descriptor ring and buffer memory\n+\t * for each control queue to be created\n+\t */\n+\tstruct idpf_dma_mem ring_mem;\n+\t/* The CP will allocate one large buffer that the CPFlib will piece\n+\t * into individual buffers for each descriptor\n+\t */\n+\tstruct idpf_dma_mem buf_mem;\n+\n+\tint ext_info_size;\n+\tvoid *ext_info; /* Specific to q type */\n+};\n+\n+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,\n+\t\t\t     struct idpf_ctlq_info *cq,\n+\t\t\t     struct cpfl_ctlq_create_info *qinfo);\n+int cpfl_ctlq_add(struct idpf_hw *hw,\n+\t\t  struct cpfl_ctlq_create_info *qinfo,\n+\t\t  struct idpf_ctlq_info **cq);\n+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);\n+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t       struct idpf_ctlq_msg *msg_status[]);\n+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t    u16 *buff_count, struct idpf_dma_mem **buffs);\n+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t   struct idpf_ctlq_msg *q_msg);\n+int cpfl_vport_ctlq_add(struct idpf_hw 
*hw,\n+\t\t\tstruct cpfl_ctlq_create_info *qinfo,\n+\t\t\tstruct idpf_ctlq_info **cq);\n+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);\n+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);\n+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t\t struct idpf_ctlq_msg q_msg[]);\n+\n+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t\t  u16 *buff_count, struct idpf_dma_mem **buffs);\n+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t\t     struct idpf_ctlq_msg *msg_status[]);\n+#endif\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex c350728861..a2bc6784d0 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint\n \t\treturn;\n \t}\n \n+\t/* ignore if it is ctrl vport */\n+\tif (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)\n+\t\treturn;\n+\n \tvport = cpfl_find_vport(adapter, vc_event->vport_id);\n \tif (!vport) {\n \t\tPMD_DRV_LOG(ERR, \"Can't find vport.\");\n@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)\n \trte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);\n }\n \n+static int\n+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)\n+{\n+\tint i, ret;\n+\n+\tfor (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {\n+\t\tret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to disable Tx config queue.\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {\n+\t\tret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to disable Rx config queue.\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)\n+{\n+\tint i, ret;\n+\n+\tret = cpfl_config_ctlq_tx(adapter);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Tx config queue.\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = cpfl_config_ctlq_rx(adapter);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Rx config queue.\");\n+\t\treturn ret;\n+\t}\n+\n+\tfor (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {\n+\t\tret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to enable Tx config queue.\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {\n+\t\tret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to enable Rx config queue.\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);\n+\tstruct cpfl_ctlq_create_info *create_cfgq_info;\n+\tint i;\n+\n+\tcreate_cfgq_info = adapter->cfgq_info;\n+\n+\tfor (i = 0; i < CPFL_CFGQ_NUM; i++) {\n+\t\tif (adapter->ctlqp[i])\n+\t\t\tcpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);\n+\t\tif (create_cfgq_info[i].ring_mem.va)\n+\t\t\tidpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);\n+\t\tif (create_cfgq_info[i].buf_mem.va)\n+\t\t\tidpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);\n+\t}\n+}\n+\n+static int\n+cpfl_add_cfgqs(struct cpfl_adapter_ext 
*adapter)\n+{\n+\tstruct idpf_ctlq_info *cfg_cq;\n+\tint ret = 0;\n+\tint i = 0;\n+\n+\tfor (i = 0; i < CPFL_CFGQ_NUM; i++) {\n+\t\tcfg_cq = NULL;\n+\t\tret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),\n+\t\t\t\t\t  &adapter->cfgq_info[i],\n+\t\t\t\t\t  &cfg_cq);\n+\t\tif (ret || !cfg_cq) {\n+\t\t\tPMD_DRV_LOG(ERR, \"ctlq add failed for queue id: %d\",\n+\t\t\t\t    adapter->cfgq_info[i].id);\n+\t\t\tcpfl_remove_cfgqs(adapter);\n+\t\t\treturn ret;\n+\t\t}\n+\t\tPMD_DRV_LOG(INFO, \"added cfgq to hw. queue id: %d\",\n+\t\t\t    adapter->cfgq_info[i].id);\n+\t\tadapter->ctlqp[i] = cfg_cq;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+#define CPFL_CFGQ_RING_LEN\t\t512\n+#define CPFL_CFGQ_DESCRIPTOR_SIZE\t32\n+#define CPFL_CFGQ_BUFFER_SIZE\t\t256\n+#define CPFL_CFGQ_RING_SIZE\t\t512\n+\n+static int\n+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct cpfl_ctlq_create_info *create_cfgq_info;\n+\tstruct cpfl_vport *vport;\n+\tint i, err;\n+\tuint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);\n+\tuint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;\n+\n+\tvport = &adapter->ctrl_vport;\n+\tcreate_cfgq_info = adapter->cfgq_info;\n+\n+\tfor (i = 0; i < CPFL_CFGQ_NUM; i++) {\n+\t\tif (i % 2 == 0) {\n+\t\t\t/* Setup Tx config queue */\n+\t\t\tcreate_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;\n+\t\t\tcreate_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;\n+\t\t\tcreate_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;\n+\t\t\tcreate_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;\n+\t\t\tmemset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));\n+\t\t\tcreate_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +\n+\t\t\t\ti / 2 * vport->base.chunks_info.tx_qtail_spacing;\n+\t\t} else {\n+\t\t\t/* Setup Rx config queue */\n+\t\t\tcreate_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;\n+\t\t\tcreate_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;\n+\t\t\tcreate_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;\n+\t\t\tcreate_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;\n+\t\t\tmemset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));\n+\t\t\tcreate_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +\n+\t\t\t\ti / 2 * vport->base.chunks_info.rx_qtail_spacing;\n+\t\t\tif (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,\n+\t\t\t\t\t\tbuf_size)) {\n+\t\t\t\terr = -ENOMEM;\n+\t\t\t\tgoto free_mem;\n+\t\t\t}\n+\t\t}\n+\t\tif (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,\n+\t\t\t\t\tring_size)) {\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto free_mem;\n+\t\t}\n+\t}\n+\treturn 0;\n+free_mem:\n+\tfor (i = 0; i < CPFL_CFGQ_NUM; i++) {\n+\t\tif (create_cfgq_info[i].ring_mem.va)\n+\t\t\tidpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);\n+\t\tif (create_cfgq_info[i].buf_mem.va)\n+\t\t\tidpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);\n+\t}\n+\treturn err;\n+}\n+\n+static int\n+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct cpfl_vport *vport = &adapter->ctrl_vport;\n+\tstruct virtchnl2_create_vport *vport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;\n+\tint i;\n+\n+\tvport->itf.adapter = adapter;\n+\tvport->base.adapter = &adapter->base;\n+\tvport->base.vport_id = vport_info->vport_id;\n+\n+\tfor (i = 0; i < vport_info->chunks.num_chunks; i++) {\n+\t\tif (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {\n+\t\t\tvport->base.chunks_info.tx_start_qid 
=\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->base.chunks_info.tx_qtail_start =\n+\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->base.chunks_info.tx_qtail_spacing =\n+\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {\n+\t\t\tvport->base.chunks_info.rx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->base.chunks_info.rx_qtail_start =\n+\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->base.chunks_info.rx_qtail_spacing =\n+\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(ERR, \"Unsupported chunk type\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)\n+{\n+\tcpfl_stop_cfgqs(adapter);\n+\tcpfl_remove_cfgqs(adapter);\n+\tidpf_vc_vport_destroy(&adapter->ctrl_vport.base);\n+}\n+\n+static int\n+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)\n+{\n+\tint ret;\n+\n+\tret = cpfl_vc_create_ctrl_vport(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to create control vport\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = cpfl_init_ctrl_vport(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init control vport\");\n+\t\tgoto err_init_ctrl_vport;\n+\t}\n+\n+\tret = cpfl_cfgq_setup(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup control queues\");\n+\t\tgoto err_cfgq_setup;\n+\t}\n+\n+\tret = cpfl_add_cfgqs(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to add control queues\");\n+\t\tgoto err_add_cfgq;\n+\t}\n+\n+\tret = cpfl_start_cfgqs(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to start control queues\");\n+\t\tgoto err_start_cfgqs;\n+\t}\n+\n+\treturn 0;\n+\n+err_start_cfgqs:\n+\tcpfl_stop_cfgqs(adapter);\n+err_add_cfgq:\n+\tcpfl_remove_cfgqs(adapter);\n+err_cfgq_setup:\n+err_init_ctrl_vport:\n+\tidpf_vc_vport_destroy(&adapter->ctrl_vport.base);\n+\n+\treturn ret;\n+}\n+\n static struct virtchnl2_get_capabilities req_caps = {\n \t.csum_caps =\n \tVIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |\n@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a\n \t\tgoto err_vports_alloc;\n \t}\n \n+\tret = cpfl_ctrl_path_open(adapter);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup control path\");\n+\t\tgoto err_create_ctrl_vport;\n+\t}\n+\n #ifdef RTE_HAS_JANSSON\n \tret = cpfl_flow_init(adapter);\n \tif (ret) {\n@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a\n \n #ifdef RTE_HAS_JANSSON\n err_flow_init:\n+\tcpfl_ctrl_path_close(adapter);\n #endif\n+err_create_ctrl_vport:\n+\trte_free(adapter->vports);\n err_vports_alloc:\n \trte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);\n \tcpfl_repr_allowlist_uninit(adapter);\n@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)\n #ifdef RTE_HAS_JANSSON\n \tcpfl_flow_uninit(adapter);\n #endif\n+\tcpfl_ctrl_path_close(adapter);\n \trte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);\n \tcpfl_vport_map_uninit(adapter);\n \tidpf_adapter_deinit(&adapter->base);\ndiff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h\nindex 69bf32cfbd..7f83d170d7 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.h\n+++ b/drivers/net/cpfl/cpfl_ethdev.h\n@@ -22,6 +22,7 @@\n #include \"cpfl_logs.h\"\n #include \"cpfl_cpchnl.h\"\n #include \"cpfl_representor.h\"\n+#include 
\"cpfl_controlq.h\"\n \n /* Currently, backend supports up to 8 vports */\n #define CPFL_MAX_VPORT_NUM\t8\n@@ -82,6 +83,10 @@\n #define CPFL_META_CHUNK_LENGTH\t1024\n #define CPFL_META_LENGTH\t32\n \n+#define CPFL_RX_CFGQ_NUM\t4\n+#define CPFL_TX_CFGQ_NUM\t4\n+#define CPFL_CFGQ_NUM\t\t8\n+\n /* bit[15:14] type\n  * bit[13] host/accelerator core\n  * bit[12] apf/cpf\n@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {\n \tstruct cpfl_flow_js_parser *flow_parser;\n \n \tstruct cpfl_metadata meta;\n+\n+\t/* ctrl vport and ctrl queues. */\n+\tstruct cpfl_vport ctrl_vport;\n+\tuint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];\n+\tstruct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];\n+\tstruct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];\n };\n \n TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);\n@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,\n \t\t\t   struct cpchnl2_vport_id *vport_id,\n \t\t\t   struct cpfl_vport_id *vi,\n \t\t\t   struct cpchnl2_get_vport_info_response *response);\n+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);\n+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);\n+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);\n \n #define CPFL_DEV_TO_PCI(eth_dev)\t\t\\\n \tRTE_DEV_TO_PCI((eth_dev)->device)\ndiff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c\nindex a21a4a451f..932840a972 100644\n--- a/drivers/net/cpfl/cpfl_vchnl.c\n+++ b/drivers/net/cpfl/cpfl_vchnl.c\n@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,\n \n \treturn 0;\n }\n+\n+int\n+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct virtchnl2_create_vport vport_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err = -1;\n+\n+\tmemset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));\n+\tvport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\tvport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\tvport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\tvport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;\n+\tvport_msg.num_tx_complq = 0;\n+\tvport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;\n+\tvport_msg.num_rx_bufq = 0;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CREATE_VPORT;\n+\targs.in_args = (uint8_t *)&vport_msg;\n+\targs.in_args_size = sizeof(vport_msg);\n+\targs.out_buffer = adapter->base.mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_vc_cmd_execute(&adapter->base, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\trte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,\n+\t\t   IDPF_DFLT_MBX_BUF_SIZE);\n+\treturn err;\n+}\n+\n+int\n+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct cpfl_vport *vport = &adapter->ctrl_vport;\n+\tstruct virtchnl2_config_rx_queues *vc_rxqs = NULL;\n+\tstruct virtchnl2_rxq_info *rxq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err, i;\n+\n+\tif (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\tPMD_DRV_LOG(ERR, \"This rxq model isn't supported.\");\n+\t\terr = -EINVAL;\n+\t\treturn err;\n+\t}\n+\n+\tnum_qs = CPFL_RX_CFGQ_NUM;\n+\tsize = sizeof(*vc_rxqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_rxq_info);\n+\tvc_rxqs = rte_zmalloc(\"cfg_rxqs\", size, 0);\n+\tif (!vc_rxqs) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_rx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn 
err;\n+\t}\n+\tvc_rxqs->vport_id = vport->base.vport_id;\n+\tvc_rxqs->num_qinfo = num_qs;\n+\n+\tfor (i = 0; i < num_qs; i++) {\n+\t\trxq_info = &vc_rxqs->qinfo[i];\n+\t\trxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;\n+\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;\n+\t\trxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;\n+\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\trxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;\n+\t\trxq_info->max_pkt_size = vport->base.max_pkt_len;\n+\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;\n+\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\t\trxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_rxqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->base.mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_vc_cmd_execute(&adapter->base, &args);\n+\trte_free(vc_rxqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES\");\n+\n+\treturn err;\n+}\n+\n+int\n+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)\n+{\n+\tstruct cpfl_vport *vport = &adapter->ctrl_vport;\n+\tstruct virtchnl2_config_tx_queues *vc_txqs = NULL;\n+\tstruct virtchnl2_txq_info *txq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err, i;\n+\n+\tif (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\tPMD_DRV_LOG(ERR, \"This txq model isn't supported.\");\n+\t\terr = -EINVAL;\n+\t\treturn err;\n+\t}\n+\n+\tnum_qs = CPFL_TX_CFGQ_NUM;\n+\tsize = sizeof(*vc_txqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_txq_info);\n+\tvc_txqs = rte_zmalloc(\"cfg_txqs\", size, 0);\n+\tif (!vc_txqs) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_tx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn err;\n+\t}\n+\tvc_txqs->vport_id = vport->base.vport_id;\n+\tvc_txqs->num_qinfo = num_qs;\n+\n+\tfor (i = 0; i < num_qs; i++) {\n+\t\ttxq_info = &vc_txqs->qinfo[i];\n+\t\ttxq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;\n+\t\ttxq_info->queue_id = adapter->cfgq_info[2 * i].id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n+\t\ttxq_info->ring_len = adapter->cfgq_info[2 * i].len;\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_txqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->base.mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_vc_cmd_execute(&adapter->base, &args);\n+\trte_free(vc_txqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES\");\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build\nindex f5654d5b0e..290ff1e655 100644\n--- a/drivers/net/cpfl/meson.build\n+++ b/drivers/net/cpfl/meson.build\n@@ -18,6 +18,7 @@ sources = files(\n         'cpfl_rxtx.c',\n         'cpfl_vchnl.c',\n         'cpfl_representor.c',\n+        'cpfl_controlq.c',\n )\n \n if arch_subdir == 'x86'\n",
    "prefixes": [
        "v8",
        "4/9"
    ]
}
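The PUT/PATCH operations listed at the top can change the mutable patch fields that appear in this response (for example state, archived, delegate). The following is a minimal sketch, not a definitive recipe: it assumes a Patchwork API token with maintainer rights and the token-based Authorization header; the token value shown is a hypothetical placeholder.

import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/132021/"
API_TOKEN = "0123456789abcdef"  # hypothetical placeholder; use a real Patchwork API token

# Partial update via HTTP PATCH: archive the patch and set its state,
# using field names and values that appear in the response above.
resp = requests.patch(
    PATCH_URL,
    headers={"Authorization": "Token " + API_TOKEN},
    json={"state": "superseded", "archived": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])  # expected: "superseded"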