get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

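A minimal sketch of driving these endpoints from a script, assuming the Python requests library and Patchwork's token authentication for writes; the token and the updated field are placeholders, and format=json is requested so the body parses as JSON (format=api, shown below, selects the browsable view):

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 114569

# get: read-only, no authentication required.
patch = requests.get(f"{BASE}/patches/{PATCH_ID}/",
                     params={"format": "json"}).json()
print(patch["name"], patch["state"])

# patch/put: updates require a maintainer API token. "Token ..." is the
# usual Patchwork scheme, but verify it against your instance.
headers = {"Authorization": "Token <your-api-token>"}
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      headers=headers,
                      json={"state": "superseded"})
resp.raise_for_status()
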
GET /api/patches/114569/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 114569,
    "url": "http://patches.dpdk.org/api/patches/114569/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-4-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220803113104.1184059-4-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220803113104.1184059-4-junfeng.guo@intel.com",
    "date": "2022-08-03T11:30:54",
    "name": "[03/13] net/idpf: support device initialization",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "96023b9bdd9a45f9aebf47bf0737f4a6846faaf7",
    "submitter": {
        "id": 1785,
        "url": "http://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-4-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 24188,
            "url": "http://patches.dpdk.org/api/series/24188/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24188",
            "date": "2022-08-03T11:30:51",
            "name": "add support for idpf PMD in DPDK",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/24188/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/114569/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/114569/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AC54AA00C5;\n\tWed,  3 Aug 2022 13:31:44 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 84B2D42B86;\n\tWed,  3 Aug 2022 13:31:27 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 189D742BB0\n for <dev@dpdk.org>; Wed,  3 Aug 2022 13:31:23 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Aug 2022 04:31:23 -0700",
            "from dpdk-jf-ntb-v2.sh.intel.com ([10.67.118.246])\n by FMSMGA003.fm.intel.com with ESMTP; 03 Aug 2022 04:31:21 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1659526284; x=1691062284;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=NP0JPn7Rnqwz9M9M4w5BBGX2dOIhr75/oFrim2jPSn8=;\n b=NW8Oms5Zp7suO63rHFIRx0kQgRFjZXJcNeJw5EQKaspjZPUDlt8FnzPY\n LyIUL34hIvU1vwIbQJvXZaMcvsk04Qu1IfE2PV3/FxrWpxbH0ehqaDvtc\n vFYHDBP4ZBg36YkcmkzDv2C+howL4/RHogf+fht2DKgDiUKQ/uKdBoIXr\n ZxFhIxhofzHLOuPJjO89u37IzdHcbcRkpsUGjUFoegqjRKD9K7acLb6bB\n Afb1oerrvttxZosrGqqz2KMwcyrszTDqBlrjjXguKQnRA7be47Bxx7VuI\n KxzU8jwlDfeoVreYo/sNLcMWjcRNqjtIYYKGBkMLZhHJ/pvZbHkXeFr0M A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10427\"; a=\"375948499\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"375948499\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"692211047\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com,\n\tjingjing.wu@intel.com,\n\tbeilei.xing@intel.com",
        "Cc": "dev@dpdk.org, junfeng.guo@intel.com, Xiaoyun Li <xiaoyun.li@intel.com>,\n Xiao Wang <xiao.w.wang@intel.com>",
        "Subject": "[PATCH 03/13] net/idpf: support device initialization",
        "Date": "Wed,  3 Aug 2022 19:30:54 +0800",
        "Message-Id": "<20220803113104.1184059-4-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "References": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Support device init and the following dev ops:\n\t- dev_configure\n\t- dev_start\n\t- dev_stop\n\t- dev_close\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Xiao Wang <xiao.w.wang@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c | 651 +++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_ethdev.h | 205 +++++++++++\n drivers/net/idpf/idpf_vchnl.c  | 476 ++++++++++++++++++++++++\n drivers/net/idpf/meson.build   |  18 +\n drivers/net/idpf/version.map   |   3 +\n drivers/net/meson.build        |   1 +\n 6 files changed, 1354 insertions(+)\n create mode 100644 drivers/net/idpf/idpf_ethdev.c\n create mode 100644 drivers/net/idpf/idpf_ethdev.h\n create mode 100644 drivers/net/idpf/idpf_vchnl.c\n create mode 100644 drivers/net/idpf/meson.build\n create mode 100644 drivers/net/idpf/version.map",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nnew file mode 100644\nindex 0000000000..87c68226dd\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -0,0 +1,651 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+#include <rte_malloc.h>\n+#include <rte_memzone.h>\n+#include <rte_dev.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#define REPRESENTOR\t\t\"representor\"\n+\n+struct idpf_adapter *adapter;\n+uint16_t used_vecs_num;\n+\n+static const char * const idpf_valid_args[] = {\n+\tREPRESENTOR,\n+\tNULL\n+};\n+\n+static int idpf_dev_configure(struct rte_eth_dev *dev);\n+static int idpf_dev_start(struct rte_eth_dev *dev);\n+static int idpf_dev_stop(struct rte_eth_dev *dev);\n+static int idpf_dev_close(struct rte_eth_dev *dev);\n+\n+static const struct eth_dev_ops idpf_eth_dev_ops = {\n+\t.dev_configure\t\t\t= idpf_dev_configure,\n+\t.dev_start\t\t\t= idpf_dev_start,\n+\t.dev_stop\t\t\t= idpf_dev_stop,\n+\t.dev_close\t\t\t= idpf_dev_close,\n+};\n+\n+static int\n+idpf_init_vport_req_info(struct rte_eth_dev *dev)\n+{\n+\tstruct virtchnl2_create_vport *vport_info;\n+\tuint16_t idx = adapter->next_vport_idx;\n+\n+\tif (!adapter->vport_req_info[idx]) {\n+\t\tadapter->vport_req_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\t    sizeof(struct virtchnl2_create_vport), 0);\n+\t\tif (!adapter->vport_req_info[idx]) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tvport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n+\n+\tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\n+\treturn 0;\n+}\n+\n+static uint16_t\n+idpf_get_next_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb,\n+\t\t\tuint16_t cur_vport_idx)\n+{\n+\tuint16_t vport_idx;\n+\tuint16_t i;\n+\n+\tif (cur_vport_idx < max_vport_nb && !vports[cur_vport_idx + 1]) {\n+\t\tvport_idx = cur_vport_idx + 1;\n+\t\treturn vport_idx;\n+\t}\n+\n+\tfor (i = 0; i < max_vport_nb; i++) {\n+\t\tif (!vports[i])\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == max_vport_nb)\n+\t\tvport_idx = IDPF_INVALID_VPORT_IDX;\n+\telse\n+\t\tvport_idx = i;\n+\n+\treturn vport_idx;\n+}\n+\n+#ifndef IDPF_RSS_KEY_LEN\n+#define IDPF_RSS_KEY_LEN 52\n+#endif\n+\n+static int\n+idpf_init_vport(struct rte_eth_dev *dev)\n+{\n+\tuint16_t idx = adapter->next_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tint i;\n+\n+\tvport->adapter = adapter;\n+\tvport->vport_id = vport_info->vport_id;\n+\tvport->txq_model = vport_info->txq_model;\n+\tvport->rxq_model = vport_info->rxq_model;\n+\tvport->num_tx_q = vport_info->num_tx_q;\n+\tvport->num_tx_complq = vport_info->num_tx_complq;\n+\tvport->num_rx_q = vport_info->num_rx_q;\n+\tvport->num_rx_bufq = vport_info->num_rx_bufq;\n+\tvport->max_mtu = vport_info->max_mtu;\n+\trte_memcpy(vport->default_mac_addr,\n+\t\t   vport_info->default_mac_addr, ETH_ALEN);\n+\tvport->rss_algorithm = vport_info->rss_algorithm;\n+\tvport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,\n+\t\t\t\t     vport_info->rss_key_size);\n+\tvport->rss_lut_size = vport_info->rss_lut_size;\n+\tvport->sw_idx = idx;\n+\n+\tfor (i = 0; i < vport_info->chunks.num_chunks; i++) {\n+\t\tif 
(vport_info->chunks.chunks[i].type ==\n+\t\t    VIRTCHNL2_QUEUE_TYPE_TX) {\n+\t\t\tvport->chunks_info.tx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_RX) {\n+\t\t\tvport->chunks_info.rx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) {\n+\t\t\tvport->chunks_info.tx_compl_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_compl_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_compl_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) {\n+\t\t\tvport->chunks_info.rx_buf_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_buf_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_buf_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t}\n+\t}\n+\n+\tadapter->vports[idx] = vport;\n+\tadapter->cur_vport_nb++;\n+\tadapter->next_vport_idx = idpf_get_next_vport_idx(adapter->vports,\n+\t\t\t\t\t\t  adapter->max_vport_nb, idx);\n+\tif (adapter->next_vport_idx == IDPF_INVALID_VPORT_IDX) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to get next vport id\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tint ret = 0;\n+\n+\tret = idpf_init_vport_req_info(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vport req_info.\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = idpf_create_vport(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to create vport.\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = idpf_init_vport(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vports.\");\n+\t\treturn ret;\n+\t}\n+\n+\trte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,\n+\t\t\t    &dev->data->mac_addrs[0]);\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_dev_start(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tvport->stopped = 0;\n+\n+\tif (idpf_ena_dis_vport(vport, true)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to enable vport\");\n+\t\tgoto err_vport;\n+\t}\n+\n+\treturn 0;\n+\n+err_vport:\n+\treturn -1;\n+}\n+\n+static int\n+idpf_dev_stop(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (vport->stopped == 1)\n+\t\treturn 0;\n+\n+\tif (idpf_ena_dis_vport(vport, false))\n+\t\tPMD_DRV_LOG(ERR, \"disable vport failed\");\n+\n+\tvport->stopped = 1;\n+\tdev->data->dev_started = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_close(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport 
*)dev->data->dev_private;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\tidpf_dev_stop(dev);\n+\tidpf_destroy_vport(vport);\n+\n+\treturn 0;\n+}\n+\n+static int idpf_parse_devargs(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_devargs *devargs = dev->device->devargs;\n+\tstruct rte_kvargs *kvlist;\n+\tint ret = 0;\n+\n+\tif (!devargs)\n+\t\treturn 0;\n+\n+\tkvlist = rte_kvargs_parse(devargs->args, idpf_valid_args);\n+\tif (!kvlist) {\n+\t\tPMD_INIT_LOG(ERR, \"invalid kvargs key\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_kvargs_free(kvlist);\n+\treturn ret;\n+}\n+\n+static void\n+idpf_reset_pf(struct iecm_hw *hw)\n+{\n+\tuint32_t reg;\n+\n+\treg = IECM_READ_REG(hw, PFGEN_CTRL);\n+\tIECM_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));\n+}\n+\n+#define IDPF_RESET_WAIT_CNT 100\n+static int\n+idpf_check_pf_reset_done(struct iecm_hw *hw)\n+{\n+\tuint32_t reg;\n+\tint i;\n+\n+\tfor (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {\n+\t\treg = IECM_READ_REG(hw, PFGEN_RSTAT);\n+\t\tif (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))\n+\t\t\treturn 0;\n+\t\trte_delay_ms(1000);\n+\t}\n+\n+\tPMD_INIT_LOG(ERR, \"IDPF reset timeout\");\n+\treturn -EBUSY;\n+}\n+\n+#define CTLQ_NUM 2\n+static int\n+idpf_init_mbx(struct iecm_hw *hw)\n+{\n+\tstruct iecm_ctlq_create_info ctlq_info[CTLQ_NUM] = {\n+\t\t{\n+\t\t\t.type = IECM_CTLQ_TYPE_MAILBOX_TX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ATQH,\n+\t\t\t\t.tail = PF_FW_ATQT,\n+\t\t\t\t.len = PF_FW_ATQLEN,\n+\t\t\t\t.bah = PF_FW_ATQBAH,\n+\t\t\t\t.bal = PF_FW_ATQBAL,\n+\t\t\t\t.len_mask = PF_FW_ATQLEN_ATQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,\n+\t\t\t\t.head_mask = PF_FW_ATQH_ATQH_M,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = IECM_CTLQ_TYPE_MAILBOX_RX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ARQH,\n+\t\t\t\t.tail = PF_FW_ARQT,\n+\t\t\t\t.len = PF_FW_ARQLEN,\n+\t\t\t\t.bah = PF_FW_ARQBAH,\n+\t\t\t\t.bal = PF_FW_ARQBAL,\n+\t\t\t\t.len_mask = PF_FW_ARQLEN_ARQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,\n+\t\t\t\t.head_mask = PF_FW_ARQH_ARQH_M,\n+\t\t\t}\n+\t\t}\n+\t};\n+\tstruct iecm_ctlq_info *ctlq;\n+\tint ret = 0;\n+\n+\tret = iecm_ctlq_init(hw, CTLQ_NUM, ctlq_info);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tLIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,\n+\t\t\t\t struct iecm_ctlq_info, cq_list) {\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID && ctlq->cq_type == IECM_CTLQ_TYPE_MAILBOX_TX)\n+\t\t\thw->asq = ctlq;\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID && ctlq->cq_type == IECM_CTLQ_TYPE_MAILBOX_RX)\n+\t\t\thw->arq = ctlq;\n+\t}\n+\n+\tif (!hw->asq || !hw->arq) {\n+\t\tiecm_ctlq_deinit(hw);\n+\t\tret = -ENOENT;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_adapter_init(struct rte_eth_dev *dev)\n+{\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tstruct rte_pci_device *pci_dev = IDPF_DEV_TO_PCI(dev);\n+\tint ret = 0;\n+\n+\tif (adapter->initialized)\n+\t\treturn 0;\n+\n+\thw->hw_addr = (void *)pci_dev->mem_resource[0].addr;\n+\thw->hw_addr_len = pci_dev->mem_resource[0].len;\n+\thw->back = adapter;\n+\thw->vendor_id = pci_dev->id.vendor_id;\n+\thw->device_id = pci_dev->id.device_id;\n+\thw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;\n+\n+\tret = idpf_parse_devargs(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to parse devargs\");\n+\t\tgoto err;\n+\t}\n+\n+\tidpf_reset_pf(hw);\n+\tret = 
idpf_check_pf_reset_done(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"IDPF is still resetting\");\n+\t\tgoto err;\n+\t}\n+\n+\tret = idpf_init_mbx(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init mailbox\");\n+\t\tgoto err;\n+\t}\n+\n+\tadapter->mbx_resp = rte_zmalloc(\"idpf_adapter_mbx_resp\", IDPF_DFLT_MBX_BUF_SIZE, 0);\n+\tif (!adapter->mbx_resp) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_adapter_mbx_resp memory\");\n+\t\tgoto err_mbx;\n+\t}\n+\n+\tif (idpf_check_api_version(adapter)) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to check api version\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tadapter->caps = rte_zmalloc(\"idpf_caps\",\n+\t\t\t       sizeof(struct virtchnl2_get_capabilities), 0);\n+\tif (!adapter->caps) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_caps memory\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tif (idpf_get_caps(adapter)) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to get capabilities\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->max_vport_nb = adapter->caps->max_vports;\n+\n+\tadapter->vport_req_info = rte_zmalloc(\"vport_req_info\",\n+\t\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t\t      sizeof(*adapter->vport_req_info),\n+\t\t\t\t\t      0);\n+\tif (!adapter->vport_req_info) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info memory\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->vport_recv_info = rte_zmalloc(\"vport_recv_info\",\n+\t\t\t\t\t       adapter->max_vport_nb *\n+\t\t\t\t\t       sizeof(*adapter->vport_recv_info),\n+\t\t\t\t\t       0);\n+\tif (!adapter->vport_recv_info) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_recv_info memory\");\n+\t\tgoto err_vport_recv_info;\n+\t}\n+\n+\tadapter->vports = rte_zmalloc(\"vports\",\n+\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t      sizeof(*adapter->vports),\n+\t\t\t\t      0);\n+\tif (!adapter->vports) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vports memory\");\n+\t\tgoto err_vports;\n+\t}\n+\n+\tadapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t       sizeof(struct virtchnl2_config_rx_queues)) /\n+\t\t\t       sizeof(struct virtchnl2_rxq_info);\n+\tadapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t       sizeof(struct virtchnl2_config_tx_queues)) /\n+\t\t\t       sizeof(struct virtchnl2_txq_info);\n+\n+\tadapter->cur_vport_nb = 0;\n+\tadapter->next_vport_idx = 0;\n+\tadapter->initialized = true;\n+\n+\treturn ret;\n+\n+err_vports:\n+\trte_free(adapter->vports);\n+\tadapter->vports = NULL;\n+err_vport_recv_info:\n+\trte_free(adapter->vport_req_info);\n+\tadapter->vport_req_info = NULL;\n+err_caps:\n+\trte_free(adapter->caps);\n+\tadapter->caps = NULL;\n+err_api:\n+\trte_free(adapter->mbx_resp);\n+\tadapter->mbx_resp = NULL;\n+err_mbx:\n+\tiecm_ctlq_deinit(hw);\n+err:\n+\treturn -1;\n+}\n+\n+\n+static int\n+idpf_dev_init(struct rte_eth_dev *dev, __rte_unused void *init_params)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tint ret = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tdev->dev_ops = &idpf_eth_dev_ops;\n+\n+\t/* for secondary processes, we don't initialise any further as primary\n+\t * has already done this work.\n+\t */\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn ret;\n+\n+\tret = idpf_adapter_init(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init adapter.\");\n+\t\treturn ret;\n+\t}\n+\n+\tdev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;\n+\n+\tvport->dev_data = dev->data;\n+\n+\tdev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);\n+\tif (dev->data->mac_addrs 
== NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate mac_addr memory.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+err:\n+\treturn ret;\n+}\n+\n+static int\n+idpf_dev_uninit(struct rte_eth_dev *dev)\n+{\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn -EPERM;\n+\n+\tidpf_dev_close(dev);\n+\n+\treturn 0;\n+}\n+\n+static const struct rte_pci_id pci_id_idpf_map[] = {\n+\t{ RTE_PCI_DEVICE(IECM_INTEL_VENDOR_ID, IECM_DEV_ID_PF) },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+static int\n+idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t      struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };\n+\tchar name[RTE_ETH_NAME_MAX_LEN];\n+\tint i, retval;\n+\n+\tif (pci_dev->device.devargs) {\n+\t\tretval = rte_eth_devargs_parse(pci_dev->device.devargs->args,\n+\t\t\t\t&eth_da);\n+\t\tif (retval)\n+\t\t\treturn retval;\n+\t}\n+\n+\tif (!eth_da.nb_representor_ports) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to probe, need to add representor devargs.\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (!adapter) {\n+\t\tadapter = (struct idpf_adapter *)rte_zmalloc(\"idpf_adapter\",\n+\t\t\t\t\t     sizeof(struct idpf_adapter), 0);\n+\t\tif (!adapter) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate adapter.\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < eth_da.nb_representor_ports; i++) {\n+\t\tsnprintf(name, sizeof(name), \"idpf_vport_%d\",\n+\t\t\t eth_da.representor_ports[i]);\n+\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n+\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t    NULL, NULL, idpf_dev_init,\n+\t\t\t\t\t    NULL);\n+\t\tif (retval)\n+\t\t\tPMD_DRV_LOG(ERR, \"failed to creat vport %d\", i);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+idpf_adapter_rel(struct idpf_adapter *adapter)\n+{\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tint i;\n+\n+\tiecm_ctlq_deinit(hw);\n+\n+\tif (adapter->caps) {\n+\t\trte_free(adapter->caps);\n+\t\tadapter->caps = NULL;\n+\t}\n+\n+\tif (adapter->mbx_resp) {\n+\t\trte_free(adapter->mbx_resp);\n+\t\tadapter->mbx_resp = NULL;\n+\t}\n+\n+\tif (adapter->vport_req_info) {\n+\t\tfor (i = 0; i < adapter->max_vport_nb; i++) {\n+\t\t\tif (adapter->vport_req_info[i]) {\n+\t\t\t\trte_free(adapter->vport_req_info[i]);\n+\t\t\t\tadapter->vport_req_info[i] = NULL;\n+\t\t\t}\n+\t\t}\n+\t\trte_free(adapter->vport_req_info);\n+\t\tadapter->vport_req_info = NULL;\n+\t}\n+\n+\tif (adapter->vport_recv_info) {\n+\t\tfor (i = 0; i < adapter->max_vport_nb; i++) {\n+\t\t\tif (adapter->vport_recv_info[i]) {\n+\t\t\t\trte_free(adapter->vport_recv_info[i]);\n+\t\t\t\tadapter->vport_recv_info[i] = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (adapter->vports) {\n+\t\t/* Needn't free adapter->vports[i] since it's private data */\n+\t\trte_free(adapter->vports);\n+\t\tadapter->vports = NULL;\n+\t}\n+}\n+\n+static int\n+idpf_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tif (adapter) {\n+\t\tidpf_adapter_rel(adapter);\n+\t\trte_free(adapter);\n+\t\tadapter = NULL;\n+\t}\n+\n+\treturn rte_eth_dev_pci_generic_remove(pci_dev, idpf_dev_uninit);\n+}\n+\n+static struct rte_pci_driver rte_idpf_pmd = {\n+\t.id_table\t= pci_id_idpf_map,\n+\t.drv_flags\t= RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |\n+\t\t\t  RTE_PCI_DRV_PROBE_AGAIN,\n+\t.probe\t\t= idpf_pci_probe,\n+\t.remove\t\t= idpf_pci_remove,\n+};\n+\n+/**\n+ * Driver initialization routine.\n+ * Invoked once at EAL init time.\n+ * Register itself as the [Poll Mode] Driver of PCI devices.\n+ */\n+RTE_PMD_REGISTER_PCI(net_idpf, 
rte_idpf_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_ice, \"* igb_uio | uio_pci_generic | vfio-pci\");\n+\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nnew file mode 100644\nindex 0000000000..501f772fa8\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -0,0 +1,205 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_ETHDEV_H_\n+#define _IDPF_ETHDEV_H_\n+\n+#include <stdint.h>\n+#include <rte_mbuf.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n+#include <rte_spinlock.h>\n+#include <rte_ethdev.h>\n+#include <rte_kvargs.h>\n+#include <ethdev_driver.h>\n+\n+#include \"base/iecm_osdep.h\"\n+#include \"base/iecm_type.h\"\n+#include \"base/iecm_devids.h\"\n+#include \"base/iecm_lan_txrx.h\"\n+#include \"base/iecm_lan_pf_regs.h\"\n+#include \"base/virtchnl.h\"\n+#include \"base/virtchnl2.h\"\n+\n+#define IDPF_INVALID_VPORT_IDX\t0xffff\n+#define IDPF_TXQ_PER_GRP\t1\n+#define IDPF_TX_COMPLQ_PER_GRP\t1\n+#define IDPF_RXQ_PER_GRP\t1\n+#define IDPF_RX_BUFQ_PER_GRP\t2\n+\n+#define IDPF_CTLQ_ID\t\t-1\n+#define IDPF_CTLQ_LEN\t\t64\n+#define IDPF_DFLT_MBX_BUF_SIZE\t4096\n+\n+#define IDPF_DFLT_Q_VEC_NUM\t1\n+#define IDPF_DFLT_INTERVAL\t16\n+\n+#define IDPF_MAX_NUM_QUEUES\t256\n+#define IDPF_MIN_BUF_SIZE\t1024\n+#define IDPF_MAX_FRAME_SIZE\t9728\n+\n+#define IDPF_NUM_MACADDR_MAX\t64\n+\n+#define IDPF_MAX_PKT_TYPE\t1024\n+\n+#define IDPF_VLAN_TAG_SIZE\t4\n+#define IDPF_ETH_OVERHEAD \\\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)\n+\n+#ifndef ETH_ADDR_LEN\n+#define ETH_ADDR_LEN\t\t6\n+#endif\n+\n+/* Message type read in virtual channel from PF */\n+enum idpf_vc_result {\n+\tIDPF_MSG_ERR = -1, /* Meet error when accessing admin queue */\n+\tIDPF_MSG_NON,      /* Read nothing from admin queue */\n+\tIDPF_MSG_SYS,      /* Read system msg from admin queue */\n+\tIDPF_MSG_CMD,      /* Read async command result */\n+};\n+\n+struct idpf_chunks_info {\n+\tuint32_t tx_start_qid;\n+\tuint32_t rx_start_qid;\n+\t/* Valid only if split queue model */\n+\tuint32_t tx_compl_start_qid;\n+\tuint32_t rx_buf_start_qid;\n+\n+\tuint64_t tx_qtail_start;\n+\tuint32_t tx_qtail_spacing;\n+\tuint64_t rx_qtail_start;\n+\tuint32_t rx_qtail_spacing;\n+\tuint64_t tx_compl_qtail_start;\n+\tuint32_t tx_compl_qtail_spacing;\n+\tuint64_t rx_buf_qtail_start;\n+\tuint32_t rx_buf_qtail_spacing;\n+};\n+\n+struct idpf_vport {\n+\tstruct idpf_adapter *adapter; /* Backreference to associated adapter */\n+\tuint16_t vport_id;\n+\tuint32_t txq_model;\n+\tuint32_t rxq_model;\n+\tuint16_t num_tx_q;\n+\t/* valid only if txq_model is split Q */\n+\tuint16_t num_tx_complq;\n+\tuint16_t num_rx_q;\n+\t/* valid only if rxq_model is split Q */\n+\tuint16_t num_rx_bufq;\n+\n+\tuint16_t max_mtu;\n+\tuint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\n+\tenum virtchnl_rss_algorithm rss_algorithm;\n+\tuint16_t rss_key_size;\n+\tuint16_t rss_lut_size;\n+\n+\tuint16_t sw_idx; /* SW idx */\n+\n+\tstruct rte_eth_dev_data *dev_data; /* Pointer to the device data */\n+\tuint16_t max_pkt_len; /* Maximum packet length */\n+\n+\t/* RSS info */\n+\tuint32_t *rss_lut;\n+\tuint8_t *rss_key;\n+\tuint64_t rss_hf;\n+\n+\t/* Chunk info */\n+\tstruct idpf_chunks_info chunks_info;\n+\n+\t/* Event from ipf */\n+\tbool link_up;\n+\tuint32_t link_speed;\n+\n+\tbool 
stopped;\n+\tstruct virtchnl2_vport_stats eth_stats_offset;\n+};\n+\n+struct idpf_adapter {\n+\tstruct iecm_hw hw;\n+\n+\tstruct virtchnl_version_info virtchnl_version;\n+\tstruct virtchnl2_get_capabilities *caps;\n+\n+\tvolatile enum virtchnl_ops pend_cmd; /* pending command not finished */\n+\tuint32_t cmd_retval; /* return value of the cmd response from ipf */\n+\tuint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */\n+\n+\tuint32_t txq_model;\n+\tuint32_t rxq_model;\n+\n+\t/* Vport info */\n+\tuint8_t **vport_req_info;\n+\tuint8_t **vport_recv_info;\n+\tstruct idpf_vport **vports;\n+\tuint16_t max_vport_nb;\n+\tuint16_t cur_vport_nb;\n+\tuint16_t next_vport_idx;\n+\n+\t/* Max config queue number per VC message */\n+\tuint32_t max_rxq_per_msg;\n+\tuint32_t max_txq_per_msg;\n+\n+\tuint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;\n+\n+\tbool initialized;\n+\tbool stopped;\n+};\n+\n+extern struct idpf_adapter *adapter;\n+\n+#define IDPF_DEV_TO_PCI(eth_dev)\t\t\\\n+\tRTE_DEV_TO_PCI((eth_dev)->device)\n+\n+/* structure used for sending and checking response of virtchnl ops */\n+struct idpf_cmd_info {\n+\tuint32_t ops;\n+\tuint8_t *in_args;       /* buffer for sending */\n+\tuint32_t in_args_size;  /* buffer size for sending */\n+\tuint8_t *out_buffer;    /* buffer for response */\n+\tuint32_t out_size;      /* buffer size for response */\n+};\n+\n+/* notify current command done. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+_notify_cmd(struct idpf_adapter *adapter, int msg_ret)\n+{\n+\tadapter->cmd_retval = msg_ret;\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+}\n+\n+/* clear current command. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+_clear_cmd(struct idpf_adapter *adapter)\n+{\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+\tadapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;\n+}\n+\n+/* Check there is pending cmd in execution. If none, set new command. 
*/\n+static inline int\n+_atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)\n+{\n+\tint ret = rte_atomic32_cmpset(&adapter->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);\n+\n+\tif (!ret)\n+\t\tPMD_DRV_LOG(ERR, \"There is incomplete cmd %d\", adapter->pend_cmd);\n+\n+\treturn !ret;\n+}\n+\n+void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);\n+int idpf_check_api_version(struct idpf_adapter *adapter);\n+int idpf_get_caps(struct idpf_adapter *adapter);\n+int idpf_create_vport(__rte_unused struct rte_eth_dev *dev);\n+int idpf_destroy_vport(struct idpf_vport *vport);\n+int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);\n+\n+#endif /* _IDPF_ETHDEV_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nnew file mode 100644\nindex 0000000000..4fc15d5b71\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -0,0 +1,476 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+\n+#include <rte_debug.h>\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+#include <rte_dev.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#include \"base/iecm_prototype.h\"\n+\n+#define IDPF_CTLQ_LEN\t64\n+\n+static int\n+idpf_vc_clean(struct idpf_adapter *adapter)\n+{\n+\tstruct iecm_ctlq_msg *q_msg[IDPF_CTLQ_LEN];\n+\tuint16_t num_q_msg = IDPF_CTLQ_LEN;\n+\tstruct iecm_dma_mem *dma_mem;\n+\tint err = 0;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < 10; i++) {\n+\t\terr = iecm_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);\n+\t\tmsleep(20);\n+\t\tif (num_q_msg)\n+\t\t\tbreak;\n+\t}\n+\tif (err)\n+\t\tgoto error;\n+\n+\t/* Empty queue is not an error */\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tdma_mem = q_msg[i]->ctx.indirect.payload;\n+\t\tif (dma_mem) {\n+\t\t\tiecm_free_dma_mem(&adapter->hw, dma_mem);\n+\t\t\trte_free(dma_mem);\n+\t\t}\n+\t\trte_free(q_msg[i]);\n+\t}\n+\n+error:\n+\treturn err;\n+}\n+\n+static int\n+idpf_send_vc_msg(struct idpf_adapter *adapter, enum virtchnl_ops op,\n+\t\t uint16_t msg_size, uint8_t *msg)\n+{\n+\tstruct iecm_ctlq_msg *ctlq_msg;\n+\tstruct iecm_dma_mem *dma_mem;\n+\tint err = 0;\n+\n+\terr = idpf_vc_clean(adapter);\n+\tif (err)\n+\t\tgoto err;\n+\n+\tctlq_msg = (struct iecm_ctlq_msg *)rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct iecm_ctlq_msg), 0);\n+\tif (!ctlq_msg) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tdma_mem = (struct iecm_dma_mem *)rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct iecm_dma_mem), 0);\n+\tif (!dma_mem) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_mem_error;\n+\t}\n+\n+\tdma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;\n+\tiecm_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);\n+\tif (!dma_mem->va) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_alloc_error;\n+\t}\n+\n+\tmemcpy(dma_mem->va, msg, msg_size);\n+\n+\tctlq_msg->opcode = iecm_mbq_opc_send_msg_to_pf;\n+\tctlq_msg->func_id = 0;\n+\tctlq_msg->data_len = msg_size;\n+\tctlq_msg->cookie.mbx.chnl_opcode = op;\n+\tctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;\n+\tctlq_msg->ctx.indirect.payload = dma_mem;\n+\n+\terr = iecm_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);\n+\tif (err)\n+\t\tgoto send_error;\n+\n+\treturn err;\n+\n+send_error:\n+\tiecm_free_dma_mem(&adapter->hw, 
dma_mem);\n+dma_alloc_error:\n+\trte_free(dma_mem);\n+dma_mem_error:\n+\trte_free(ctlq_msg);\n+err:\n+\treturn err;\n+}\n+\n+static enum idpf_vc_result\n+idpf_read_msg_from_ipf(struct idpf_adapter *adapter, uint16_t buf_len,\n+\t\t       uint8_t *buf)\n+{\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tstruct iecm_ctlq_msg ctlq_msg;\n+\tstruct iecm_dma_mem *dma_mem = NULL;\n+\tenum idpf_vc_result result = IDPF_MSG_NON;\n+\tenum virtchnl_ops opcode;\n+\tuint16_t pending = 1;\n+\tint ret;\n+\n+\tret = iecm_ctlq_recv(hw->arq, &pending, &ctlq_msg);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Can't read msg from AQ\");\n+\t\tif (ret != IECM_ERR_CTLQ_NO_WORK)\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\treturn result;\n+\t}\n+\n+\trte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);\n+\n+\topcode = (enum virtchnl_ops)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);\n+\tadapter->cmd_retval =\n+\t\t(enum virtchnl_status_code)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);\n+\n+\tPMD_DRV_LOG(DEBUG, \"CQ from ipf carries opcode %u, retval %d\",\n+\t\t    opcode, adapter->cmd_retval);\n+\n+\tif (opcode == VIRTCHNL2_OP_EVENT) {\n+\t\tstruct virtchnl2_event *ve =\n+\t\t\t(struct virtchnl2_event *)ctlq_msg.ctx.indirect.payload->va;\n+\n+\t\tresult = IDPF_MSG_SYS;\n+\t\tswitch (ve->event) {\n+\t\tcase VIRTCHNL2_EVENT_LINK_CHANGE:\n+\t\t\t/* TBD */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_DRV_LOG(ERR, \"%s: Unknown event %d from ipf\",\n+\t\t\t\t    __func__, ve->event);\n+\t\t\tbreak;\n+\t\t}\n+\t} else {\n+\t\t/* async reply msg on command issued by pf previously */\n+\t\tresult = IDPF_MSG_CMD;\n+\t\tif (opcode != adapter->pend_cmd) {\n+\t\t\tPMD_DRV_LOG(WARNING, \"command mismatch, expect %u, get %u\",\n+\t\t\t\t    adapter->pend_cmd, opcode);\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\t}\n+\t}\n+\n+\tif (ctlq_msg.data_len)\n+\t\tdma_mem = ctlq_msg.ctx.indirect.payload;\n+\telse\n+\t\tpending = 0;\n+\n+\tret = iecm_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);\n+\tif (ret && dma_mem)\n+\t\tiecm_free_dma_mem(hw, dma_mem);\n+\n+\treturn result;\n+}\n+\n+#define MAX_TRY_TIMES 200\n+#define ASQ_DELAY_MS  10\n+\n+static int\n+idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)\n+{\n+\tenum idpf_vc_result result;\n+\tint err = 0;\n+\tint i = 0;\n+\tint ret;\n+\n+\tif (_atomic_set_cmd(adapter, args->ops))\n+\t\treturn -1;\n+\n+\tret = idpf_send_vc_msg(adapter, args->ops,\n+\t\t\t       args->in_args_size,\n+\t\t\t       args->in_args);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to send cmd %d\", args->ops);\n+\t\t_clear_cmd(adapter);\n+\t\treturn ret;\n+\t}\n+\n+\tswitch (args->ops) {\n+\tcase VIRTCHNL_OP_VERSION:\n+\tcase VIRTCHNL2_OP_GET_CAPS:\n+\tcase VIRTCHNL2_OP_CREATE_VPORT:\n+\tcase VIRTCHNL2_OP_DESTROY_VPORT:\n+\tcase VIRTCHNL2_OP_SET_RSS_KEY:\n+\tcase VIRTCHNL2_OP_SET_RSS_LUT:\n+\tcase VIRTCHNL2_OP_SET_RSS_HASH:\n+\tcase VIRTCHNL2_OP_CONFIG_RX_QUEUES:\n+\tcase VIRTCHNL2_OP_CONFIG_TX_QUEUES:\n+\tcase VIRTCHNL2_OP_ENABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_DISABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_ENABLE_VPORT:\n+\tcase VIRTCHNL2_OP_DISABLE_VPORT:\n+\t\t/* for init virtchnl ops, need to poll the response */\n+\t\tdo {\n+\t\t\tresult = idpf_read_msg_from_ipf(adapter,\n+\t\t\t\t\t\t\targs->out_size,\n+\t\t\t\t\t\t\targs->out_buffer);\n+\t\t\tif (result == IDPF_MSG_CMD)\n+\t\t\t\tbreak;\n+\t\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t\t} while (i++ < MAX_TRY_TIMES);\n+\t\tif (i >= MAX_TRY_TIMES ||\n+\t\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\t\terr = 
-1;\n+\t\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t\t    adapter->cmd_retval, args->ops);\n+\t\t}\n+\t\t_clear_cmd(adapter);\n+\t\tbreak;\n+\tdefault:\n+\t\t/* For other virtchnl ops in running time,\n+\t\t * wait for the cmd done flag.\n+\t\t */\n+\t\tdo {\n+\t\t\tif (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)\n+\t\t\t\tbreak;\n+\t\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t\t\t/* If don't read msg or read sys event, continue */\n+\t\t} while (i++ < MAX_TRY_TIMES);\n+\t\t/* If there's no response is received, clear command */\n+\t\tif (i >= MAX_TRY_TIMES  ||\n+\t\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\t\terr = -1;\n+\t\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t\t    adapter->cmd_retval, args->ops);\n+\t\t\t_clear_cmd(adapter);\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_check_api_version(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl_version_info version;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tmemset(&version, 0, sizeof(struct virtchnl_version_info));\n+\tversion.major = VIRTCHNL_VERSION_MAJOR_2;\n+\tversion.minor = VIRTCHNL_VERSION_MINOR_0;\n+\n+\targs.ops = VIRTCHNL_OP_VERSION;\n+\targs.in_args = (uint8_t *)&version;\n+\targs.in_args_size = sizeof(version);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL_OP_VERSION\");\n+\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_get_caps(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_get_capabilities caps_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\t memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));\n+\t caps_msg.csum_caps =\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_GENERIC\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_GENERIC;\n+\n+\t caps_msg.seg_caps =\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_GENERIC;\n+\n+\t caps_msg.rss_caps =\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;\n+\n+\t caps_msg.hsplit_caps =\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L2\t\t|\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L3\t\t|\n+\t\t 
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4\t|\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6;\n+\n+\t caps_msg.rsc_caps =\n+\t\t VIRTCHNL2_CAP_RSC_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV6_SCTP;\n+\n+\t caps_msg.other_caps =\n+\t\t VIRTCHNL2_CAP_RDMA\t\t\t|\n+\t\t VIRTCHNL2_CAP_SRIOV\t\t\t|\n+\t\t VIRTCHNL2_CAP_MACFILTER\t\t|\n+\t\t VIRTCHNL2_CAP_FLOW_DIRECTOR\t\t|\n+\t\t VIRTCHNL2_CAP_SPLITQ_QSCHED\t\t|\n+\t\t VIRTCHNL2_CAP_CRC\t\t\t|\n+\t\t VIRTCHNL2_CAP_WB_ON_ITR\t\t|\n+\t\t VIRTCHNL2_CAP_PROMISC\t\t\t|\n+\t\t VIRTCHNL2_CAP_LINK_SPEED\t\t|\n+\t\t VIRTCHNL2_CAP_VLAN;\n+\n+\targs.ops = VIRTCHNL2_OP_GET_CAPS;\n+\targs.in_args = (uint8_t *)&caps_msg;\n+\targs.in_args_size = sizeof(caps_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_GET_CAPS\");\n+\t\treturn err;\n+\t}\n+\n+\trte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_create_vport(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tuint16_t idx = adapter->next_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_req_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n+\tstruct virtchnl2_create_vport vport_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err = -1;\n+\n+\tmemset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));\n+\tvport_msg.vport_type = vport_req_info->vport_type;\n+\tvport_msg.txq_model = vport_req_info->txq_model;\n+\tvport_msg.rxq_model = vport_req_info->rxq_model;\n+\tvport_msg.num_tx_q = vport_req_info->num_tx_q;\n+\tvport_msg.num_tx_complq = vport_req_info->num_tx_complq;\n+\tvport_msg.num_rx_q = vport_req_info->num_rx_q;\n+\tvport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CREATE_VPORT;\n+\targs.in_args = (uint8_t *)&vport_msg;\n+\targs.in_args_size = sizeof(vport_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\tif (!adapter->vport_recv_info[idx]) {\n+\t\tadapter->vport_recv_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\t\t\t    IDPF_DFLT_MBX_BUF_SIZE, 0);\n+\t\tif (!adapter->vport_recv_info[idx]) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to alloc vport_recv_info.\");\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\trte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,\n+\t\t   IDPF_DFLT_MBX_BUF_SIZE);\n+\treturn err;\n+}\n+\n+int\n+idpf_destroy_vport(struct idpf_vport *vport)\n+{\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id = vport->vport_id;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_DESTROY_VPORT;\n+\targs.in_args = (uint8_t *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)\n+{\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id 
= vport->vport_id;\n+\targs.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :\n+\t\t\t    VIRTCHNL2_OP_DISABLE_VPORT;\n+\targs.in_args = (u8 *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_VPORT\",\n+\t\t\t    enable ? \"ENABLE\" : \"DISABLE\");\n+\t}\n+\n+\treturn err;\n+}\n+\ndiff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build\nnew file mode 100644\nindex 0000000000..3a84162f93\n--- /dev/null\n+++ b/drivers/net/idpf/meson.build\n@@ -0,0 +1,18 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2022 Intel Corporation\n+\n+if is_windows\n+\tbuild = false\n+\treason = 'not supported on Windows'\n+\tsubdir_done()\n+endif\n+\n+subdir('base')\n+objs = [base_objs]\n+\n+sources = files(\n+\t'idpf_ethdev.c',\n+\t'idpf_vchnl.c',\n+)\n+\n+includes += include_directories('base')\ndiff --git a/drivers/net/idpf/version.map b/drivers/net/idpf/version.map\nnew file mode 100644\nindex 0000000000..b7da224860\n--- /dev/null\n+++ b/drivers/net/idpf/version.map\n@@ -0,0 +1,3 @@\n+DPDK_22 {\n+\tlocal: *;\n+};\n\\ No newline at end of file\ndiff --git a/drivers/net/meson.build b/drivers/net/meson.build\nindex e35652fe63..8910154544 100644\n--- a/drivers/net/meson.build\n+++ b/drivers/net/meson.build\n@@ -28,6 +28,7 @@ drivers = [\n         'i40e',\n         'iavf',\n         'ice',\n+        'idpf',\n         'igc',\n         'ionic',\n         'ipn3ke',\n",
    "prefixes": [
        "03/13"
    ]
}
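
The URL fields in the response above can be followed directly. A sketch, again assuming the requests library and the JSON renderer (format=json); the output file name is illustrative, and the context/state field names on check objects are assumptions to verify against your instance:

import requests

patch = requests.get(
    "https://patches.dpdk.org/api/patches/114569/",
    params={"format": "json"}).json()

# "mbox" is the raw patch email, suitable for feeding to git am.
with open("0003-net-idpf-support-device-initialization.patch", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# Each "series" entry carries its own mbox URL covering the whole
# 13-patch series, plus metadata such as the version number.
for series in patch["series"]:
    print(series["version"], series["name"], series["mbox"])

# "checks" and "comments" are further API endpoints; "check" above is the
# aggregate result ("warning"), while this lists the individual CI entries.
for check in requests.get(patch["checks"], params={"format": "json"}).json():
    print(check.get("context"), check.get("state"))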