get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
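
For readers who want to script against this endpoint, the snippet below is a minimal read-only sketch (assuming Python with the `requests` package installed). It fetches the patch shown in the sample response below and prints a few of its fields; no authentication is needed for GET.

import requests

# Fetch the patch resource shown in the sample response below.
url = "http://patches.dpdk.org/api/patches/95465/"
resp = requests.get(url, timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])                # "[02/10] vdpa/sfc: add support for device initialization"
print(patch["state"])               # "changes-requested"
print(patch["submitter"]["email"])  # "vijay.srivastava@xilinx.com"
print(patch["mbox"])                # mbox URL, suitable for piping into `git am`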

GET /api/patches/95465/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95465,
    "url": "http://patches.dpdk.org/api/patches/95465/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210706164418.32615-3-vsrivast@xilinx.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210706164418.32615-3-vsrivast@xilinx.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210706164418.32615-3-vsrivast@xilinx.com",
    "date": "2021-07-06T16:44:10",
    "name": "[02/10] vdpa/sfc: add support for device initialization",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "61a9677c0330542481e9c926378567c80cbe8d78",
    "submitter": {
        "id": 2281,
        "url": "http://patches.dpdk.org/api/people/2281/?format=api",
        "name": "Vijay Srivastava",
        "email": "vijay.srivastava@xilinx.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210706164418.32615-3-vsrivast@xilinx.com/mbox/",
    "series": [
        {
            "id": 17687,
            "url": "http://patches.dpdk.org/api/series/17687/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=17687",
            "date": "2021-07-06T16:44:08",
            "name": "vdpa/sfc: introduce Xilinx vDPA driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/17687/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/95465/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/95465/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 60BA2A0C4A;\n\tWed,  7 Jul 2021 10:25:51 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id F3EC24148B;\n\tWed,  7 Jul 2021 10:25:33 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2080.outbound.protection.outlook.com [40.107.93.80])\n by mails.dpdk.org (Postfix) with ESMTP id 829A74120E\n for <dev@dpdk.org>; Tue,  6 Jul 2021 18:49:22 +0200 (CEST)",
            "from BN1PR13CA0007.namprd13.prod.outlook.com (2603:10b6:408:e2::12)\n by CH2PR02MB6038.namprd02.prod.outlook.com (2603:10b6:610:12::24)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.27; Tue, 6 Jul\n 2021 16:49:18 +0000",
            "from BN1NAM02FT022.eop-nam02.prod.protection.outlook.com\n (2603:10b6:408:e2:cafe::a1) by BN1PR13CA0007.outlook.office365.com\n (2603:10b6:408:e2::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.8 via Frontend\n Transport; Tue, 6 Jul 2021 16:49:18 +0000",
            "from xsj-pvapexch01.xlnx.xilinx.com (149.199.62.198) by\n BN1NAM02FT022.mail.protection.outlook.com (10.13.2.136) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.20.4287.22 via Frontend Transport; Tue, 6 Jul 2021 16:49:18 +0000",
            "from xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) by\n xsj-pvapexch01.xlnx.xilinx.com (172.19.86.40) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.1.2176.2; Tue, 6 Jul 2021 09:49:17 -0700",
            "from smtp.xilinx.com (172.19.127.96) by\n xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) with Microsoft SMTP Server id\n 15.1.2176.2 via Frontend Transport; Tue, 6 Jul 2021 09:49:17 -0700",
            "from [10.177.4.108] (port=54950 helo=xndengvm004108.xilinx.com)\n by smtp.xilinx.com with esmtp (Exim 4.90)\n (envelope-from <vijay.srivastava@xilinx.com>)\n id 1m0oFz-0000pF-LP; Tue, 06 Jul 2021 09:49:12 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=X4JEqOuEtUnVA+1o0sB4voK8S2hQw0cP74kIZcneg3nQ50ANV3h055o0NbAcWO/aHALwc7SYveVf8MlB3gG9+GgI4TLXdPmfQC7hPaKcTBk+slIC0JnFuWUV6ytPZxcti/jHMQiYEnx5s1s3VTx1uRdMneowZ6c7iaEyKGbaC0mCDzW1oHx70y3iifcyk5H7E4mRZ0R/i7V3mLcyNcF31KO1ubiY+6+hd0aGC/G1bYebJ5ClSj+GQlTxLDscUv8vzKs0f9NX0RLEPScb/Z60ZQY33AkgLWWntLBsfBeX74e6m78cpoNCGdLjIlmCYCwN+hzko7SuIvgyIqYR/Qlo5w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=9G/kXqnPyP/+93HG60mebkZmzw9w+UGJF13I1QOXSP0=;\n b=ji5TK9wMRWwVI8faX7inuA4FAxcgBY6exhSl5gr1YKkSRbjPUzI2dFLev+/dX8ndx8N8/v/N23X+lJMV7RtZ5v30YXuS5PYcqk8vfA+BGxGtFR6p1VHhWU7esb7BoQ+OeyscB3lufZODNU3RSjQL2xAPOICs6SRAc7iad3/wvjjli6PrzZp+b4wioGnfM41kWAOiyT+Oy5Kv6vz6nKHii7K0KJkEx/CNCj4jCLnaKBs5O/rTUxIeVibWDhrciPH3H6RjAorqHgRJ15XBZrkBRQRIG8Z+ubrtEzecPBT2/VxDdRUU1DNguTLHT3AiIMdGiLnC92YldkG2/LFF8Hlv0Q==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 149.199.62.198) smtp.rcpttodomain=dpdk.org smtp.mailfrom=xilinx.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=xilinx.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=xilinx.onmicrosoft.com; s=selector2-xilinx-onmicrosoft-com;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=9G/kXqnPyP/+93HG60mebkZmzw9w+UGJF13I1QOXSP0=;\n b=BmGQKVd8UFfu8kuvnY74FWkK4BDhwuO9xwlHqlykspDvju18I6/yUHcfaeWsgFsosI0t67jmvgOE2WNvgYqgIbyaQXiXt6pa4fwoUfqrWlp1QvtzBtN7LJE6cnhCWgAHcl6iM2OgcYTcQ8QikgIK3YGPrYh+wFIK3QoV2Yw0400=",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 149.199.62.198)\n smtp.mailfrom=xilinx.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=xilinx.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of xilinx.com designates\n 149.199.62.198 as permitted sender) receiver=protection.outlook.com;\n client-ip=149.199.62.198; helo=xsj-pvapexch01.xlnx.xilinx.com;",
        "Envelope-to": "dev@dpdk.org, maxime.coquelin@redhat.com, chenbo.xia@intel.com,\n andrew.rybchenko@oktetlabs.ru",
        "From": "Vijay Srivastava <vijay.srivastava@xilinx.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<maxime.coquelin@redhat.com>, <chenbo.xia@intel.com>,\n <andrew.rybchenko@oktetlabs.ru>, Vijay Kumar Srivastava <vsrivast@xilinx.com>",
        "Date": "Tue, 6 Jul 2021 22:14:10 +0530",
        "Message-ID": "<20210706164418.32615-3-vsrivast@xilinx.com>",
        "X-Mailer": "git-send-email 2.25.0",
        "In-Reply-To": "<20210706164418.32615-1-vsrivast@xilinx.com>",
        "References": "<20210706164418.32615-1-vsrivast@xilinx.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e84b5f69-c158-4440-f5cf-08d9409dff3e",
        "X-MS-TrafficTypeDiagnostic": "CH2PR02MB6038:",
        "X-Microsoft-Antispam-PRVS": "\n <CH2PR02MB60381F79B9A738DA5A348818B91B9@CH2PR02MB6038.namprd02.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:1227;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n jHmyQOk29bTdx4hWC15rR599n0vJBd9aBV6tPZ2Z/c2ud8DHO0jnNDAC0Io6zK09KB02VfNamkgR+0UmppnFTbnOOgp8QSUDMYvCCmT9dm6FppVY9X5b7fUn0BZQNn165kLgU0FWtMr+6lb/Pf7vhx0rUcwR4gszmAMZ/XH0ii4wdB+DPM1RvTuPLQ7WQm3LqII/nLQsmN2LoQT5LOlV7xV4Pqk08B0FdUz4Tbf+mcmxHK4P3CQjrSr1agPXekcveEGjJFzZ4OOQ49rfz1HO+5M+FSiSBBGvcyTf12Ze//bnxG+BTk4WzJyycI4Uyo1El7hN2nW0YmP5buUBQMDcwJ1WYay97MasiMEhCOVwfAh5Poia1K90elP+qWPdYLZdOUXKBwQ7WBi270di/SiA5lPQLCeAdSfplXg5wSyxrXL1IfBowppNZsv9Yj17KUqC+9wa8XpIg5K6c41LATq76Un3gZYeVj94sDM7HqQoDP/QrVpEebT6IvrrwWNGowoPoP1HRjFXGnnH+Ft1GZHGfhDcofSRB7qGnN/e1A/bFRjbG6qUyhg+j7jgCfjwgD7ZGD17FaAdPCfUEuDmVtWMmQC9lnFT6lrJBARBtuI7xyc0QVUVXGE0Afhz3fiZnQcwXP9WHYX9tP4hvIxwFCQbUiRvY0/Z+vV2hz9CTu3bvCMm3sKnkEV04oVA1MERTrYV4bKVfiCCITc8/qSGWvtYQzRGWVZpPG1hdaKwK8KOnu8=",
        "X-Forefront-Antispam-Report": "CIP:149.199.62.198; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:xsj-pvapexch01.xlnx.xilinx.com;\n PTR:unknown-62-198.xilinx.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(82310400003)(426003)(26005)(4326008)(54906003)(186003)(2616005)(36860700001)(83380400001)(30864003)(8936002)(356005)(8676002)(2906002)(36756003)(1076003)(47076005)(44832011)(70586007)(7636003)(36906005)(107886003)(7696005)(6916009)(70206006)(336012)(5660300002)(9786002)(498600001)(102446001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "xilinx.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jul 2021 16:49:18.4515 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e84b5f69-c158-4440-f5cf-08d9409dff3e",
        "X-MS-Exchange-CrossTenant-Id": "657af505-d5df-48d0-8300-c31994686c5c",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=657af505-d5df-48d0-8300-c31994686c5c; Ip=[149.199.62.198];\n Helo=[xsj-pvapexch01.xlnx.xilinx.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN1NAM02FT022.eop-nam02.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CH2PR02MB6038",
        "X-Mailman-Approved-At": "Wed, 07 Jul 2021 10:25:28 +0200",
        "Subject": "[dpdk-dev] [PATCH 02/10] vdpa/sfc: add support for device\n initialization",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Vijay Kumar Srivastava <vsrivast@xilinx.com>\n\nAdd HW initialization and vDPA device registration support.\n\nSigned-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>\n---\n doc/guides/vdpadevs/sfc.rst       |   6 +\n drivers/vdpa/sfc/meson.build      |   3 +\n drivers/vdpa/sfc/sfc_vdpa.c       |  23 +++\n drivers/vdpa/sfc/sfc_vdpa.h       |  49 +++++-\n drivers/vdpa/sfc/sfc_vdpa_debug.h |  21 +++\n drivers/vdpa/sfc/sfc_vdpa_hw.c    | 322 ++++++++++++++++++++++++++++++++++++++\n drivers/vdpa/sfc/sfc_vdpa_log.h   |   3 +\n drivers/vdpa/sfc/sfc_vdpa_mcdi.c  |  74 +++++++++\n drivers/vdpa/sfc/sfc_vdpa_ops.c   | 129 +++++++++++++++\n drivers/vdpa/sfc/sfc_vdpa_ops.h   |  36 +++++\n 10 files changed, 665 insertions(+), 1 deletion(-)\n create mode 100644 drivers/vdpa/sfc/sfc_vdpa_debug.h\n create mode 100644 drivers/vdpa/sfc/sfc_vdpa_hw.c\n create mode 100644 drivers/vdpa/sfc/sfc_vdpa_mcdi.c\n create mode 100644 drivers/vdpa/sfc/sfc_vdpa_ops.c\n create mode 100644 drivers/vdpa/sfc/sfc_vdpa_ops.h",
    "diff": "diff --git a/doc/guides/vdpadevs/sfc.rst b/doc/guides/vdpadevs/sfc.rst\nindex 59f990b..abb5900 100644\n--- a/doc/guides/vdpadevs/sfc.rst\n+++ b/doc/guides/vdpadevs/sfc.rst\n@@ -95,3 +95,9 @@ SFC vDPA PMD provides the following log types available for control:\n   Matches a subset of per-port log types registered during runtime.\n   A full name for a particular type may be obtained by appending a\n   dot and a PCI device identifier (``XXXX:XX:XX.X``) to the prefix.\n+\n+- ``pmd.vdpa.sfc.mcdi`` (default level is **notice**)\n+\n+  Extra logging of the communication with the NIC's management CPU.\n+  The format of the log is consumed by the netlogdecode cross-platform\n+  tool. May be managed per-port, as explained above.\ndiff --git a/drivers/vdpa/sfc/meson.build b/drivers/vdpa/sfc/meson.build\nindex d916389..aac7c51 100644\n--- a/drivers/vdpa/sfc/meson.build\n+++ b/drivers/vdpa/sfc/meson.build\n@@ -30,4 +30,7 @@ endforeach\n deps += ['common_sfc_efx', 'bus_pci']\n sources = files(\n \t'sfc_vdpa.c',\n+\t'sfc_vdpa_hw.c',\n+\t'sfc_vdpa_mcdi.c',\n+\t'sfc_vdpa_ops.c',\n )\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa.c b/drivers/vdpa/sfc/sfc_vdpa.c\nindex d8faaca..12e8d6e 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa.c\n+++ b/drivers/vdpa/sfc/sfc_vdpa.c\n@@ -232,6 +232,19 @@ struct sfc_vdpa_adapter *\n \t\tgoto fail_vfio_setup;\n \t}\n \n+\tsfc_vdpa_log_init(sva, \"hw init\");\n+\tif (sfc_vdpa_hw_init(sva) != 0) {\n+\t\tsfc_vdpa_err(sva, \"failed to init HW %s\", pci_dev->name);\n+\t\tgoto fail_hw_init;\n+\t}\n+\n+\tsfc_vdpa_log_init(sva, \"dev init\");\n+\tsva->ops_data = sfc_vdpa_device_init(sva, SFC_VDPA_AS_VF);\n+\tif (sva->ops_data == NULL) {\n+\t\tsfc_vdpa_err(sva, \"failed vDPA dev init %s\", pci_dev->name);\n+\t\tgoto fail_dev_init;\n+\t}\n+\n \tpthread_mutex_lock(&sfc_vdpa_adapter_list_lock);\n \tTAILQ_INSERT_TAIL(&sfc_vdpa_adapter_list, sva, next);\n \tpthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);\n@@ -240,6 +253,12 @@ struct sfc_vdpa_adapter *\n \n \treturn 0;\n \n+fail_dev_init:\n+\tsfc_vdpa_hw_fini(sva);\n+\n+fail_hw_init:\n+\tsfc_vdpa_vfio_teardown(sva);\n+\n fail_vfio_setup:\n fail_set_log_prefix:\n \trte_free(sva);\n@@ -266,6 +285,10 @@ struct sfc_vdpa_adapter *\n \tTAILQ_REMOVE(&sfc_vdpa_adapter_list, sva, next);\n \tpthread_mutex_unlock(&sfc_vdpa_adapter_list_lock);\n \n+\tsfc_vdpa_device_fini(sva->ops_data);\n+\n+\tsfc_vdpa_hw_fini(sva);\n+\n \tsfc_vdpa_vfio_teardown(sva);\n \n \trte_free(sva);\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa.h b/drivers/vdpa/sfc/sfc_vdpa.h\nindex 3b77900..fb97258 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa.h\n@@ -11,14 +11,38 @@\n \n #include <rte_bus_pci.h>\n \n+#include \"sfc_efx.h\"\n+#include \"sfc_efx_mcdi.h\"\n+#include \"sfc_vdpa_debug.h\"\n #include \"sfc_vdpa_log.h\"\n+#include \"sfc_vdpa_ops.h\"\n+\n+#define SFC_VDPA_DEFAULT_MCDI_IOVA\t\t0x200000000000\n \n /* Adapter private data */\n struct sfc_vdpa_adapter {\n \tTAILQ_ENTRY(sfc_vdpa_adapter)\tnext;\n+\t/*\n+\t * PMD setup and configuration is not thread safe. Since it is not\n+\t * performance sensitive, it is better to guarantee thread-safety\n+\t * and add device level lock. 
vDPA control operations which\n+\t * change its state should acquire the lock.\n+\t */\n+\trte_spinlock_t\t\t\tlock;\n \tstruct rte_pci_device\t\t*pdev;\n \tstruct rte_pci_addr\t\tpci_addr;\n \n+\tefx_family_t\t\t\tfamily;\n+\tefx_nic_t\t\t\t*nic;\n+\trte_spinlock_t\t\t\tnic_lock;\n+\n+\tefsys_bar_t\t\t\tmem_bar;\n+\n+\tstruct sfc_efx_mcdi\t\tmcdi;\n+\tsize_t\t\t\t\tmcdi_buff_size;\n+\n+\tuint32_t\t\t\tmax_queue_count;\n+\n \tchar\t\t\t\tlog_prefix[SFC_VDPA_LOG_PREFIX_MAX];\n \tuint32_t\t\t\tlogtype_main;\n \n@@ -26,6 +50,7 @@ struct sfc_vdpa_adapter {\n \tint\t\t\t\tvfio_dev_fd;\n \tint\t\t\t\tvfio_container_fd;\n \tint\t\t\t\tiommu_group_num;\n+\tstruct sfc_vdpa_ops_data\t*ops_data;\n };\n \n uint32_t\n@@ -36,5 +61,27 @@ struct sfc_vdpa_adapter {\n struct sfc_vdpa_adapter *\n sfc_vdpa_get_adapter_by_dev(struct rte_pci_device *pdev);\n \n-#endif  /* _SFC_VDPA_H */\n+int\n+sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva);\n+void\n+sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sa);\n \n+int\n+sfc_vdpa_mcdi_init(struct sfc_vdpa_adapter *sva);\n+void\n+sfc_vdpa_mcdi_fini(struct sfc_vdpa_adapter *sva);\n+\n+int\n+sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,\n+\t\t   size_t len, efsys_mem_t *esmp);\n+\n+void\n+sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);\n+\n+static inline struct sfc_vdpa_adapter *\n+sfc_vdpa_adapter_by_dev_handle(void *dev_handle)\n+{\n+\treturn (struct sfc_vdpa_adapter *)dev_handle;\n+}\n+\n+#endif  /* _SFC_VDPA_H */\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_debug.h b/drivers/vdpa/sfc/sfc_vdpa_debug.h\nnew file mode 100644\nindex 0000000..cfa8cc5\n--- /dev/null\n+++ b/drivers/vdpa/sfc/sfc_vdpa_debug.h\n@@ -0,0 +1,21 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2020-2021 Xilinx, Inc.\n+ */\n+\n+#ifndef _SFC_VDPA_DEBUG_H_\n+#define _SFC_VDPA_DEBUG_H_\n+\n+#include <rte_debug.h>\n+\n+#ifdef RTE_LIBRTE_SFC_VDPA_DEBUG\n+/* Avoid dependency from RTE_LOG_DP_LEVEL to be able to enable debug check\n+ * in the driver only.\n+ */\n+#define SFC_VDPA_ASSERT(exp)\t\t\tRTE_VERIFY(exp)\n+#else\n+/* If the driver debug is not enabled, follow DPDK debug/non-debug */\n+#define SFC_VDPA_ASSERT(exp)\t\t\tRTE_ASSERT(exp)\n+#endif\n+\n+#endif /* _SFC_VDPA_DEBUG_H_ */\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_hw.c b/drivers/vdpa/sfc/sfc_vdpa_hw.c\nnew file mode 100644\nindex 0000000..83f3696\n--- /dev/null\n+++ b/drivers/vdpa/sfc/sfc_vdpa_hw.c\n@@ -0,0 +1,322 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2020-2021 Xilinx, Inc.\n+ */\n+\n+#include <unistd.h>\n+\n+#include <rte_common.h>\n+#include <rte_errno.h>\n+#include <rte_vfio.h>\n+\n+#include \"efx.h\"\n+#include \"sfc_vdpa.h\"\n+#include \"sfc_vdpa_ops.h\"\n+\n+extern uint32_t sfc_logtype_driver;\n+\n+#ifndef PAGE_SIZE\n+#define PAGE_SIZE   (sysconf(_SC_PAGESIZE))\n+#endif\n+\n+int\n+sfc_vdpa_dma_alloc(struct sfc_vdpa_adapter *sva, const char *name,\n+\t\t   size_t len, efsys_mem_t *esmp)\n+{\n+\tvoid *mcdi_buf;\n+\tuint64_t mcdi_iova;\n+\tsize_t mcdi_buff_size;\n+\tint ret;\n+\n+\tmcdi_buff_size = RTE_ALIGN_CEIL(len, PAGE_SIZE);\n+\n+\tsfc_vdpa_log_init(sva, \"name=%s, len=%zu\", name, len);\n+\n+\tmcdi_buf = rte_zmalloc(name, mcdi_buff_size, PAGE_SIZE);\n+\tif (mcdi_buf == NULL) {\n+\t\tsfc_vdpa_err(sva, \"cannot reserve memory for %s: len=%#x: %s\",\n+\t\t\t     name, (unsigned int)len, rte_strerror(rte_errno));\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* IOVA address for MCDI would be re-calculated if mapping\n+\t * using default IOVA would fail.\n+\t * TODO: 
Earlier there was no way to get valid IOVA range.\n+\t * Recently a patch has been submitted to get the IOVA range\n+\t * using ioctl. VFIO_IOMMU_GET_INFO. This patch is available\n+\t * in the kernel version >= 5.4. Support to get the default\n+\t * IOVA address for MCDI buffer using available IOVA range\n+\t * would be added later. Meanwhile default IOVA for MCDI buffer\n+\t * is kept at high mem at 2TB. In case of overlap new available\n+\t * addresses would be searched and same would be used.\n+\t */\n+\tmcdi_iova = SFC_VDPA_DEFAULT_MCDI_IOVA;\n+\n+\tdo {\n+\t\tret = rte_vfio_container_dma_map(sva->vfio_container_fd,\n+\t\t\t\t\t\t (uint64_t)mcdi_buf, mcdi_iova,\n+\t\t\t\t\t\t mcdi_buff_size);\n+\t\tif (ret == 0)\n+\t\t\tbreak;\n+\n+\t\tmcdi_iova = mcdi_iova >> 1;\n+\t\tif (mcdi_iova < mcdi_buff_size)\t{\n+\t\t\tsfc_vdpa_err(sva,\n+\t\t\t\t     \"DMA mapping failed for MCDI : %s\",\n+\t\t\t\t     rte_strerror(rte_errno));\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t} while (ret < 0);\n+\n+\tesmp->esm_addr = mcdi_iova;\n+\tesmp->esm_base = mcdi_buf;\n+\tsva->mcdi_buff_size = mcdi_buff_size;\n+\n+\tsfc_vdpa_info(sva,\n+\t\t      \"DMA name=%s len=%zu => virt=%p iova=%\" PRIx64,\n+\t\t      name, len, esmp->esm_base, esmp->esm_addr);\n+\n+\treturn 0;\n+}\n+\n+void\n+sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp)\n+{\n+\tint ret;\n+\n+\tsfc_vdpa_log_init(sva, \"name=%s\", esmp->esm_mz->name);\n+\n+\tret = rte_vfio_container_dma_unmap(sva->vfio_container_fd,\n+\t\t\t\t\t   (uint64_t)esmp->esm_base,\n+\t\t\t\t\t   esmp->esm_addr, sva->mcdi_buff_size);\n+\tif (ret < 0)\n+\t\tsfc_vdpa_err(sva, \"DMA unmap failed for MCDI : %s\",\n+\t\t\t     rte_strerror(rte_errno));\n+\n+\tsfc_vdpa_info(sva,\n+\t\t      \"DMA free name=%s => virt=%p iova=%\" PRIx64,\n+\t\t      esmp->esm_mz->name, esmp->esm_base, esmp->esm_addr);\n+\n+\trte_free((void *)(esmp->esm_base));\n+\n+\tsva->mcdi_buff_size = 0;\n+\tmemset(esmp, 0, sizeof(*esmp));\n+}\n+\n+static int\n+sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,\n+\t\t      const efx_bar_region_t *mem_ebrp)\n+{\n+\tstruct rte_pci_device *pci_dev = sva->pdev;\n+\tefsys_bar_t *ebp = &sva->mem_bar;\n+\tstruct rte_mem_resource *res =\n+\t\t&pci_dev->mem_resource[mem_ebrp->ebr_index];\n+\n+\tSFC_BAR_LOCK_INIT(ebp, pci_dev->name);\n+\tebp->esb_rid = mem_ebrp->ebr_index;\n+\tebp->esb_dev = pci_dev;\n+\tebp->esb_base = res->addr;\n+\n+\treturn 0;\n+}\n+\n+static void\n+sfc_vdpa_mem_bar_fini(struct sfc_vdpa_adapter *sva)\n+{\n+\tefsys_bar_t *ebp = &sva->mem_bar;\n+\n+\tSFC_BAR_LOCK_DESTROY(ebp);\n+\tmemset(ebp, 0, sizeof(*ebp));\n+}\n+\n+static int\n+sfc_vdpa_nic_probe(struct sfc_vdpa_adapter *sva)\n+{\n+\tefx_nic_t *enp = sva->nic;\n+\tint rc;\n+\n+\trc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);\n+\tif (rc != 0)\n+\t\tsfc_vdpa_err(sva, \"nic probe failed: %s\", rte_strerror(rc));\n+\n+\treturn rc;\n+}\n+\n+static int\n+sfc_vdpa_estimate_resource_limits(struct sfc_vdpa_adapter *sva)\n+{\n+\tefx_drv_limits_t limits;\n+\tint rc;\n+\tuint32_t evq_allocated;\n+\tuint32_t rxq_allocated;\n+\tuint32_t txq_allocated;\n+\tuint32_t max_queue_cnt;\n+\n+\tmemset(&limits, 0, sizeof(limits));\n+\n+\t/* Request at least one Rx and Tx queue */\n+\tlimits.edl_min_rxq_count = 1;\n+\tlimits.edl_min_txq_count = 1;\n+\t/* Management event queue plus event queue for Tx/Rx queue */\n+\tlimits.edl_min_evq_count =\n+\t\t1 + RTE_MAX(limits.edl_min_rxq_count, limits.edl_min_txq_count);\n+\n+\tlimits.edl_max_rxq_count = SFC_VDPA_MAX_QUEUE_PAIRS;\n+\tlimits.edl_max_txq_count = 
SFC_VDPA_MAX_QUEUE_PAIRS;\n+\tlimits.edl_max_evq_count = 1 + SFC_VDPA_MAX_QUEUE_PAIRS;\n+\n+\tSFC_VDPA_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);\n+\tSFC_VDPA_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);\n+\tSFC_VDPA_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);\n+\n+\t/* Configure the minimum required resources needed for the\n+\t * driver to operate, and the maximum desired resources that the\n+\t * driver is capable of using.\n+\t */\n+\tsfc_vdpa_log_init(sva, \"set drv limit\");\n+\tefx_nic_set_drv_limits(sva->nic, &limits);\n+\n+\tsfc_vdpa_log_init(sva, \"init nic\");\n+\trc = efx_nic_init(sva->nic);\n+\tif (rc != 0) {\n+\t\tsfc_vdpa_err(sva, \"nic init failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_nic_init;\n+\t}\n+\n+\t/* Find resource dimensions assigned by firmware to this function */\n+\trc = efx_nic_get_vi_pool(sva->nic, &evq_allocated, &rxq_allocated,\n+\t\t\t\t &txq_allocated);\n+\tif (rc != 0) {\n+\t\tsfc_vdpa_err(sva, \"vi pool get failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_get_vi_pool;\n+\t}\n+\n+\t/* It still may allocate more than maximum, ensure limit */\n+\tevq_allocated = RTE_MIN(evq_allocated, limits.edl_max_evq_count);\n+\trxq_allocated = RTE_MIN(rxq_allocated, limits.edl_max_rxq_count);\n+\ttxq_allocated = RTE_MIN(txq_allocated, limits.edl_max_txq_count);\n+\n+\n+\tmax_queue_cnt = RTE_MIN(rxq_allocated, txq_allocated);\n+\t/* Subtract management EVQ not used for traffic */\n+\tmax_queue_cnt = RTE_MIN(evq_allocated - 1, max_queue_cnt);\n+\n+\tSFC_VDPA_ASSERT(max_queue_cnt > 0);\n+\n+\tsva->max_queue_count = max_queue_cnt;\n+\n+\treturn 0;\n+\n+fail_get_vi_pool:\n+\tefx_nic_fini(sva->nic);\n+fail_nic_init:\n+\tsfc_vdpa_log_init(sva, \"failed: %s\", rte_strerror(rc));\n+\treturn rc;\n+}\n+\n+int\n+sfc_vdpa_hw_init(struct sfc_vdpa_adapter *sva)\n+{\n+\tefx_bar_region_t mem_ebr;\n+\tefx_nic_t *enp;\n+\tint rc;\n+\n+\tsfc_vdpa_log_init(sva, \"entry\");\n+\n+\tsfc_vdpa_log_init(sva, \"get family\");\n+\trc = sfc_efx_family(sva->pdev, &mem_ebr, &sva->family);\n+\tif (rc != 0)\n+\t\tgoto fail_family;\n+\tsfc_vdpa_log_init(sva,\n+\t\t\t  \"family is %u, membar is %u,\"\n+\t\t\t  \"function control window offset is %#\" PRIx64,\n+\t\t\t  sva->family, mem_ebr.ebr_index, mem_ebr.ebr_offset);\n+\n+\tsfc_vdpa_log_init(sva, \"init mem bar\");\n+\trc = sfc_vdpa_mem_bar_init(sva, &mem_ebr);\n+\tif (rc != 0)\n+\t\tgoto fail_mem_bar_init;\n+\n+\tsfc_vdpa_log_init(sva, \"create nic\");\n+\trte_spinlock_init(&sva->nic_lock);\n+\trc = efx_nic_create(sva->family, (efsys_identifier_t *)sva,\n+\t\t\t    &sva->mem_bar, mem_ebr.ebr_offset,\n+\t\t\t    &sva->nic_lock, &enp);\n+\tif (rc != 0) {\n+\t\tsfc_vdpa_err(sva, \"nic create failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_nic_create;\n+\t}\n+\tsva->nic = enp;\n+\n+\tsfc_vdpa_log_init(sva, \"init mcdi\");\n+\trc = sfc_vdpa_mcdi_init(sva);\n+\tif (rc != 0) {\n+\t\tsfc_vdpa_err(sva, \"mcdi init failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_mcdi_init;\n+\t}\n+\n+\tsfc_vdpa_log_init(sva, \"probe nic\");\n+\trc = sfc_vdpa_nic_probe(sva);\n+\tif (rc != 0)\n+\t\tgoto fail_nic_probe;\n+\n+\tsfc_vdpa_log_init(sva, \"reset nic\");\n+\trc = efx_nic_reset(enp);\n+\tif (rc != 0) {\n+\t\tsfc_vdpa_err(sva, \"nic reset failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_nic_reset;\n+\t}\n+\n+\tsfc_vdpa_log_init(sva, \"estimate resource limits\");\n+\trc = sfc_vdpa_estimate_resource_limits(sva);\n+\tif (rc != 0)\n+\t\tgoto fail_estimate_rsrc_limits;\n+\n+\tsfc_vdpa_log_init(sva, 
\"done\");\n+\n+\treturn 0;\n+\n+fail_estimate_rsrc_limits:\n+fail_nic_reset:\n+\tefx_nic_unprobe(enp);\n+\n+fail_nic_probe:\n+\tsfc_vdpa_mcdi_fini(sva);\n+\n+fail_mcdi_init:\n+\tsfc_vdpa_log_init(sva, \"destroy nic\");\n+\tsva->nic = NULL;\n+\tefx_nic_destroy(enp);\n+\n+fail_nic_create:\n+\tsfc_vdpa_mem_bar_fini(sva);\n+\n+fail_mem_bar_init:\n+fail_family:\n+\tsfc_vdpa_log_init(sva, \"failed: %s\", rte_strerror(rc));\n+\treturn rc;\n+}\n+\n+void\n+sfc_vdpa_hw_fini(struct sfc_vdpa_adapter *sva)\n+{\n+\tefx_nic_t *enp = sva->nic;\n+\n+\tsfc_vdpa_log_init(sva, \"entry\");\n+\n+\tsfc_vdpa_log_init(sva, \"unprobe nic\");\n+\tefx_nic_unprobe(enp);\n+\n+\tsfc_vdpa_log_init(sva, \"mcdi fini\");\n+\tsfc_vdpa_mcdi_fini(sva);\n+\n+\tsfc_vdpa_log_init(sva, \"nic fini\");\n+\tefx_nic_fini(enp);\n+\n+\tsfc_vdpa_log_init(sva, \"destroy nic\");\n+\tsva->nic = NULL;\n+\tefx_nic_destroy(enp);\n+\n+\tsfc_vdpa_mem_bar_fini(sva);\n+}\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_log.h b/drivers/vdpa/sfc/sfc_vdpa_log.h\nindex 0a3d6ad..59af790 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_log.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa_log.h\n@@ -21,6 +21,9 @@\n /** Name prefix for the per-device log type used to report basic information */\n #define SFC_VDPA_LOGTYPE_MAIN_STR\tSFC_VDPA_LOGTYPE_PREFIX \"main\"\n \n+/** Device MCDI log type name prefix */\n+#define SFC_VDPA_LOGTYPE_MCDI_STR\tSFC_VDPA_LOGTYPE_PREFIX \"mcdi\"\n+\n #define SFC_VDPA_LOG_PREFIX_MAX\t32\n \n /* Log PMD message, automatically add prefix and \\n */\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_mcdi.c b/drivers/vdpa/sfc/sfc_vdpa_mcdi.c\nnew file mode 100644\nindex 0000000..961d2d3\n--- /dev/null\n+++ b/drivers/vdpa/sfc/sfc_vdpa_mcdi.c\n@@ -0,0 +1,74 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2020-2021 Xilinx, Inc.\n+ */\n+\n+#include \"sfc_efx_mcdi.h\"\n+\n+#include \"sfc_vdpa.h\"\n+#include \"sfc_vdpa_debug.h\"\n+#include \"sfc_vdpa_log.h\"\n+\n+static sfc_efx_mcdi_dma_alloc_cb sfc_vdpa_mcdi_dma_alloc;\n+static int\n+sfc_vdpa_mcdi_dma_alloc(void *cookie, const char *name, size_t len,\n+\t\t\tefsys_mem_t *esmp)\n+{\n+\tstruct sfc_vdpa_adapter *sva = cookie;\n+\n+\treturn sfc_vdpa_dma_alloc(sva, name, len, esmp);\n+}\n+\n+static sfc_efx_mcdi_dma_free_cb sfc_vdpa_mcdi_dma_free;\n+static void\n+sfc_vdpa_mcdi_dma_free(void *cookie, efsys_mem_t *esmp)\n+{\n+\tstruct sfc_vdpa_adapter *sva = cookie;\n+\n+\tsfc_vdpa_dma_free(sva, esmp);\n+}\n+\n+static sfc_efx_mcdi_sched_restart_cb sfc_vdpa_mcdi_sched_restart;\n+static void\n+sfc_vdpa_mcdi_sched_restart(void *cookie)\n+{\n+\tRTE_SET_USED(cookie);\n+}\n+\n+static sfc_efx_mcdi_mgmt_evq_poll_cb sfc_vdpa_mcdi_mgmt_evq_poll;\n+static void\n+sfc_vdpa_mcdi_mgmt_evq_poll(void *cookie)\n+{\n+\tRTE_SET_USED(cookie);\n+}\n+\n+static const struct sfc_efx_mcdi_ops sfc_vdpa_mcdi_ops = {\n+\t.dma_alloc\t= sfc_vdpa_mcdi_dma_alloc,\n+\t.dma_free\t= sfc_vdpa_mcdi_dma_free,\n+\t.sched_restart  = sfc_vdpa_mcdi_sched_restart,\n+\t.mgmt_evq_poll  = sfc_vdpa_mcdi_mgmt_evq_poll,\n+\n+};\n+\n+int\n+sfc_vdpa_mcdi_init(struct sfc_vdpa_adapter *sva)\n+{\n+\tuint32_t logtype;\n+\n+\tsfc_vdpa_log_init(sva, \"entry\");\n+\n+\tlogtype = sfc_vdpa_register_logtype(&(sva->pdev->addr),\n+\t\t\t\t\t    SFC_VDPA_LOGTYPE_MCDI_STR,\n+\t\t\t\t\t    RTE_LOG_NOTICE);\n+\n+\treturn sfc_efx_mcdi_init(&sva->mcdi, logtype,\n+\t\t\t\t sva->log_prefix, sva->nic,\n+\t\t\t\t &sfc_vdpa_mcdi_ops, sva);\n+}\n+\n+void\n+sfc_vdpa_mcdi_fini(struct sfc_vdpa_adapter *sva)\n+{\n+\tsfc_vdpa_log_init(sva, 
\"entry\");\n+\tsfc_efx_mcdi_fini(&sva->mcdi);\n+}\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c\nnew file mode 100644\nindex 0000000..71696be\n--- /dev/null\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c\n@@ -0,0 +1,129 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2020-2021 Xilinx, Inc.\n+ */\n+\n+#include <rte_malloc.h>\n+#include <rte_vdpa.h>\n+#include <rte_vdpa_dev.h>\n+#include <rte_vhost.h>\n+\n+#include \"sfc_vdpa_ops.h\"\n+#include \"sfc_vdpa.h\"\n+\n+/* Dummy functions for mandatory vDPA ops to pass vDPA device registration.\n+ * In subsequent patches these ops would be implemented.\n+ */\n+static int\n+sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)\n+{\n+\tRTE_SET_USED(vdpa_dev);\n+\tRTE_SET_USED(queue_num);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)\n+{\n+\tRTE_SET_USED(vdpa_dev);\n+\tRTE_SET_USED(features);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_get_protocol_features(struct rte_vdpa_device *vdpa_dev,\n+\t\t\t       uint64_t *features)\n+{\n+\tRTE_SET_USED(vdpa_dev);\n+\tRTE_SET_USED(features);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_dev_config(int vid)\n+{\n+\tRTE_SET_USED(vid);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_dev_close(int vid)\n+{\n+\tRTE_SET_USED(vid);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_set_vring_state(int vid, int vring, int state)\n+{\n+\tRTE_SET_USED(vid);\n+\tRTE_SET_USED(vring);\n+\tRTE_SET_USED(state);\n+\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_set_features(int vid)\n+{\n+\tRTE_SET_USED(vid);\n+\n+\treturn -1;\n+}\n+\n+static struct rte_vdpa_dev_ops sfc_vdpa_ops = {\n+\t.get_queue_num = sfc_vdpa_get_queue_num,\n+\t.get_features = sfc_vdpa_get_features,\n+\t.get_protocol_features = sfc_vdpa_get_protocol_features,\n+\t.dev_conf = sfc_vdpa_dev_config,\n+\t.dev_close = sfc_vdpa_dev_close,\n+\t.set_vring_state = sfc_vdpa_set_vring_state,\n+\t.set_features = sfc_vdpa_set_features,\n+};\n+\n+struct sfc_vdpa_ops_data *\n+sfc_vdpa_device_init(void *dev_handle, enum sfc_vdpa_context context)\n+{\n+\tstruct sfc_vdpa_ops_data *ops_data;\n+\tstruct rte_pci_device *pci_dev;\n+\n+\t/* Create vDPA ops context */\n+\tops_data = rte_zmalloc(\"vdpa\", sizeof(struct sfc_vdpa_ops_data), 0);\n+\tif (ops_data == NULL)\n+\t\treturn NULL;\n+\n+\tops_data->vdpa_context = context;\n+\tops_data->dev_handle = dev_handle;\n+\n+\tpci_dev = sfc_vdpa_adapter_by_dev_handle(dev_handle)->pdev;\n+\n+\t/* Register vDPA Device */\n+\tsfc_vdpa_log_init(dev_handle, \"register vDPA device\");\n+\tops_data->vdpa_dev =\n+\t\trte_vdpa_register_device(&pci_dev->device, &sfc_vdpa_ops);\n+\tif (ops_data->vdpa_dev == NULL) {\n+\t\tsfc_vdpa_err(dev_handle, \"vDPA device registration failed\");\n+\t\tgoto fail_register_device;\n+\t}\n+\n+\tops_data->state = SFC_VDPA_STATE_INITIALIZED;\n+\n+\treturn ops_data;\n+\n+fail_register_device:\n+\trte_free(ops_data);\n+\treturn NULL;\n+}\n+\n+void\n+sfc_vdpa_device_fini(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\trte_vdpa_unregister_device(ops_data->vdpa_dev);\n+\n+\trte_free(ops_data);\n+}\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h\nnew file mode 100644\nindex 0000000..817b302\n--- /dev/null\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h\n@@ -0,0 +1,36 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Copyright(c) 2020-2021 Xilinx, Inc.\n+ */\n+\n+#ifndef _SFC_VDPA_OPS_H\n+#define _SFC_VDPA_OPS_H\n+\n+#include <rte_vdpa.h>\n+\n+#define 
SFC_VDPA_MAX_QUEUE_PAIRS\t\t1\n+\n+enum sfc_vdpa_context {\n+\tSFC_VDPA_AS_PF = 0,\n+\tSFC_VDPA_AS_VF\n+};\n+\n+enum sfc_vdpa_state {\n+\tSFC_VDPA_STATE_UNINITIALIZED = 0,\n+\tSFC_VDPA_STATE_INITIALIZED,\n+\tSFC_VDPA_STATE_NSTATES\n+};\n+\n+struct sfc_vdpa_ops_data {\n+\tvoid\t\t\t\t*dev_handle;\n+\tstruct rte_vdpa_device\t\t*vdpa_dev;\n+\tenum sfc_vdpa_context\t\tvdpa_context;\n+\tenum sfc_vdpa_state\t\tstate;\n+};\n+\n+struct sfc_vdpa_ops_data *\n+sfc_vdpa_device_init(void *adapter, enum sfc_vdpa_context context);\n+void\n+sfc_vdpa_device_fini(struct sfc_vdpa_ops_data *ops_data);\n+\n+#endif /* _SFC_VDPA_OPS_H */\n",
    "prefixes": [
        "02/10"
    ]
}
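
The PATCH and PUT methods listed in the Allow header require authentication and sufficient permissions (typically project maintainer rights). The sketch below is illustrative only and rests on a few assumptions: that an API token is presented via an `Authorization: Token ...` header, that `state` and `archived` are writable fields for the authenticated user, and that `accepted` is a valid state slug for this project; the placeholder token is not a real credential.

import requests

# Hypothetical partial update: change the patch state and un-archive it.
url = "http://patches.dpdk.org/api/patches/95465/"
headers = {"Authorization": "Token <your-api-token>"}  # placeholder, not a real token

resp = requests.patch(url,
                      headers=headers,
                      json={"state": "accepted", "archived": False},
                      timeout=30)
resp.raise_for_status()
print(resp.json()["state"])  # reports the state after the update

A PUT request works the same way but is expected to carry a full representation of the writable fields rather than only the ones being changed.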