get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
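
For example, the resource shown below can be fetched programmatically. A minimal sketch using only the Python standard library, assuming the server returns plain JSON when asked via "?format=json" (the patch ID and printed fields come from the response below):

import json
import urllib.request

# Fetch the patch detail resource as plain JSON rather than the
# browsable API page.
url = "https://patches.dpdk.org/api/patches/95468/?format=json"
with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

print(patch["name"])   # "[06/10] vdpa/sfc: add support for dev conf and dev close ops"
print(patch["state"])  # "changes-requested"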

GET /api/patches/95468/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95468,
    "url": "http://patches.dpdk.org/api/patches/95468/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210706164418.32615-7-vsrivast@xilinx.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210706164418.32615-7-vsrivast@xilinx.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210706164418.32615-7-vsrivast@xilinx.com",
    "date": "2021-07-06T16:44:14",
    "name": "[06/10] vdpa/sfc: add support for dev conf and dev close ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "72fd3a13497c567ae876055ff9fdf81ac9dc5984",
    "submitter": {
        "id": 2281,
        "url": "http://patches.dpdk.org/api/people/2281/?format=api",
        "name": "Vijay Srivastava",
        "email": "vijay.srivastava@xilinx.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210706164418.32615-7-vsrivast@xilinx.com/mbox/",
    "series": [
        {
            "id": 17687,
            "url": "http://patches.dpdk.org/api/series/17687/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=17687",
            "date": "2021-07-06T16:44:08",
            "name": "vdpa/sfc: introduce Xilinx vDPA driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/17687/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/95468/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/95468/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 2CF51A0C4A;\n\tWed,  7 Jul 2021 10:26:14 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A0144414A1;\n\tWed,  7 Jul 2021 10:25:37 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2081.outbound.protection.outlook.com [40.107.243.81])\n by mails.dpdk.org (Postfix) with ESMTP id 865F64120E\n for <dev@dpdk.org>; Tue,  6 Jul 2021 18:50:04 +0200 (CEST)",
            "from BN0PR04CA0113.namprd04.prod.outlook.com (2603:10b6:408:ec::28)\n by SA1PR02MB8478.namprd02.prod.outlook.com (2603:10b6:806:1f9::16)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.27; Tue, 6 Jul\n 2021 16:50:01 +0000",
            "from BN1NAM02FT056.eop-nam02.prod.protection.outlook.com\n (2603:10b6:408:ec:cafe::88) by BN0PR04CA0113.outlook.office365.com\n (2603:10b6:408:ec::28) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.20 via Frontend\n Transport; Tue, 6 Jul 2021 16:50:01 +0000",
            "from xsj-pvapexch01.xlnx.xilinx.com (149.199.62.198) by\n BN1NAM02FT056.mail.protection.outlook.com (10.13.2.164) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.20.4287.22 via Frontend Transport; Tue, 6 Jul 2021 16:50:01 +0000",
            "from xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) by\n xsj-pvapexch01.xlnx.xilinx.com (172.19.86.40) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.1.2176.2; Tue, 6 Jul 2021 09:50:00 -0700",
            "from smtp.xilinx.com (172.19.127.96) by\n xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) with Microsoft SMTP Server id\n 15.1.2176.2 via Frontend Transport; Tue, 6 Jul 2021 09:50:00 -0700",
            "from [10.177.4.108] (port=54950 helo=xndengvm004108.xilinx.com)\n by smtp.xilinx.com with esmtp (Exim 4.90)\n (envelope-from <vijay.srivastava@xilinx.com>)\n id 1m0oGl-0000pF-TA; Tue, 06 Jul 2021 09:50:00 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=E+RfxSsAUxPLZg4xw7uZ1ZY9b9RPD/nuHc6nvN2Z4MG3oniUPHdD9jZXqBUhiM/PKIE7POTM5Wo3X4BRmoPMCWcQhMlCzg3ehk7emKJJeznMQG1Ztcg6u1efGnVKfzuYG4CYpXZVqL1vSxhNoIGUw9tWoYgoBxJT9CXkvd+SkK9vTmHrIZgxaVhvljWKyIYDEu86y3b1j8V/U3ZkjDW9FhAKJ9B/mwBTN2tjK+axjZWZBtPPlhFIJ6GyTIvULjS/4+MjJDjvmXXxLm0mwBAvZzvdz6pFgMwMM+K82mLWJbzY/Gg3uUtA65mJmOVCawsSimp/GeSzmWY7j8pZdvyuTw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=F1Oo26l+W9ky0FcHQPhXkgvlbhPb+1VgS8tYrHOOTxA=;\n b=iv4+E8vL+Dcwb5B+ydvDKWzIsw4ikbtjQzNBqquy/6pspSff71lrqiISi0VV0a3aVIoIwaxTMoVQO7tdXyp7ST8tdX5oZYp4qsMG/wRZb5xQOKKo4OI7Xaxilufbk+1GLWs4wYfOiGbSpksYi+HdNY9ensP/84R85Tg583lJ+e7HOPbbWXsvaiwg2cFAy5DmZ1IYZyYpXYMmfrO8mE//FnF2rJe0GIRdI9HlfOy00JWZRuDbGrUkcufJkhkICbqZtGOaPNoJXHG53goJxvqPr2wjAx9eBgqPPVtlf3rKk6EE/T8dP1bCXDipL61fA6cqpK04+/rV0ImS6TtpfPKZMA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 149.199.62.198) smtp.rcpttodomain=dpdk.org smtp.mailfrom=xilinx.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=xilinx.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=xilinx.onmicrosoft.com; s=selector2-xilinx-onmicrosoft-com;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=F1Oo26l+W9ky0FcHQPhXkgvlbhPb+1VgS8tYrHOOTxA=;\n b=n+AqLNGPoSmVEdgtIehU1fiWnWk8LpOu93QxkT4bBCWo2jSoeRlH36iFWKRP48kxqy1cfxdfyAUoD6bzTum4OKHshCffGSfzNRmw97DssP4/5PHUi0TFYgMIEj2AA1/OLw0PM9QB6FrnWtvxE8OYdQnW0dMzHjc5ZVYEdztlu4s=",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 149.199.62.198)\n smtp.mailfrom=xilinx.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=xilinx.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of xilinx.com designates\n 149.199.62.198 as permitted sender) receiver=protection.outlook.com;\n client-ip=149.199.62.198; helo=xsj-pvapexch01.xlnx.xilinx.com;",
        "Envelope-to": "dev@dpdk.org, maxime.coquelin@redhat.com, chenbo.xia@intel.com,\n andrew.rybchenko@oktetlabs.ru",
        "From": "Vijay Srivastava <vijay.srivastava@xilinx.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<maxime.coquelin@redhat.com>, <chenbo.xia@intel.com>,\n <andrew.rybchenko@oktetlabs.ru>, Vijay Kumar Srivastava <vsrivast@xilinx.com>",
        "Date": "Tue, 6 Jul 2021 22:14:14 +0530",
        "Message-ID": "<20210706164418.32615-7-vsrivast@xilinx.com>",
        "X-Mailer": "git-send-email 2.25.0",
        "In-Reply-To": "<20210706164418.32615-1-vsrivast@xilinx.com>",
        "References": "<20210706164418.32615-1-vsrivast@xilinx.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "a847b785-ebbd-4ace-5b30-08d9409e18d9",
        "X-MS-TrafficTypeDiagnostic": "SA1PR02MB8478:",
        "X-Microsoft-Antispam-PRVS": "\n <SA1PR02MB84784489A5809473A4C99622B91B9@SA1PR02MB8478.namprd02.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:183;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n FFRkBS7+ZyQ/XyUTPenxr+BkD7wwnAuGOD6XczhOjtZ30esafjSe0MNO8XQ1np6C1wZQK0Jouc3ZXjAVRBAcSvsqx4H9H++yXWlOxfjHkxvGOozmeU5XMHUR8bq+lx+i7yfzGb9tGE/TlM0cVKUf0CnJO3cOkgluOjJlx3tMFNrO+z4/AOK5sXt0M6SU5sT0wAmW0PhMAlDsJMsEfLc33842dQzOg/ddHtA1DdDNjxiLRrCeQrmywApmILhMJ3RRKDPp8wbpoNciq5/yVWy2uyFBm6rNq0KsVATU3KVPpC8/dR6w3SifBShbWFMOaq+o+Hx8JOb4GcBaeTf6zB19UFIt1e7OWuqyj17GRBvsXFwB4DB9z3tW8lhuupB/NFXA03AGvtIhzUNXDBGEFAnoqa6TaNsBPcqlBnahH8s3DoXAMVY+p7TED+IvSrhYSottS9cOxjryV8cGSz2sGBCF9VLvaFZlpvZHzreJ5KF+9psvQCmhD2c25JaxeCPQIGUbtdJxsMzwvAK2Ky0fC2it/+cU0B+Htr1jSSflPn33o/+ZLBHFbBsmU6HXP53FvNFUMAF/fvpdt2eSOB/Ciq7kcaTV5YfhyEjDtTUe3xYZlW1IE1OSsEniLVprwp+LbYwxuianL7TFO8QLV/qgJLQnrJ6anP0g9/VVLjX/+wNWhrgbaGKmYC7kWALOMwRtM7hU++BK2lGVbyGBFtBM+HbdAcXe+eOdn/FCvcUteq+aRUk=",
        "X-Forefront-Antispam-Report": "CIP:149.199.62.198; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:xsj-pvapexch01.xlnx.xilinx.com;\n PTR:unknown-62-198.xilinx.com; CAT:NONE;\n SFS:(4636009)(136003)(39860400002)(376002)(346002)(396003)(36840700001)(46966006)(36906005)(316002)(70586007)(44832011)(82740400003)(2616005)(8936002)(70206006)(83380400001)(54906003)(478600001)(26005)(47076005)(36756003)(4326008)(9786002)(82310400003)(6666004)(336012)(8676002)(426003)(186003)(5660300002)(30864003)(7636003)(107886003)(356005)(6916009)(1076003)(2906002)(36860700001)(7696005)(102446001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "xilinx.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jul 2021 16:50:01.4054 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n a847b785-ebbd-4ace-5b30-08d9409e18d9",
        "X-MS-Exchange-CrossTenant-Id": "657af505-d5df-48d0-8300-c31994686c5c",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=657af505-d5df-48d0-8300-c31994686c5c; Ip=[149.199.62.198];\n Helo=[xsj-pvapexch01.xlnx.xilinx.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN1NAM02FT056.eop-nam02.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SA1PR02MB8478",
        "X-Mailman-Approved-At": "Wed, 07 Jul 2021 10:25:28 +0200",
        "Subject": "[dpdk-dev] [PATCH 06/10] vdpa/sfc: add support for dev conf and dev\n close ops",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Vijay Kumar Srivastava <vsrivast@xilinx.com>\n\nImplement vDPA ops dev_conf and dev_close for DMA mapping,\ninterrupt and virtqueue configurations.\n\nSigned-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>\n---\n drivers/vdpa/sfc/sfc_vdpa.c     |   6 +\n drivers/vdpa/sfc/sfc_vdpa.h     |  43 ++++\n drivers/vdpa/sfc/sfc_vdpa_hw.c  |  70 ++++++\n drivers/vdpa/sfc/sfc_vdpa_ops.c | 527 ++++++++++++++++++++++++++++++++++++++--\n drivers/vdpa/sfc/sfc_vdpa_ops.h |  28 +++\n 5 files changed, 654 insertions(+), 20 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/sfc/sfc_vdpa.c b/drivers/vdpa/sfc/sfc_vdpa.c\nindex 9c12dcb..ca13483 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa.c\n+++ b/drivers/vdpa/sfc/sfc_vdpa.c\n@@ -246,6 +246,8 @@ struct sfc_vdpa_ops_data *\n \n \tsfc_vdpa_log_init(sva, \"entry\");\n \n+\tsfc_vdpa_adapter_lock_init(sva);\n+\n \tsfc_vdpa_log_init(sva, \"vfio init\");\n \tif (sfc_vdpa_vfio_setup(sva) < 0) {\n \t\tsfc_vdpa_err(sva, \"failed to setup device %s\", pci_dev->name);\n@@ -280,6 +282,8 @@ struct sfc_vdpa_ops_data *\n \tsfc_vdpa_vfio_teardown(sva);\n \n fail_vfio_setup:\n+\tsfc_vdpa_adapter_lock_fini(sva);\n+\n fail_set_log_prefix:\n \trte_free(sva);\n \n@@ -311,6 +315,8 @@ struct sfc_vdpa_ops_data *\n \n \tsfc_vdpa_vfio_teardown(sva);\n \n+\tsfc_vdpa_adapter_lock_fini(sva);\n+\n \trte_free(sva);\n \n \treturn 0;\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa.h b/drivers/vdpa/sfc/sfc_vdpa.h\nindex 08075e5..b103b0a 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa.h\n@@ -80,10 +80,53 @@ struct sfc_vdpa_ops_data *\n void\n sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);\n \n+int\n+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *vdpa_data, bool do_map);\n+\n static inline struct sfc_vdpa_adapter *\n sfc_vdpa_adapter_by_dev_handle(void *dev_handle)\n {\n \treturn (struct sfc_vdpa_adapter *)dev_handle;\n }\n \n+/*\n+ * Add wrapper functions to acquire/release lock to be able to remove or\n+ * change the lock in one place.\n+ */\n+static inline void\n+sfc_vdpa_adapter_lock_init(struct sfc_vdpa_adapter *sva)\n+{\n+\trte_spinlock_init(&sva->lock);\n+}\n+\n+static inline int\n+sfc_vdpa_adapter_is_locked(struct sfc_vdpa_adapter *sva)\n+{\n+\treturn rte_spinlock_is_locked(&sva->lock);\n+}\n+\n+static inline void\n+sfc_vdpa_adapter_lock(struct sfc_vdpa_adapter *sva)\n+{\n+\trte_spinlock_lock(&sva->lock);\n+}\n+\n+static inline int\n+sfc_vdpa_adapter_trylock(struct sfc_vdpa_adapter *sva)\n+{\n+\treturn rte_spinlock_trylock(&sva->lock);\n+}\n+\n+static inline void\n+sfc_vdpa_adapter_unlock(struct sfc_vdpa_adapter *sva)\n+{\n+\trte_spinlock_unlock(&sva->lock);\n+}\n+\n+static inline void\n+sfc_vdpa_adapter_lock_fini(__rte_unused struct sfc_vdpa_adapter *sva)\n+{\n+\t/* Just for symmetry of the API */\n+}\n+\n #endif  /* _SFC_VDPA_H */\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_hw.c b/drivers/vdpa/sfc/sfc_vdpa_hw.c\nindex 84e680f..047bcc4 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_hw.c\n+++ b/drivers/vdpa/sfc/sfc_vdpa_hw.c\n@@ -8,6 +8,7 @@\n #include <rte_common.h>\n #include <rte_errno.h>\n #include <rte_vfio.h>\n+#include <rte_vhost.h>\n \n #include \"efx.h\"\n #include \"sfc_vdpa.h\"\n@@ -104,6 +105,75 @@\n \tmemset(esmp, 0, sizeof(*esmp));\n }\n \n+int\n+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)\n+{\n+\tuint32_t i, j;\n+\tint rc;\n+\tstruct rte_vhost_memory *vhost_mem = NULL;\n+\tstruct rte_vhost_mem_region *mem_reg = NULL;\n+\tint vfio_container_fd;\n+\tvoid *dev;\n+\n+\tdev = ops_data->dev_handle;\n+\tvfio_container_fd =\n+\t\tsfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;\n+\n+\trc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);\n+\tif (rc < 0) {\n+\t\tsfc_vdpa_err(dev,\n+\t\t\t     \"failed to get VM memory layout\");\n+\t\tgoto error;\n+\t}\n+\n+\tfor (i = 0; i < vhost_mem->nregions; i++) {\n+\t\tmem_reg = &vhost_mem->regions[i];\n+\n+\t\tif (do_map) {\n+\t\t\trc = rte_vfio_container_dma_map(vfio_container_fd,\n+\t\t\t\t\t\tmem_reg->host_user_addr,\n+\t\t\t\t\t\tmem_reg->guest_phys_addr,\n+\t\t\t\t\t\tmem_reg->size);\n+\t\t\tif (rc < 
0) {\n+\t\t\t\tsfc_vdpa_err(dev,\n+\t\t\t\t\t     \"DMA map failed : %s\",\n+\t\t\t\t\t     rte_strerror(rte_errno));\n+\t\t\t\tgoto failed_vfio_dma_map;\n+\t\t\t}\n+\t\t} else {\n+\t\t\trc = rte_vfio_container_dma_unmap(vfio_container_fd,\n+\t\t\t\t\t\tmem_reg->host_user_addr,\n+\t\t\t\t\t\tmem_reg->guest_phys_addr,\n+\t\t\t\t\t\tmem_reg->size);\n+\t\t\tif (rc < 0) {\n+\t\t\t\tsfc_vdpa_err(dev,\n+\t\t\t\t\t     \"DMA unmap failed : %s\",\n+\t\t\t\t\t     rte_strerror(rte_errno));\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tfree(vhost_mem);\n+\n+\treturn 0;\n+\n+failed_vfio_dma_map:\n+\tfor (j = 0; j < i; j++) {\n+\t\tmem_reg = &vhost_mem->regions[j];\n+\t\trc = rte_vfio_container_dma_unmap(vfio_container_fd,\n+\t\t\t\t\t\t  mem_reg->host_user_addr,\n+\t\t\t\t\t\t  mem_reg->guest_phys_addr,\n+\t\t\t\t\t\t  mem_reg->size);\n+\t}\n+\n+error:\n+\tif (vhost_mem)\n+\t\tfree(vhost_mem);\n+\n+\treturn rc;\n+}\n+\n static int\n sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,\n \t\t      const efx_bar_region_t *mem_ebrp)\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c\nindex 5253adb..4228044 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_ops.c\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c\n@@ -3,10 +3,13 @@\n  * Copyright(c) 2020-2021 Xilinx, Inc.\n  */\n \n+#include <sys/ioctl.h>\n+\n #include <rte_errno.h>\n #include <rte_malloc.h>\n #include <rte_vdpa.h>\n #include <rte_vdpa_dev.h>\n+#include <rte_vfio.h>\n #include <rte_vhost.h>\n \n #include \"efx.h\"\n@@ -28,24 +31,12 @@\n #define SFC_VDPA_DEFAULT_FEATURES \\\n \t\t(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)\n \n-static int\n-sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)\n-{\n-\tstruct sfc_vdpa_ops_data *ops_data;\n-\tvoid *dev;\n-\n-\tops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);\n-\tif (ops_data == NULL)\n-\t\treturn -1;\n-\n-\tdev = ops_data->dev_handle;\n-\t*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;\n+#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \\\n+\t\t(sizeof(struct vfio_irq_set) + \\\n+\t\tsizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))\n \n-\tsfc_vdpa_info(dev, \"vDPA ops get_queue_num :: supported queue num : %d\",\n-\t\t      *queue_num);\n-\n-\treturn 0;\n-}\n+/* It will be used for target VF when calling function is not PF */\n+#define SFC_VDPA_VF_NULL\t\t0xFFFF\n \n static int\n sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)\n@@ -74,6 +65,438 @@\n \treturn 0;\n }\n \n+static uint64_t\n+hva_to_gpa(int vid, uint64_t hva)\n+{\n+\tstruct rte_vhost_memory *vhost_mem = NULL;\n+\tstruct rte_vhost_mem_region *mem_reg = NULL;\n+\tuint32_t i;\n+\tuint64_t gpa = 0;\n+\n+\tif (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)\n+\t\tgoto error;\n+\n+\tfor (i = 0; i < vhost_mem->nregions; i++) {\n+\t\tmem_reg = &vhost_mem->regions[i];\n+\n+\t\tif (hva >= mem_reg->host_user_addr &&\n+\t\t\t\thva < mem_reg->host_user_addr + mem_reg->size) {\n+\t\t\tgpa = (hva - mem_reg->host_user_addr) +\n+\t\t\t\tmem_reg->guest_phys_addr;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+error:\n+\tif (vhost_mem)\n+\t\tfree(vhost_mem);\n+\treturn gpa;\n+}\n+\n+static int\n+sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint rc;\n+\tint *irq_fd_ptr;\n+\tint vfio_dev_fd;\n+\tuint32_t i, num_vring;\n+\tstruct rte_vhost_vring vring;\n+\tstruct vfio_irq_set *irq_set;\n+\tstruct rte_pci_device *pci_dev;\n+\tchar irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];\n+\tvoid *dev;\n+\n+\tnum_vring = rte_vhost_get_vring_num(ops_data->vid);\n+\tdev = 
ops_data->dev_handle;\n+\tvfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;\n+\tpci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;\n+\n+\tirq_set = (struct vfio_irq_set *)irq_set_buf;\n+\tirq_set->argsz = sizeof(irq_set_buf);\n+\tirq_set->count = num_vring + 1;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |\n+\t\t\t VFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = 0;\n+\tirq_fd_ptr = (int *)&irq_set->data;\n+\tirq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = pci_dev->intr_handle.fd;\n+\n+\tfor (i = 0; i < num_vring; i++) {\n+\t\trte_vhost_get_vhost_vring(ops_data->vid, i, &vring);\n+\t\tirq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;\n+\t}\n+\n+\trc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (rc) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"error enabling MSI-X interrupts: %s\",\n+\t\t\t     strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint rc;\n+\tint vfio_dev_fd;\n+\tstruct vfio_irq_set *irq_set;\n+\tchar irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];\n+\tvoid *dev;\n+\n+\tdev = ops_data->dev_handle;\n+\tvfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;\n+\n+\tirq_set = (struct vfio_irq_set *)irq_set_buf;\n+\tirq_set->argsz = sizeof(irq_set_buf);\n+\tirq_set->count = 0;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = 0;\n+\n+\trc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (rc) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"error disabling MSI-X interrupts: %s\",\n+\t\t\t     strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,\n+\t\t\tint vq_num, struct sfc_vdpa_vring_info *vring)\n+{\n+\tint rc;\n+\tuint64_t gpa;\n+\tstruct rte_vhost_vring vq;\n+\n+\trc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);\n+\tif (rc < 0) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"get vhost vring failed: %s\", rte_strerror(rc));\n+\t\treturn rc;\n+\t}\n+\n+\tgpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);\n+\tif (gpa == 0) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"fail to get GPA for descriptor ring.\");\n+\t\tgoto fail_vring_map;\n+\t}\n+\tvring->desc = gpa;\n+\n+\tgpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);\n+\tif (gpa == 0) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"fail to get GPA for available ring.\");\n+\t\tgoto fail_vring_map;\n+\t}\n+\tvring->avail = gpa;\n+\n+\tgpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);\n+\tif (gpa == 0) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"fail to get GPA for used ring.\");\n+\t\tgoto fail_vring_map;\n+\t}\n+\tvring->used = gpa;\n+\n+\tvring->size = vq.size;\n+\n+\trc = rte_vhost_get_vring_base(ops_data->vid, vq_num,\n+\t\t\t\t      &vring->last_avail_idx,\n+\t\t\t\t      &vring->last_used_idx);\n+\n+\treturn rc;\n+\n+fail_vring_map:\n+\treturn -1;\n+}\n+\n+static int\n+sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)\n+{\n+\tint rc;\n+\tefx_virtio_vq_t *vq;\n+\tstruct sfc_vdpa_vring_info vring;\n+\tefx_virtio_vq_cfg_t vq_cfg;\n+\tefx_virtio_vq_dyncfg_t vq_dyncfg;\n+\n+\tvq = ops_data->vq_cxt[vq_num].vq;\n+\tif (vq == NULL)\n+\t\treturn -1;\n+\n+\trc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);\n+\tif (rc < 0) 
{\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"get vring info failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_vring_info;\n+\t}\n+\n+\tvq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;\n+\n+\t/* even virtqueue for RX and odd for TX */\n+\tif (vq_num % 2) {\n+\t\tvq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;\n+\t\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t\t      \"configure virtqueue # %d (TXQ)\", vq_num);\n+\t} else {\n+\t\tvq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;\n+\t\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t\t      \"configure virtqueue # %d (RXQ)\", vq_num);\n+\t}\n+\n+\tvq_cfg.evvc_vq_num = vq_num;\n+\tvq_cfg.evvc_desc_tbl_addr   = vring.desc;\n+\tvq_cfg.evvc_avail_ring_addr = vring.avail;\n+\tvq_cfg.evvc_used_ring_addr  = vring.used;\n+\tvq_cfg.evvc_vq_size = vring.size;\n+\n+\tvq_dyncfg.evvd_vq_pidx = vring.last_used_idx;\n+\tvq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;\n+\n+\t/* MSI-X vector is function-relative */\n+\tvq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;\n+\tif (ops_data->vdpa_context == SFC_VDPA_AS_VF)\n+\t\tvq_cfg.evvc_pas_id = 0;\n+\tvq_cfg.evcc_features = ops_data->dev_features &\n+\t\t\t       ops_data->req_features;\n+\n+\t/* Start virtqueue */\n+\trc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);\n+\tif (rc != 0) {\n+\t\t/* destroy virtqueue */\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"virtqueue start failed: %s\",\n+\t\t\t     rte_strerror(rc));\n+\t\tefx_virtio_qdestroy(vq);\n+\t\tgoto fail_virtio_qstart;\n+\t}\n+\n+\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t      \"virtqueue started successfully for vq_num %d\", vq_num);\n+\n+\tops_data->vq_cxt[vq_num].enable = B_TRUE;\n+\n+\treturn rc;\n+\n+fail_virtio_qstart:\n+fail_vring_info:\n+\treturn rc;\n+}\n+\n+static int\n+sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)\n+{\n+\tint rc;\n+\tefx_virtio_vq_dyncfg_t vq_idx;\n+\tefx_virtio_vq_t *vq;\n+\n+\tif (ops_data->vq_cxt[vq_num].enable != B_TRUE)\n+\t\treturn -1;\n+\n+\tvq = ops_data->vq_cxt[vq_num].vq;\n+\tif (vq == NULL)\n+\t\treturn -1;\n+\n+\t/* stop the vq */\n+\trc = efx_virtio_qstop(vq, &vq_idx);\n+\tif (rc == 0) {\n+\t\tops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;\n+\t\tops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;\n+\t}\n+\tops_data->vq_cxt[vq_num].enable = B_FALSE;\n+\n+\treturn rc;\n+}\n+\n+static int\n+sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint rc, i;\n+\tint nr_vring;\n+\tint max_vring_cnt;\n+\tefx_virtio_vq_t *vq;\n+\tefx_nic_t *nic;\n+\tvoid *dev;\n+\n+\tdev = ops_data->dev_handle;\n+\tnic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;\n+\n+\tSFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);\n+\n+\tops_data->state = SFC_VDPA_STATE_CONFIGURING;\n+\n+\tnr_vring = rte_vhost_get_vring_num(ops_data->vid);\n+\tmax_vring_cnt =\n+\t\t(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);\n+\n+\t/* number of vring should not be more than supported max vq count */\n+\tif (nr_vring > max_vring_cnt) {\n+\t\tsfc_vdpa_err(dev,\n+\t\t\t     \"nr_vring (%d) is > max vring count (%d)\",\n+\t\t\t     nr_vring, max_vring_cnt);\n+\t\tgoto fail_vring_num;\n+\t}\n+\n+\trc = sfc_vdpa_dma_map(ops_data, true);\n+\tif (rc) {\n+\t\tsfc_vdpa_err(dev,\n+\t\t\t     \"DMA map failed: %s\", rte_strerror(rc));\n+\t\tgoto fail_dma_map;\n+\t}\n+\n+\tfor (i = 0; i < nr_vring; i++) {\n+\t\trc = efx_virtio_qcreate(nic, &vq);\n+\t\tif ((rc != 0) || (vq == NULL)) {\n+\t\t\tsfc_vdpa_err(dev,\n+\t\t\t\t     \"virtqueue create failed: %s\",\n+\t\t\t\t     
rte_strerror(rc));\n+\t\t\tgoto fail_vq_create;\n+\t\t}\n+\n+\t\t/* store created virtqueue context */\n+\t\tops_data->vq_cxt[i].vq = vq;\n+\t}\n+\n+\tops_data->vq_count = i;\n+\n+\tops_data->state = SFC_VDPA_STATE_CONFIGURED;\n+\n+\treturn 0;\n+\n+fail_vq_create:\n+\tsfc_vdpa_dma_map(ops_data, false);\n+\n+fail_dma_map:\n+fail_vring_num:\n+\tops_data->state = SFC_VDPA_STATE_INITIALIZED;\n+\n+\treturn -1;\n+}\n+\n+static void\n+sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint i;\n+\n+\tif (ops_data->state != SFC_VDPA_STATE_CONFIGURED)\n+\t\treturn;\n+\n+\tops_data->state = SFC_VDPA_STATE_CLOSING;\n+\n+\tfor (i = 0; i < ops_data->vq_count; i++) {\n+\t\tif (ops_data->vq_cxt[i].vq == NULL)\n+\t\t\tcontinue;\n+\n+\t\tefx_virtio_qdestroy(ops_data->vq_cxt[i].vq);\n+\t}\n+\n+\tsfc_vdpa_dma_map(ops_data, false);\n+\n+\tops_data->state = SFC_VDPA_STATE_INITIALIZED;\n+}\n+\n+static void\n+sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint i;\n+\tint rc;\n+\n+\tif (ops_data->state != SFC_VDPA_STATE_STARTED)\n+\t\treturn;\n+\n+\tops_data->state = SFC_VDPA_STATE_STOPPING;\n+\n+\tfor (i = 0; i < ops_data->vq_count; i++) {\n+\t\trc = sfc_vdpa_virtq_stop(ops_data, i);\n+\t\tif (rc != 0)\n+\t\t\tcontinue;\n+\t}\n+\n+\tsfc_vdpa_disable_vfio_intr(ops_data);\n+\n+\tops_data->state = SFC_VDPA_STATE_CONFIGURED;\n+}\n+\n+static int\n+sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)\n+{\n+\tint i, j;\n+\tint rc;\n+\n+\tSFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"entry\");\n+\n+\tops_data->state = SFC_VDPA_STATE_STARTING;\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"enable interrupts\");\n+\trc = sfc_vdpa_enable_vfio_intr(ops_data);\n+\tif (rc < 0) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"vfio intr allocation failed: %s\",\n+\t\t\t     rte_strerror(rc));\n+\t\tgoto fail_enable_vfio_intr;\n+\t}\n+\n+\trte_vhost_get_negotiated_features(ops_data->vid,\n+\t\t\t\t\t  &ops_data->req_features);\n+\n+\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t      \"negotiated feature : 0x%\" PRIx64,\n+\t\t      ops_data->req_features);\n+\n+\tfor (i = 0; i < ops_data->vq_count; i++) {\n+\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t  \"starting vq# %d\", i);\n+\t\trc = sfc_vdpa_virtq_start(ops_data, i);\n+\t\tif (rc != 0)\n+\t\t\tgoto fail_vq_start;\n+\t}\n+\n+\tops_data->state = SFC_VDPA_STATE_STARTED;\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"done\");\n+\n+\treturn 0;\n+\n+fail_vq_start:\n+\t/* stop already started virtqueues */\n+\tfor (j = 0; j < i; j++)\n+\t\tsfc_vdpa_virtq_stop(ops_data, j);\n+\tsfc_vdpa_disable_vfio_intr(ops_data);\n+\n+fail_enable_vfio_intr:\n+\tops_data->state = SFC_VDPA_STATE_CONFIGURED;\n+\n+\treturn rc;\n+}\n+\n+static int\n+sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)\n+{\n+\tstruct sfc_vdpa_ops_data *ops_data;\n+\tvoid *dev;\n+\n+\tops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);\n+\tif (ops_data == NULL)\n+\t\treturn -1;\n+\n+\tdev = ops_data->dev_handle;\n+\t*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;\n+\n+\tsfc_vdpa_info(dev, \"vDPA ops get_queue_num :: supported queue num : %d\",\n+\t\t      *queue_num);\n+\n+\treturn 0;\n+}\n+\n static int\n sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)\n {\n@@ -114,7 +537,53 @@\n static int\n sfc_vdpa_dev_config(int vid)\n {\n-\tRTE_SET_USED(vid);\n+\tstruct rte_vdpa_device *vdpa_dev;\n+\tint rc;\n+\tstruct sfc_vdpa_ops_data *ops_data;\n+\n+\tvdpa_dev = 
rte_vhost_get_vdpa_device(vid);\n+\n+\tops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);\n+\tif (ops_data == NULL) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"invalid vDPA device : %p, vid : %d\",\n+\t\t\t     vdpa_dev, vid);\n+\t\treturn -1;\n+\t}\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"entry\");\n+\n+\tops_data->vid = vid;\n+\n+\tsfc_vdpa_adapter_lock(ops_data->dev_handle);\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"configuring\");\n+\trc = sfc_vdpa_configure(ops_data);\n+\tif (rc != 0)\n+\t\tgoto fail_vdpa_config;\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"starting\");\n+\trc = sfc_vdpa_start(ops_data);\n+\tif (rc != 0)\n+\t\tgoto fail_vdpa_start;\n+\n+\tsfc_vdpa_adapter_unlock(ops_data->dev_handle);\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"vhost notifier ctrl\");\n+\tif (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)\n+\t\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t\t      \"vDPA (%s): software relay for notify is used.\",\n+\t\t\t      vdpa_dev->device->name);\n+\n+\tsfc_vdpa_log_init(ops_data->dev_handle, \"done\");\n+\n+\treturn 0;\n+\n+fail_vdpa_start:\n+\tsfc_vdpa_close(ops_data);\n+\n+fail_vdpa_config:\n+\tsfc_vdpa_adapter_unlock(ops_data->dev_handle);\n \n \treturn -1;\n }\n@@ -122,9 +591,27 @@\n static int\n sfc_vdpa_dev_close(int vid)\n {\n-\tRTE_SET_USED(vid);\n+\tstruct rte_vdpa_device *vdpa_dev;\n+\tstruct sfc_vdpa_ops_data *ops_data;\n \n-\treturn -1;\n+\tvdpa_dev = rte_vhost_get_vdpa_device(vid);\n+\n+\tops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);\n+\tif (ops_data == NULL) {\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"invalid vDPA device : %p, vid : %d\",\n+\t\t\t     vdpa_dev, vid);\n+\t\treturn -1;\n+\t}\n+\n+\tsfc_vdpa_adapter_lock(ops_data->dev_handle);\n+\n+\tsfc_vdpa_stop(ops_data);\n+\tsfc_vdpa_close(ops_data);\n+\n+\tsfc_vdpa_adapter_unlock(ops_data->dev_handle);\n+\n+\treturn 0;\n }\n \n static int\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h\nindex 21cbb73..8d553c5 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_ops.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h\n@@ -18,17 +18,45 @@ enum sfc_vdpa_context {\n enum sfc_vdpa_state {\n \tSFC_VDPA_STATE_UNINITIALIZED = 0,\n \tSFC_VDPA_STATE_INITIALIZED,\n+\tSFC_VDPA_STATE_CONFIGURING,\n+\tSFC_VDPA_STATE_CONFIGURED,\n+\tSFC_VDPA_STATE_CLOSING,\n+\tSFC_VDPA_STATE_CLOSED,\n+\tSFC_VDPA_STATE_STARTING,\n+\tSFC_VDPA_STATE_STARTED,\n+\tSFC_VDPA_STATE_STOPPING,\n \tSFC_VDPA_STATE_NSTATES\n };\n \n+struct sfc_vdpa_vring_info {\n+\tuint64_t\tdesc;\n+\tuint64_t\tavail;\n+\tuint64_t\tused;\n+\tuint64_t\tsize;\n+\tuint16_t\tlast_avail_idx;\n+\tuint16_t\tlast_used_idx;\n+};\n+\n+typedef struct sfc_vdpa_vq_context_s {\n+\tuint8_t\t\t\t\tenable;\n+\tuint32_t\t\t\tpidx;\n+\tuint32_t\t\t\tcidx;\n+\tefx_virtio_vq_t\t\t\t*vq;\n+} sfc_vdpa_vq_context_t;\n+\n struct sfc_vdpa_ops_data {\n \tvoid\t\t\t\t*dev_handle;\n+\tint\t\t\t\tvid;\n \tstruct rte_vdpa_device\t\t*vdpa_dev;\n \tenum sfc_vdpa_context\t\tvdpa_context;\n \tenum sfc_vdpa_state\t\tstate;\n \n \tuint64_t\t\t\tdev_features;\n \tuint64_t\t\t\tdrv_features;\n+\tuint64_t\t\t\treq_features;\n+\n+\tuint16_t\t\t\tvq_count;\n+\tstruct sfc_vdpa_vq_context_s\tvq_cxt[SFC_VDPA_MAX_QUEUE_PAIRS * 2];\n };\n \n struct sfc_vdpa_ops_data *\n",
    "prefixes": [
        "06/10"
    ]
}
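
As the Allow header above indicates, the same endpoint also accepts PUT and PATCH. The sketch below shows a partial update of the patch state via HTTP PATCH, again using only the Python standard library; it assumes write access requires a Patchwork API token, and "YOUR_API_TOKEN" and the "accepted" state value are placeholders, not values taken from this response:

import json
import urllib.request

# Partially update the patch: only the fields in the request body change.
# The Authorization header is an assumption; write access needs a token
# belonging to a user with maintainer rights on the project.
url = "https://patches.dpdk.org/api/patches/95468/"
req = urllib.request.Request(
    url,
    data=json.dumps({"state": "accepted"}).encode("utf-8"),
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Token YOUR_API_TOKEN",
    },
)
with urllib.request.urlopen(req) as resp:
    # On success the server echoes the updated resource back as JSON.
    print(resp.status, json.load(resp)["state"])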