get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/113962/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113962,
    "url": "http://patches.dpdk.org/api/patches/113962/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220714075202.31826-5-asaini@xilinx.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220714075202.31826-5-asaini@xilinx.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220714075202.31826-5-asaini@xilinx.com",
    "date": "2022-07-14T07:52:02",
    "name": "[5/5] vdpa/sfc: Add support for SW assisted live migration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "82dc189ccda2a47ff01144343b2a513969d41cdd",
    "submitter": {
        "id": 2468,
        "url": "http://patches.dpdk.org/api/people/2468/?format=api",
        "name": "",
        "email": "abhimanyu.saini@xilinx.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220714075202.31826-5-asaini@xilinx.com/mbox/",
    "series": [
        {
            "id": 23993,
            "url": "http://patches.dpdk.org/api/series/23993/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23993",
            "date": "2022-07-14T07:51:58",
            "name": "[1/5] common/sfc_efx/base: remove VQ index check during VQ start",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/23993/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113962/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/113962/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CBD3DA00C5;\n\tThu, 14 Jul 2022 09:52:48 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D8CE042B81;\n\tThu, 14 Jul 2022 09:52:30 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2041.outbound.protection.outlook.com [40.107.93.41])\n by mails.dpdk.org (Postfix) with ESMTP id E7E7E42B90\n for <dev@dpdk.org>; Thu, 14 Jul 2022 09:52:28 +0200 (CEST)",
            "from BN0PR04CA0172.namprd04.prod.outlook.com (2603:10b6:408:eb::27)\n by BL0PR12MB4708.namprd12.prod.outlook.com (2603:10b6:208:8d::24)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5417.20; Thu, 14 Jul\n 2022 07:52:26 +0000",
            "from BN8NAM11FT048.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:eb:cafe::33) by BN0PR04CA0172.outlook.office365.com\n (2603:10b6:408:eb::27) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5417.17 via Frontend\n Transport; Thu, 14 Jul 2022 07:52:26 +0000",
            "from\n mailrelay000000.14r1f435wfvunndds3vy4cdalc.xx.internal.cloudapp.net\n (20.83.241.18) by BN8NAM11FT048.mail.protection.outlook.com (10.13.177.117)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5438.12 via Frontend\n Transport; Thu, 14 Jul 2022 07:52:26 +0000",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12lp2049.outbound.protection.outlook.com [104.47.66.49])\n by mailrelay000000.14r1f435wfvunndds3vy4cdalc.xx.internal.cloudapp.net\n (Postfix) with ESMTPS id 5C1AB41F5D;\n Thu, 14 Jul 2022 07:52:25 +0000 (UTC)",
            "from DM6PR02CA0045.namprd02.prod.outlook.com (2603:10b6:5:177::22)\n by BYAPR02MB4182.namprd02.prod.outlook.com (2603:10b6:a02:fb::16) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5438.12; Thu, 14 Jul\n 2022 07:52:21 +0000",
            "from DM3NAM02FT039.eop-nam02.prod.protection.outlook.com\n (2603:10b6:5:177:cafe::a) by DM6PR02CA0045.outlook.office365.com\n (2603:10b6:5:177::22) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5417.25 via Frontend\n Transport; Thu, 14 Jul 2022 07:52:21 +0000",
            "from xsj-pvapexch01.xlnx.xilinx.com (149.199.62.198) by\n DM3NAM02FT039.mail.protection.outlook.com (10.13.5.22) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.20.5438.12 via Frontend Transport; Thu, 14 Jul 2022 07:52:20 +0000",
            "from xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) by\n xsj-pvapexch01.xlnx.xilinx.com (172.19.86.40) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.1.2176.14; Thu, 14 Jul 2022 00:52:19 -0700",
            "from smtp.xilinx.com (172.19.127.96) by\n xsj-pvapexch02.xlnx.xilinx.com (172.19.86.41) with Microsoft SMTP Server id\n 15.1.2176.14 via Frontend Transport; Thu, 14 Jul 2022 00:52:19 -0700",
            "from [10.170.66.118] (port=50692 helo=xndengvm004118.xilinx.com)\n by smtp.xilinx.com with esmtp (Exim 4.90)\n (envelope-from <abhimanyu.saini@xilinx.com>)\n id 1oBtdz-0002Lo-73; Thu, 14 Jul 2022 00:52:19 -0700"
        ],
        "ARC-Seal": [
            "i=2; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=pass;\n b=Bu1J6X1fqav5Cc6CQtXy5wbEtJPwXsO5K3sdFG6ePAEYvjdFWqwlNjGHQosWXsTDqBpY5fJxjs2v79RqtuTnhFK0stiBMf75RC31Mz2AUr+AH+35Gx5Rf4IdEfD4LCFJclcR+/IyKmlPlbEuNoPqehWfjeZHkFJFINHMB51Q39KuDjgQUWypXXkSMRgPu5xXs2oBTjfoJxdD0iF1o2Tbzk/yuUqR9HINT9vAIU3BKevVujNt1hdRnpvhStgfBIDpIBYzlSIrmS1e3cHG+CVrS/tZm/JgDe0NEFgmEDDyko5YQ69QuCMnbi7lvVqodBv2/S1E60tDe8UEPM2Ug0Q8QA==",
            "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=MZP4G5yN68krydmPTglYShvpsdOqOVMFS2/VQERufNfvY1yCnp4C7GzDE0fso+hPq9pd4ROiDD9eqFxUhSsqkEh1edwubgGV9ZVKFPbBSSzQmadRpCXnqZtzYh9JjRKXZyrZdLy7XpOc0z5OZ2qq5bjj41HVO5Ih3XixVbKV7FTo2g6WTIkPcxM/jbzykjwBfx2MQ7uR/xHExJgF2ftUMKSOrmvF9OZBtKyVJHcigVCE3XPXeC4qCUWWTCqBYD28hmGnPO+RZ32eYqQTmJUEhp+HxvNEmCRljwfRPJ644Eboom9q/S3rrystVklmc+8xUPRHhPal3Pj0bPwmmga/fg=="
        ],
        "ARC-Message-Signature": [
            "i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=vv5odaRCe1AIxidDAkn84wmaW6+YpJvXl6So1duxD2Y=;\n b=XKAjV3g9fasIOWiQUcJOppnnEAylgNYcYAqXdSoPoFnSgSX+mkXksmYsnyw7DH64KQ7r5YPYW5JGtRwlVx3EriS1vUkc0/Vj5mpYeCcvrqNrTQkvsG0a7Q4bth+e13XZ51HJ9Ej1ow5ZyLbBaneUStv53jO/HuciS4MMj823A+yWq+R+vWUYxki4CiKQWXoDec+ZGec5SYjC8/NH3XJyyogjVADvO8YE3avy0Ii93yss9tc1p9Wmp6907hP4dedK0ukXVOeNs/2UWLdJ+w8NMh0k/ITBc/zkpePN2Q/3BMRzn7f+ARz8KGNqsRjUDkU+5Xa6vtOM8Ir2x00ipA7qZw==",
            "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=vv5odaRCe1AIxidDAkn84wmaW6+YpJvXl6So1duxD2Y=;\n b=gW7HkkICTeQy+NBlJ/o2Xf8ZW0w2SAFsjLW5X/VFGeJdvoXZWUl3gxLzn2RsPkCFffGMskJhmjqm+Ktm1ZnXbvW5MKrcHzoFTj3yMwHWE55R/E0G8lnU8VY9x/SvFSckfjr3yTMnZnrR1NVMglbkCTEpIzGOAcb+WkqM2AfeD7auUXFQLoynv9eQG9lkKB9QyUX59nP4tvfiZH9G1Ju85/N/FCt0HKLN2ypEuAsagRKVqiphBlZdkOvfc76JG91dD2CL9hLBoznc5JHpOnEQd06iOa7yVUwHGBn+qux909gDABRC43mkhmLSGpW13w8O8YwWqVJXvZ0FlVpeY40ycg=="
        ],
        "ARC-Authentication-Results": [
            "i=2; mx.microsoft.com 1; spf=pass (sender ip is\n 20.83.241.18) smtp.rcpttodomain=dpdk.org smtp.mailfrom=amd.com; dmarc=fail\n (p=none sp=none pct=100) action=none header.from=xilinx.com; dkim=none\n (message not signed); arc=pass (0 oda=1 ltdi=1\n spf=[1,1,smtp.mailfrom=xilinx.com] dmarc=[1,1,header.from=xilinx.com])",
            "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 149.199.62.198) smtp.rcpttodomain=dpdk.org smtp.mailfrom=xilinx.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=xilinx.com;\n dkim=none (message not signed); arc=none"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=amdcloud.onmicrosoft.com; s=selector1-amdcloud-onmicrosoft-com;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=vv5odaRCe1AIxidDAkn84wmaW6+YpJvXl6So1duxD2Y=;\n b=YU2i109QQN4m5mt60lOU4yXlInPksH1DvTRTShdfYSRddVpmNkc8LXdErahoC49MaPS5bM+yIX2ics0EGWKD3TjJRSHnoppQkjPZXnioXnNLbJBWt5fXFKCoa31w+WfAH4h9lerhhHDs81awUFQo4KMbsCqHGr7Ij1CK8qHRnRzJnwk7u+ct2YWoVb6UQ6IOWvnNAapM9IKTKFMd76kt5DzWb+EtMb6Cm8Yijp8kYqrQ261w5MNUIb2zsjDmV75nY7iyAEzODzU0U8gtPx6GMzwbS3FxE7jrupRZDU9rSOeeQiszof6ULhv0jTsw7HQB0t6J9DGplxcXp2+ulYmKzg==",
        "X-MS-Exchange-Authentication-Results": [
            "spf=pass (sender IP is 20.83.241.18)\n smtp.mailfrom=amd.com; dkim=none (message not signed)\n header.d=none;dmarc=fail action=none header.from=xilinx.com;",
            "spf=pass (sender IP is 149.199.62.198)\n smtp.mailfrom=xilinx.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=xilinx.com;"
        ],
        "Received-SPF": "Pass (protection.outlook.com: domain of amd.com designates\n 20.83.241.18 as permitted sender) receiver=protection.outlook.com;\n client-ip=20.83.241.18;\n helo=mailrelay000000.14r1f435wfvunndds3vy4cdalc.xx.internal.cloudapp.net;\n pr=C",
        "Envelope-to": "dev@dpdk.org, chenbo.xia@intel.com, maxime.coquelin@redhat.com,\n andrew.rybchenko@oktetlabs.ru, absaini@amd.com",
        "From": "<abhimanyu.saini@xilinx.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<chenbo.xia@intel.com>, <maxime.coquelin@redhat.com>,\n <andrew.rybchenko@oktetlabs.ru>, Abhimanyu Saini <absaini@amd.com>",
        "Subject": "[PATCH 5/5] vdpa/sfc: Add support for SW assisted live migration",
        "Date": "Thu, 14 Jul 2022 13:22:02 +0530",
        "Message-ID": "<20220714075202.31826-5-asaini@xilinx.com>",
        "X-Mailer": "git-send-email 2.25.0",
        "In-Reply-To": "<20220714075202.31826-1-asaini@xilinx.com>",
        "References": "<20220708080135.31254-1-asaini@xilinx.com>\n <20220714075202.31826-1-asaini@xilinx.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-EOPAttributedMessage": "1",
        "X-MS-Office365-Filtering-Correlation-Id": "a18e6bd8-f0f5-4618-ca49-08da656dcb58",
        "X-MS-TrafficTypeDiagnostic": "\n BYAPR02MB4182:EE_|BN8NAM11FT048:EE_|BL0PR12MB4708:EE_",
        "X-MS-Exchange-SenderADCheck": "0",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam-Untrusted": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info-Original": "\n EGN6L86yyCBu5uu3SwCcjGXcOXksT7TBopYMaHBEUQ+v/irNgRpu7+nnwxt0IWNgmkaaxoE/Qh/xF+1CzYWpyejjMK9YjhEdlNGZ7LD6z/L7gCohmR3HGhKSBnsQOoXPjCaClsA5tef/ul04NLFbtK3z2NfVBe1FXCI7TtE5ufeLk+AInMfLvKkyvphPxekwbnHe5L/N9uxOjTACp+4cdFmEfsUR328Dk8VV7K/OkHM+ler6qMUiEJ9Kb8BIRw33kqkEl5m3+5vyhqq2a0597x3nl3LtdrbKFxdinTvkFqH2PTpTqIDb5FzbZMitXfwfrphoQ9vw62SNp8dkguwzlnl3J4UaZRV+QuGdmB5wuioKuTHwLZ5Q/XpCng9zFC3l7X+Ml6SdMtnnkCSnSWTCW60ppleN8eP5962MzMTqkE7EhJWLrnkpdDPPAqPjCRfxyLoZX+GBPi3qGMQWCt4h2T77LzoLsHnk8gejSvq2mE7SaCFRqB90aTNDeuVeU6dW9dyyU0ib4LrddNuLpGPxzw90f9YqT5PtYF2pY/WUMo3lVcB5267c2TVBDaBWg1/Hi6S+bbC5Z0myKf5C6rgQHQqLF0EfC0ItaNaxXKpfwkQ/MBzAW5MCijX9ck37ws8g4F+VJ0TiiObWYpD+c9/JouEJur1aoCajBBTY5ZHfPgg3ghlg6fugkaZTLqdZOkkrKY34AQZ0StD+yOvFBu7LgRSSpgrSu+POgrRJ2kXPu9uBm9oBEZXy5A4rWx3d36HzOxmbXz3/ccpYxzopszmo3h/fGzwEu5NiDQuROBGftG9imEIrto/YFn9lJGwyJgNkE+iDSx+Qjbw29bkWTbl2Mf59JK/NAzN5djARsiQkcxU=",
        "X-Forefront-Antispam-Report-Untrusted": "CIP:149.199.62.198; CTRY:US; LANG:en;\n SCL:1; SRV:; IPV:NLI; SFV:NSPM; H:xsj-pvapexch01.xlnx.xilinx.com;\n PTR:unknown-62-198.xilinx.com; CAT:NONE;\n SFS:(13230016)(4636009)(396003)(136003)(376002)(39860400002)(346002)(40470700004)(36840700001)(46966006)(83380400001)(2616005)(47076005)(7636003)(8676002)(426003)(336012)(1076003)(5660300002)(186003)(4326008)(356005)(40480700001)(30864003)(8936002)(2876002)(2906002)(9786002)(7696005)(82310400005)(26005)(70586007)(82740400003)(41300700001)(54906003)(6916009)(70206006)(6666004)(316002)(36756003)(36860700001)(40460700003)(478600001)(102446001);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": [
            "BYAPR02MB4182",
            "BL0PR12MB4708"
        ],
        "X-MS-Exchange-Transport-CrossTenantHeadersStripped": "\n BN8NAM11FT048.eop-nam11.prod.protection.outlook.com",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id-Prvs": "\n 35ca9b71-8d80-4bb5-7847-08da656dc82e",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n SR4EfwD82Q1xbojaDf1XVWiPESlQ1mRVWLjLSoboZx5A4hUJ9+cp+r1Y2d0/mCoE2BcntbEVUnmjYEvneugL/r/n2pXdvdy1JFSQR9wIJqPa/fLqiU6fcoxgD+a9xjFcEfXaDn3OGmvQPQK6wyFErEZXia5GNrUQcRy6147z+AnwKQ/spZQmHROMyiOnOaA5I309NnPfAmleBWVR1xf4qLkwVhjfIw2VENiUUz6s7LSjvEh4LIRmKq7wKHgzj26tBHphfquSqtjanG65YDGWI1fERnqLxeJYdYkceD6C54gqOaLFFhZlPjEXZ77k8gEvT0XAyNVbrhtVZ/Ry7TKVBLGUgaTuckfdIfQTAGGkRP5N/fAaGRxYM1lWlPjZT0+kXHowlpgo3HzkrCPw7KwzID3pZTX8MeKQ0k45WhjTITV6igasiqEt4XLlZjsP2VY0OMe4LkL5mAm3zCPg3SXVOdbmUVgYHTH/zdvH8LdgTZEpWVtlkAFjHrOlT1uflJsKnhehjd/VgXgbNeU7meI5FSrmwebKvpffeCuYNfwf3TF4ja0unNjynS5iRKY+KKV7E0cAC4MrClw317Ux54C0cXFXS3PKej4QYodMflCCGAsLp2vfxswCIf+ZjGinNQrWtcWfIMEojqQoZw3tz9LP/wdBg4GBzwmbYJCKXrB/lGcII7gmpVL8ciZwYTg2zDTLeTjA6k8D46lMfHxRJaXboSchkFQq7YtLcdz+1Lky5Zckj8pu3nVFb+TcgAnvpUgPul5jYbgA54tZehCCnZIF5IOPGyA3DTij4RtVINLXDllLN3wEcz+P9Tp9wvjZVg3UZZCEkHkd9fuSjl8avDjLAfkpR/EN+ZoV98azG3vnds8=",
        "X-Forefront-Antispam-Report": "CIP:20.83.241.18; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM;\n H:mailrelay000000.14r1f435wfvunndds3vy4cdalc.xx.internal.cloudapp.net;\n PTR:InfoDomainNonexistent; CAT:NONE;\n SFS:(13230016)(4636009)(376002)(39860400002)(346002)(136003)(396003)(46966006)(36840700001)(42882007)(8676002)(336012)(2876002)(47076005)(83170400001)(83380400001)(40480700001)(9786002)(426003)(54906003)(4326008)(82740400003)(6916009)(26005)(7696005)(70206006)(41300700001)(81166007)(6666004)(8936002)(2906002)(1076003)(2616005)(5660300002)(82310400005)(30864003)(36756003)(36860700001)(478600001)(186003)(316002)(102446001)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "amd.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "14 Jul 2022 07:52:26.1906 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n a18e6bd8-f0f5-4618-ca49-08da656dcb58",
        "X-MS-Exchange-CrossTenant-Id": "3dd8961f-e488-4e60-8e11-a82d994e183d",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=3dd8961f-e488-4e60-8e11-a82d994e183d; Ip=[20.83.241.18];\n Helo=[mailrelay000000.14r1f435wfvunndds3vy4cdalc.xx.internal.cloudapp.net]",
        "X-MS-Exchange-CrossTenant-AuthAs": "Internal",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n TreatMessagesAsInternal-BN8NAM11FT048.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Abhimanyu Saini <absaini@amd.com>\n\nIn SW assisted live migration, vDPA driver will stop all virtqueues\nand setup up SW vrings to relay the communication between the\nvirtio driver and the vDPA device using an event driven relay thread\n\nThis will allow vDPA driver to help on guest dirty page logging for\nlive migration.\n\nSigned-off-by: Abhimanyu Saini <absaini@amd.com>\n---\n drivers/vdpa/sfc/sfc_vdpa.h     |   1 +\n drivers/vdpa/sfc/sfc_vdpa_ops.c | 336 +++++++++++++++++++++++++++++++++++++---\n drivers/vdpa/sfc/sfc_vdpa_ops.h |  15 +-\n 3 files changed, 329 insertions(+), 23 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/sfc/sfc_vdpa.h b/drivers/vdpa/sfc/sfc_vdpa.h\nindex daeb27d..ae522ca 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa.h\n@@ -18,6 +18,7 @@\n \n #define SFC_VDPA_MAC_ADDR\t\t\t\"mac\"\n #define SFC_VDPA_DEFAULT_MCDI_IOVA\t\t0x200000000000\n+#define SFC_SW_VRING_IOVA\t\t\t0x300000000000\n \n /* Broadcast & Unicast MAC filters are supported */\n #define SFC_MAX_SUPPORTED_FILTERS\t\t3\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c\nindex 426c7ac..daf1db0 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_ops.c\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c\n@@ -4,10 +4,13 @@\n \n #include <pthread.h>\n #include <unistd.h>\n+#include <sys/epoll.h>\n #include <sys/ioctl.h>\n \n+#include <rte_eal_paging.h>\n #include <rte_errno.h>\n #include <rte_malloc.h>\n+#include <rte_memory.h>\n #include <rte_vdpa.h>\n #include <rte_vfio.h>\n #include <rte_vhost.h>\n@@ -33,7 +36,9 @@\n  */\n #define SFC_VDPA_DEFAULT_FEATURES \\\n \t\t((1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \\\n-\t\t (1ULL << VIRTIO_NET_F_MQ))\n+\t\t (1ULL << VIRTIO_NET_F_MQ) | \\\n+\t\t (1ULL << VHOST_F_LOG_ALL) | \\\n+\t\t (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE))\n \n #define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \\\n \t\t(sizeof(struct vfio_irq_set) + \\\n@@ -42,6 +47,142 @@\n /* It will be used for target VF when calling function is not PF */\n #define SFC_VDPA_VF_NULL\t\t0xFFFF\n \n+#define SFC_VDPA_DECODE_FD(data)\t(data.u64 >> 32)\n+#define SFC_VDPA_DECODE_QID(data)\t(data.u32 >> 1)\n+#define SFC_VDPA_DECODE_EV_TYPE(data)\t(data.u32 & 1)\n+\n+/*\n+ * Create q_num number of epoll events for kickfd interrupts\n+ * and q_num/2 events for callfd interrupts. 
Round up the\n+ * total to (q_num * 2) number of events.\n+ */\n+#define SFC_VDPA_SW_RELAY_EVENT_NUM(q_num)\t(q_num * 2)\n+\n+static inline uint64_t\n+sfc_vdpa_encode_ev_data(int type, uint32_t qid, int fd)\n+{\n+\tSFC_VDPA_ASSERT(fd > UINT32_MAX || qid > UINT32_MAX / 2);\n+\treturn type | (qid << 1) | (uint64_t)fd << 32;\n+}\n+\n+static inline void\n+sfc_vdpa_queue_relay(struct sfc_vdpa_ops_data *ops_data, uint32_t qid)\n+{\n+\trte_vdpa_relay_vring_used(ops_data->vid, qid, &ops_data->sw_vq[qid]);\n+\trte_vhost_vring_call(ops_data->vid, qid);\n+}\n+\n+static void*\n+sfc_vdpa_sw_relay(void *data)\n+{\n+\tuint64_t buf;\n+\tuint32_t qid, q_num;\n+\tstruct epoll_event ev;\n+\tstruct rte_vhost_vring vring;\n+\tint nbytes, i, ret, fd, epfd, nfds = 0;\n+\tstruct epoll_event events[SFC_VDPA_MAX_QUEUE_PAIRS * 2];\n+\tstruct sfc_vdpa_ops_data *ops_data = (struct sfc_vdpa_ops_data *)data;\n+\n+\tq_num = rte_vhost_get_vring_num(ops_data->vid);\n+\tepfd = epoll_create(SFC_VDPA_SW_RELAY_EVENT_NUM(q_num));\n+\tif (epfd < 0) {\n+\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t  \"failed to create epoll instance\");\n+\t\tgoto fail_epoll;\n+\t}\n+\tops_data->epfd = epfd;\n+\n+\tvring.kickfd = -1;\n+\tfor (qid = 0; qid < q_num; qid++) {\n+\t\tev.events = EPOLLIN | EPOLLPRI;\n+\t\tret = rte_vhost_get_vhost_vring(ops_data->vid, qid, &vring);\n+\t\tif (ret != 0) {\n+\t\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t\t  \"rte_vhost_get_vhost_vring error %s\",\n+\t\t\t\t\t  strerror(errno));\n+\t\t\tgoto fail_vring;\n+\t\t}\n+\n+\t\tev.data.u64 = sfc_vdpa_encode_ev_data(0, qid, vring.kickfd);\n+\t\tif (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {\n+\t\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t\t  \"epoll add error: %s\",\n+\t\t\t\t\t  strerror(errno));\n+\t\t\tgoto fail_epoll_add;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Register intr_fd created by vDPA driver in lieu of qemu's callfd\n+\t * to intercept rx queue notification. 
So that we can monitor rx\n+\t * notifications and issue rte_vdpa_relay_vring_used()\n+\t */\n+\tfor (qid = 0; qid < q_num; qid += 2) {\n+\t\tfd = ops_data->intr_fd[qid];\n+\t\tev.events = EPOLLIN | EPOLLPRI;\n+\t\tev.data.u64 = sfc_vdpa_encode_ev_data(1, qid, fd);\n+\t\tif (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {\n+\t\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t\t  \"epoll add error: %s\",\n+\t\t\t\t\t  strerror(errno));\n+\t\t\tgoto fail_epoll_add;\n+\t\t}\n+\t\tsfc_vdpa_queue_relay(ops_data, qid);\n+\t}\n+\n+\t/*\n+\t * virtio driver in VM was continuously sending queue notifications\n+\t * while were setting up software vrings and hence the HW misses\n+\t * these doorbell notifications. Since, it is safe to send duplicate\n+\t * doorbell, send another doorbell from vDPA driver.\n+\t */\n+\tfor (qid = 0; qid < q_num; qid++)\n+\t\trte_write16(qid, ops_data->vq_cxt[qid].doorbell);\n+\n+\tfor (;;) {\n+\t\tnfds = epoll_wait(epfd, events,\n+\t\t\t\t  SFC_VDPA_SW_RELAY_EVENT_NUM(q_num), -1);\n+\t\tif (nfds < 0) {\n+\t\t\tif (errno == EINTR)\n+\t\t\t\tcontinue;\n+\t\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t\t  \"epoll_wait return fail\\n\");\n+\t\t\tgoto fail_epoll_wait;\n+\t\t}\n+\n+\t\tfor (i = 0; i < nfds; i++) {\n+\t\t\tfd = SFC_VDPA_DECODE_FD(events[i].data);\n+\t\t\t/* Ensure kickfd is not busy before proceeding */\n+\t\t\tfor (;;) {\n+\t\t\t\tnbytes = read(fd, &buf, 8);\n+\t\t\t\tif (nbytes < 0) {\n+\t\t\t\t\tif (errno == EINTR ||\n+\t\t\t\t\t    errno == EWOULDBLOCK ||\n+\t\t\t\t\t    errno == EAGAIN)\n+\t\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tqid = SFC_VDPA_DECODE_QID(events[i].data);\n+\t\t\tif (SFC_VDPA_DECODE_EV_TYPE(events[i].data))\n+\t\t\t\tsfc_vdpa_queue_relay(ops_data, qid);\n+\t\t\telse\n+\t\t\t\trte_write16(qid, ops_data->vq_cxt[qid].doorbell);\n+\t\t}\n+\t}\n+\n+\treturn NULL;\n+\n+fail_epoll:\n+fail_vring:\n+fail_epoll_add:\n+fail_epoll_wait:\n+\tclose(epfd);\n+\tops_data->epfd = 
-1;\n+\treturn NULL;\n+}\n+\n static int\n sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)\n {\n@@ -99,7 +240,7 @@\n static int\n sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)\n {\n-\tint rc;\n+\tint rc, fd;\n \tint *irq_fd_ptr;\n \tint vfio_dev_fd;\n \tuint32_t i, num_vring;\n@@ -131,6 +272,17 @@\n \t\t\treturn -1;\n \n \t\tirq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;\n+\t\tif (ops_data->sw_fallback_mode && !(i & 1)) {\n+\t\t\tfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n+\t\t\tif (fd < 0) {\n+\t\t\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t\t\t     \"failed to create eventfd\");\n+\t\t\t\tgoto fail_eventfd;\n+\t\t\t}\n+\t\t\tops_data->intr_fd[i] = fd;\n+\t\t\tirq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;\n+\t\t} else\n+\t\t\tops_data->intr_fd[i] = -1;\n \t}\n \n \trc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n@@ -138,16 +290,26 @@\n \t\tsfc_vdpa_err(ops_data->dev_handle,\n \t\t\t     \"error enabling MSI-X interrupts: %s\",\n \t\t\t     strerror(errno));\n-\t\treturn -1;\n+\t\tgoto fail_ioctl;\n \t}\n \n \treturn 0;\n+\n+fail_ioctl:\n+fail_eventfd:\n+\tfor (i = 0; i < num_vring; i++) {\n+\t\tif (ops_data->intr_fd[i] != -1) {\n+\t\t\tclose(ops_data->intr_fd[i]);\n+\t\t\tops_data->intr_fd[i] = -1;\n+\t\t}\n+\t}\n+\treturn -1;\n }\n \n static int\n sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)\n {\n-\tint rc;\n+\tint rc, i;\n \tint vfio_dev_fd;\n \tstruct vfio_irq_set irq_set;\n \tvoid *dev;\n@@ -161,6 +323,12 @@\n \tirq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;\n \tirq_set.start = 0;\n \n+\tfor (i = 0; i < ops_data->vq_count; i++) {\n+\t\tif (ops_data->intr_fd[i] >= 0)\n+\t\t\tclose(ops_data->intr_fd[i]);\n+\t\tops_data->intr_fd[i] = -1;\n+\t}\n+\n \trc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);\n \tif (rc) {\n \t\tsfc_vdpa_err(ops_data->dev_handle,\n@@ -223,12 +391,15 @@\n static int\n sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)\n {\n-\tint rc;\n+\tint rc, 
fd;\n+\tuint64_t size;\n \tuint32_t doorbell;\n \tefx_virtio_vq_t *vq;\n+\tvoid *vring_buf, *dev;\n \tstruct sfc_vdpa_vring_info vring;\n \tefx_virtio_vq_cfg_t vq_cfg;\n \tefx_virtio_vq_dyncfg_t vq_dyncfg;\n+\tuint64_t sw_vq_iova = ops_data->sw_vq_iova;\n \n \tvq = ops_data->vq_cxt[vq_num].vq;\n \tif (vq == NULL)\n@@ -241,6 +412,33 @@\n \t\tgoto fail_vring_info;\n \t}\n \n+\tif (ops_data->sw_fallback_mode) {\n+\t\tsize = vring_size(vring.size, rte_mem_page_size());\n+\t\tsize = RTE_ALIGN_CEIL(size, rte_mem_page_size());\n+\t\tvring_buf = rte_zmalloc(\"vdpa\", size, rte_mem_page_size());\n+\t\tvring_init(&ops_data->sw_vq[vq_num], vring.size, vring_buf,\n+\t\t\t   rte_mem_page_size());\n+\n+\t\tdev = ops_data->dev_handle;\n+\t\tfd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;\n+\t\trc = rte_vfio_container_dma_map(fd,\n+\t\t\t\t\t\t(uint64_t)(uintptr_t)vring_buf,\n+\t\t\t\t\t\tsw_vq_iova, size);\n+\n+\t\t/* Direct I/O for Tx queue, relay for Rx queue */\n+\t\tif (!(vq_num & 1))\n+\t\t\tvring.used = sw_vq_iova +\n+\t\t\t\t(char *)ops_data->sw_vq[vq_num].used -\n+\t\t\t\t(char *)ops_data->sw_vq[vq_num].desc;\n+\n+\t\tops_data->sw_vq[vq_num].used->idx = vring.last_used_idx;\n+\t\tops_data->sw_vq[vq_num].avail->idx = vring.last_avail_idx;\n+\n+\t\tops_data->vq_cxt[vq_num].sw_vq_iova = sw_vq_iova;\n+\t\tops_data->vq_cxt[vq_num].sw_vq_size = size;\n+\t\tops_data->sw_vq_iova += size;\n+\t}\n+\n \tvq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;\n \n \t/* even virtqueue for RX and odd for TX */\n@@ -309,9 +507,12 @@\n static int\n sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)\n {\n-\tint rc;\n+\tint rc, fd;\n+\tvoid *dev, *buf;\n+\tuint64_t size, len, iova;\n \tefx_virtio_vq_dyncfg_t vq_idx;\n \tefx_virtio_vq_t *vq;\n+\tstruct rte_vhost_vring vring;\n \n \tif (ops_data->vq_cxt[vq_num].enable != B_TRUE)\n \t\treturn -1;\n@@ -319,13 +520,34 @@\n \tvq = ops_data->vq_cxt[vq_num].vq;\n \tif (vq == NULL)\n \t\treturn -1;\n+\tif 
(ops_data->sw_fallback_mode) {\n+\t\tdev = ops_data->dev_handle;\n+\t\tfd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;\n+\t\t/* synchronize remaining new used entries if any */\n+\t\tif (!(vq_num & 1))\n+\t\t\tsfc_vdpa_queue_relay(ops_data, vq_num);\n+\n+\t\trte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vring);\n+\t\tlen = SFC_VDPA_USED_RING_LEN(vring.size);\n+\t\trte_vhost_log_used_vring(ops_data->vid, vq_num, 0, len);\n+\n+\t\tbuf = ops_data->sw_vq[vq_num].desc;\n+\t\tsize = ops_data->vq_cxt[vq_num].sw_vq_size;\n+\t\tiova = ops_data->vq_cxt[vq_num].sw_vq_iova;\n+\t\trte_vfio_container_dma_unmap(fd, (uint64_t)(uintptr_t)buf,\n+\t\t\t\tiova, size);\n+\t}\n \n \t/* stop the vq */\n \trc = efx_virtio_qstop(vq, &vq_idx);\n \tif (rc == 0) {\n-\t\tops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;\n-\t\tops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;\n+\t\tif (ops_data->sw_fallback_mode)\n+\t\t\tvq_idx.evvd_vq_avail_idx = vq_idx.evvd_vq_used_idx;\n+\t\trte_vhost_set_vring_base(ops_data->vid, vq_num,\n+\t\t\t\t\t vq_idx.evvd_vq_avail_idx,\n+\t\t\t\t\t vq_idx.evvd_vq_used_idx);\n \t}\n+\n \tops_data->vq_cxt[vq_num].enable = B_FALSE;\n \n \treturn rc;\n@@ -450,7 +672,11 @@\n \n \tSFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);\n \n-\tsfc_vdpa_log_init(ops_data->dev_handle, \"entry\");\n+\tif (ops_data->sw_fallback_mode) {\n+\t\tsfc_vdpa_log_init(ops_data->dev_handle,\n+\t\t\t\t  \"Trying to start VDPA with SW I/O relay\");\n+\t\tops_data->sw_vq_iova = SFC_SW_VRING_IOVA;\n+\t}\n \n \tops_data->state = SFC_VDPA_STATE_STARTING;\n \n@@ -675,6 +901,7 @@\n sfc_vdpa_dev_close(int vid)\n {\n \tint ret;\n+\tvoid *status;\n \tstruct rte_vdpa_device *vdpa_dev;\n \tstruct sfc_vdpa_ops_data *ops_data;\n \n@@ -707,7 +934,23 @@\n \t}\n \tops_data->is_notify_thread_started = false;\n \n+\tif (ops_data->sw_fallback_mode) {\n+\t\tret = pthread_cancel(ops_data->sw_relay_thread_id);\n+\t\tif (ret != 0)\n+\t\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t\t 
    \"failed to cancel LM relay thread: %s\",\n+\t\t\t\t     rte_strerror(ret));\n+\n+\t\tret = pthread_join(ops_data->sw_relay_thread_id, &status);\n+\t\tif (ret != 0)\n+\t\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t\t     \"failed to join LM relay thread: %s\",\n+\t\t\t\t     rte_strerror(ret));\n+\t}\n+\n \tsfc_vdpa_stop(ops_data);\n+\tops_data->sw_fallback_mode = false;\n+\n \tsfc_vdpa_close(ops_data);\n \n \tsfc_vdpa_adapter_unlock(ops_data->dev_handle);\n@@ -774,9 +1017,49 @@\n static int\n sfc_vdpa_set_features(int vid)\n {\n-\tRTE_SET_USED(vid);\n+\tint ret;\n+\tuint64_t features = 0;\n+\tstruct rte_vdpa_device *vdpa_dev;\n+\tstruct sfc_vdpa_ops_data *ops_data;\n \n-\treturn -1;\n+\tvdpa_dev = rte_vhost_get_vdpa_device(vid);\n+\tops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);\n+\tif (ops_data == NULL)\n+\t\treturn -1;\n+\n+\trte_vhost_get_negotiated_features(vid, &features);\n+\n+\tif (!RTE_VHOST_NEED_LOG(features))\n+\t\treturn -1;\n+\n+\tsfc_vdpa_info(ops_data->dev_handle, \"live-migration triggered\");\n+\n+\tsfc_vdpa_adapter_lock(ops_data->dev_handle);\n+\n+\t/* Stop HW Offload and unset host notifier */\n+\tsfc_vdpa_stop(ops_data);\n+\tif (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false) != 0)\n+\t\tsfc_vdpa_info(ops_data->dev_handle,\n+\t\t\t      \"vDPA (%s): Failed to clear host notifier\",\n+\t\t\t      ops_data->vdpa_dev->device->name);\n+\n+\t/* Restart vDPA with SW relay on RX queue */\n+\tops_data->sw_fallback_mode = true;\n+\tsfc_vdpa_start(ops_data);\n+\tret = pthread_create(&ops_data->sw_relay_thread_id, NULL,\n+\t\t\t     sfc_vdpa_sw_relay,\t(void *)ops_data);\n+\tif (ret != 0)\n+\t\tsfc_vdpa_err(ops_data->dev_handle,\n+\t\t\t     \"failed to create rx_relay thread: %s\",\n+\t\t\t     rte_strerror(ret));\n+\n+\tif (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)\n+\t\tsfc_vdpa_info(ops_data->dev_handle, \"notifier setup 
failed!\");\n+\n+\tsfc_vdpa_adapter_unlock(ops_data->dev_handle);\n+\tsfc_vdpa_info(ops_data->dev_handle, \"SW fallback setup done!\");\n+\n+\treturn 0;\n }\n \n static int\n@@ -860,17 +1143,28 @@\n \tsfc_vdpa_info(dev, \"vDPA ops get_notify_area :: offset : 0x%\" PRIx64,\n \t\t      *offset);\n \n-\tpci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;\n-\tdoorbell = (uint8_t *)pci_dev->mem_resource[reg.index].addr + *offset;\n+\tif (!ops_data->sw_fallback_mode) {\n+\t\tpci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;\n+\t\tdoorbell = (uint8_t *)pci_dev->mem_resource[reg.index].addr +\n+\t\t\t*offset;\n+\t\t/*\n+\t\t * virtio-net driver in VM sends queue notifications before\n+\t\t * vDPA has a chance to setup the queues and notification area,\n+\t\t * and hence the HW misses these doorbell notifications.\n+\t\t * Since, it is safe to send duplicate doorbell, send another\n+\t\t * doorbell from vDPA driver as workaround for this timing issue\n+\t\t */\n+\t\trte_write16(qid, doorbell);\n+\n+\t\t/*\n+\t\t * Update doorbell address, it will come in handy during\n+\t\t * live-migration.\n+\t\t */\n+\t\tops_data->vq_cxt[qid].doorbell = doorbell;\n+\t}\n \n-\t/*\n-\t * virtio-net driver in VM sends queue notifications before\n-\t * vDPA has a chance to setup the queues and notification area,\n-\t * and hence the HW misses these doorbell notifications.\n-\t * Since, it is safe to send duplicate doorbell, send another\n-\t * doorbell from vDPA driver as workaround for this timing issue.\n-\t */\n-\trte_write16(qid, doorbell);\n+\tsfc_vdpa_info(dev, \"vDPA ops get_notify_area :: offset : 0x%\" PRIx64,\n+\t\t      *offset);\n \n \treturn 0;\n }\ndiff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h\nindex 5c8e352..dd301ba 100644\n--- a/drivers/vdpa/sfc/sfc_vdpa_ops.h\n+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h\n@@ -6,8 +6,11 @@\n #define _SFC_VDPA_OPS_H\n \n #include <rte_vdpa.h>\n+#include <vdpa_driver.h>\n \n #define 
SFC_VDPA_MAX_QUEUE_PAIRS\t\t8\n+#define SFC_VDPA_USED_RING_LEN(size) \\\n+\t((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)\n \n enum sfc_vdpa_context {\n \tSFC_VDPA_AS_VF\n@@ -37,9 +40,10 @@ struct sfc_vdpa_vring_info {\n typedef struct sfc_vdpa_vq_context_s {\n \tvolatile void\t\t\t*doorbell;\n \tuint8_t\t\t\t\tenable;\n-\tuint32_t\t\t\tpidx;\n-\tuint32_t\t\t\tcidx;\n \tefx_virtio_vq_t\t\t\t*vq;\n+\n+\tuint64_t\t\t\tsw_vq_iova;\n+\tuint64_t\t\t\tsw_vq_size;\n } sfc_vdpa_vq_context_t;\n \n struct sfc_vdpa_ops_data {\n@@ -57,6 +61,13 @@ struct sfc_vdpa_ops_data {\n \n \tuint16_t\t\t\tvq_count;\n \tstruct sfc_vdpa_vq_context_s\tvq_cxt[SFC_VDPA_MAX_QUEUE_PAIRS * 2];\n+\n+\tint\t\t\t\tepfd;\n+\tuint64_t\t\t\tsw_vq_iova;\n+\tbool\t\t\t\tsw_fallback_mode;\n+\tpthread_t\t\t\tsw_relay_thread_id;\n+\tstruct vring\t\t\tsw_vq[SFC_VDPA_MAX_QUEUE_PAIRS * 2];\n+\tint\t\t\t\tintr_fd[SFC_VDPA_MAX_QUEUE_PAIRS * 2];\n };\n \n struct sfc_vdpa_ops_data *\n",
    "prefixes": [
        "5/5"
    ]
}