get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
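
The short sketch below is illustrative and not part of the Patchwork page itself. It assumes the Python "requests" package, the plain JSON endpoint (without "?format=api"), and a hypothetical PATCHWORK_TOKEN environment variable holding a maintainer API token; the "state" value used in the update is only an example.

import os
import requests

BASE = "http://patches.dpdk.org/api"
PATCH_ID = 108266

# GET: show a patch (read access needs no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: update a patch (partial update); requires an API token
# with maintainer rights on the project.
token = os.environ["PATCHWORK_TOKEN"]  # hypothetical variable name
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted"},  # example field/value only
)
resp.raise_for_status()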

GET /api/patches/108266/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 108266,
    "url": "http://patches.dpdk.org/api/patches/108266/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220224132820.1939650-5-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220224132820.1939650-5-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220224132820.1939650-5-xuemingl@nvidia.com",
    "date": "2022-02-24T13:28:17",
    "name": "[4/7] vdpa/mlx5: reuse resources in reconfiguration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "98a6bc157269c1972850c1474e9ac4f91ae60771",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220224132820.1939650-5-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 21862,
            "url": "http://patches.dpdk.org/api/series/21862/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21862",
            "date": "2022-02-24T13:28:13",
            "name": "vdpa/mlx5: improve device shutdown time",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/21862/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/108266/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/108266/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 01261A034E;\n\tThu, 24 Feb 2022 14:29:25 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7100642703;\n\tThu, 24 Feb 2022 14:29:16 +0100 (CET)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2074.outbound.protection.outlook.com [40.107.93.74])\n by mails.dpdk.org (Postfix) with ESMTP id E9D53426F1\n for <dev@dpdk.org>; Thu, 24 Feb 2022 14:29:14 +0100 (CET)",
            "from BN6PR22CA0031.namprd22.prod.outlook.com (2603:10b6:404:37::17)\n by BN6PR1201MB0082.namprd12.prod.outlook.com (2603:10b6:405:53::19)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5017.22; Thu, 24 Feb\n 2022 13:29:11 +0000",
            "from BN8NAM11FT045.eop-nam11.prod.protection.outlook.com\n (2603:10b6:404:37:cafe::9) by BN6PR22CA0031.outlook.office365.com\n (2603:10b6:404:37::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4975.14 via Frontend\n Transport; Thu, 24 Feb 2022 13:29:11 +0000",
            "from mail.nvidia.com (12.22.5.234) by\n BN8NAM11FT045.mail.protection.outlook.com (10.13.177.47) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5017.22 via Frontend Transport; Thu, 24 Feb 2022 13:29:11 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL101.nvidia.com\n (10.27.9.10) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 24 Feb 2022 13:29:09 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.9; Thu, 24 Feb 2022\n 05:29:06 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=DGZLDP2GVAPaRTBjiN5lW6BUNVsWb/KtQoJDhJZlWo1MXwB9rHK9UEmdR2padIxV3fymdcHGLg+6xFM3i0LFF36nVUWxh1tCMw4U15pEtK0zd6Arx5Vqgs2TdPpteRBWun6G5GnuMiBU7Ic8d3N7Nu/PWx/lK+CamrMMfNV2yBKnZPXn2wrdvJWxbxpWs9cvXC9p5ErrdySnT5c8i/ip9VcFWAUOHIK0faLQ6NB1Eqxy9xiSTJsQm5bisZXTCR+OMR7c2TqauNayX/oYpbk9Hz4YpaKmcHWahuXN0VOTEOix8SINovh7jJg/RzDq4nD6tBY3QGR34rjic4///eJpdw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=aSEg/nUz6I5eS3hD350v6JX/uSwaPDJNxNRbL6koeM4=;\n b=JMoNPJkPvePIEe6IUBoXlFNLcb18bMigqpTgGc4si2TOk9hP1YtBraOjDLPlb281AKGgOJIJnps8FMUwKF0nPV2OdwPCevF+a+4rPCQlAr3EoAXeGBQQMJWLBP5Vqmmp2zuitpwHg/WdFJz40eao4/susBgLDS3NhpIsGeR/dE5zyOY78piSG8Hj9OhST8vKFDA+/AK0xmNM2UY57/oB0Aj00RvRSrfnOe3l2quIOknUnndHnnuU8ONQQyCYS/1QdF3i+W3PcTLHPpPcHmqZLExWcRmdANa4F0CDv0FKxw/hiZ2NK8AQ9GaHAl3s4ppDNnsXgfPLUcyk4dAK15Jfrg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.234) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aSEg/nUz6I5eS3hD350v6JX/uSwaPDJNxNRbL6koeM4=;\n b=mdVOSi/8imTMtjsWuIY7HG//BLR+nn9Ux9/BbLlazmYG16T1aLgb80OPPN71tRHxVSuyJnAw23LhqAO+dHne6DwG5gdEdvy602wZqj6KL9rUWZTnSMxaQ2JwJb30fCppQwnb8wmWOBbIT6DpaaYAOt9uzk2ibDTyEdv3mDLLxvLxyD9zKb6ljPxKRk5sRn6ONRxcRm3fPoEA4kd+lVbbQ1Z/t/QLtFH6Y2xBgHXnCF4GSZq+YZGNstavZmKCwGR+rRD56NREhr9oZVsWmBB0LlBfKXTyJJFHlNivDwVlAghxAfpB4JkUnFIuBvoPsg1sTj42A+0RvTG8hxioBxtxBg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.234)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.234 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.234; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Matan Azrad <matan@nvidia.com>, \"Viacheslav\n Ovsiienko\" <viacheslavo@nvidia.com>",
        "Subject": "[PATCH 4/7] vdpa/mlx5: reuse resources in reconfiguration",
        "Date": "Thu, 24 Feb 2022 21:28:17 +0800",
        "Message-ID": "<20220224132820.1939650-5-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.35.1",
        "In-Reply-To": "<20220224132820.1939650-1-xuemingl@nvidia.com>",
        "References": "<20220224132820.1939650-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "41b182d2-982c-4cae-1b56-08d9f799a4da",
        "X-MS-TrafficTypeDiagnostic": "BN6PR1201MB0082:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR1201MB00821EEACDEE53EB0E736137A13D9@BN6PR1201MB0082.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 2UZC4kO+AkSXdircRDiRqId6LduVwA5R3Sei4xYvnYY1kWwLpWCXRQv4LTu1aglyt809l3spnRJb38unNpkL8smhN0vZQ8jooYU8JT3r7BLfSyoXwmtiJp84kL0WGf46jt74uiJECpu6mD4A6AFwux92jRVtB4VF1PVhHuw2QArc21VaZfsIRp/KlarlTnDVH0iuxZ7nvVjOk4VKmvXHoRECzFBo29YYkyf6+n+p0bmjWu6Y9Q2A/o82MIgMTmFbZTaIDh7DV+sCZegA0dNTwN5I5NaBgulf7lwpsD45sjhye4bkg+Mx5Evc3BTH7GP9NXS0v6nYfl5dAIbHEf8vcxC1+tOIyLLDanZ2/IWw6f/j21EGw985Q9JacRfms3aYJiW8BxDe/n9rJvmipnxXGYpBnHsVTYbOaxTBNw9PQ+/Y5eeQHe39L7yYtYb5oVHw2UNUPpNZWXs2DR4FPIQ6R9zlBokcf5/puSAf6+g8sor/Ih9gm3GRHXVNjfHWt7l24eXgMcnfcmSAHcRlZwrfZbEZ603iOZlDVOjEzqTmjAt72qEQ7fG18KUivy5DTKVr7tZHhA+c5vKI6drvsQpP+P/CvtsGFHyuZBdqSqLDd0CjtMVUUX9Q1hMU/lrjlTNwwvYf9E7Jkg5NmIbkgZuL8v6YJ1Zvv9xEG5c4nfYK9fgE54d23Dk6t2hgXABGaLHcJkFOgRuViIV2tPX4KrsPjA==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.234; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(40470700004)(36840700001)(46966006)(82310400004)(47076005)(83380400001)(36756003)(1076003)(16526019)(2616005)(426003)(40460700003)(186003)(336012)(107886003)(30864003)(26005)(6286002)(6916009)(316002)(2906002)(54906003)(4326008)(7696005)(86362001)(70206006)(70586007)(5660300002)(8676002)(6666004)(36860700001)(81166007)(55016003)(508600001)(356005)(8936002)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "24 Feb 2022 13:29:11.2762 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 41b182d2-982c-4cae-1b56-08d9f799a4da",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.234];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT045.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR1201MB0082",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "To speed up device resume, create reuseable resources during device\nprobe state, release when device remove. Reused resources includes TIS,\nTD, VAR Doorbell mmap, error handling event channel and interrupt\nhandler, UAR, Rx event channel, NULL MR, steer domain and table.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       | 165 +++++++++++++++++++++-------\n drivers/vdpa/mlx5/mlx5_vdpa.h       |   9 ++\n drivers/vdpa/mlx5/mlx5_vdpa_event.c |  23 ++--\n drivers/vdpa/mlx5/mlx5_vdpa_mem.c   |  11 --\n drivers/vdpa/mlx5/mlx5_vdpa_steer.c |  25 +----\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  44 --------\n 6 files changed, 147 insertions(+), 130 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex 48f20d9ecdb..7e57ae715a8 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -5,6 +5,7 @@\n #include <net/if.h>\n #include <sys/socket.h>\n #include <sys/ioctl.h>\n+#include <sys/mman.h>\n #include <fcntl.h>\n #include <netinet/in.h>\n \n@@ -49,6 +50,8 @@ TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =\n \t\t\t\t\t      TAILQ_HEAD_INITIALIZER(priv_list);\n static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;\n \n+static void mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv);\n+\n static struct mlx5_vdpa_priv *\n mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)\n {\n@@ -250,7 +253,6 @@ mlx5_vdpa_dev_close(int vid)\n \t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n \t\treturn -1;\n \t}\n-\tmlx5_vdpa_err_event_unset(priv);\n \tmlx5_vdpa_cqe_event_unset(priv);\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED) {\n \t\tret |= mlx5_vdpa_lm_log(priv);\n@@ -258,7 +260,6 @@ mlx5_vdpa_dev_close(int vid)\n \t}\n \tmlx5_vdpa_steer_unset(priv);\n \tmlx5_vdpa_virtqs_release(priv);\n-\tmlx5_vdpa_event_qp_global_release(priv);\n \tmlx5_vdpa_mem_dereg(priv);\n \tpriv->state = MLX5_VDPA_STATE_PROBED;\n \tpriv->vid = 0;\n@@ -288,7 +289,7 @@ mlx5_vdpa_dev_config(int vid)\n \tif (mlx5_vdpa_mtu_set(priv))\n \t\tDRV_LOG(WARNING, \"MTU cannot be set on device %s.\",\n \t\t\t\tvdev->device->name);\n-\tif (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||\n+\tif (mlx5_vdpa_mem_register(priv) ||\n \t    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||\n \t    mlx5_vdpa_cqe_event_setup(priv)) {\n \t\tmlx5_vdpa_dev_close(vid);\n@@ -507,13 +508,88 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n \tDRV_LOG(DEBUG, \"no traffic max is %u.\", priv->no_traffic_max);\n }\n \n+static int\n+mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)\n+{\n+\tstruct mlx5_devx_tis_attr tis_attr = {0};\n+\tstruct ibv_context *ctx = priv->cdev->ctx;\n+\tuint32_t i;\n+\tint retry;\n+\n+\tfor (retry = 0; retry < 7; retry++) {\n+\t\tpriv->var = mlx5_glue->dv_alloc_var(ctx, 0);\n+\t\tif (priv->var != NULL)\n+\t\t\tbreak;\n+\t\tDRV_LOG(WARNING, \"Failed to allocate VAR, retry %d.\", retry);\n+\t\t/* Wait Qemu release VAR during vdpa restart, 0.1 sec based. */\n+\t\tusleep(100000U << retry);\n+\t}\n+\tif (!priv->var) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate VAR %u.\", errno);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\t/* Always map the entire page. */\n+\tpriv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |\n+\t\t\t\t   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,\n+\t\t\t\t   priv->var->mmap_off);\n+\tif (priv->virtq_db_addr == MAP_FAILED) {\n+\t\tDRV_LOG(ERR, \"Failed to map doorbell page %u.\", errno);\n+\t\tpriv->virtq_db_addr = NULL;\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tDRV_LOG(DEBUG, \"VAR address of doorbell mapping is %p.\",\n+\t\tpriv->virtq_db_addr);\n+\tpriv->td = mlx5_devx_cmd_create_td(ctx);\n+\tif (!priv->td) {\n+\t\tDRV_LOG(ERR, \"Failed to create transport domain.\");\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\ttis_attr.transport_domain = priv->td->id;\n+\tfor (i = 0; i < priv->num_lag_ports; i++) {\n+\t\t/* 0 is auto affinity, non-zero value to propose port. 
*/\n+\t\ttis_attr.lag_tx_port_affinity = i + 1;\n+\t\tpriv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);\n+\t\tif (!priv->tiss[i]) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create TIS %u.\", i);\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\tpriv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);\n+\tif (!priv->null_mr) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate null MR.\");\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tDRV_LOG(DEBUG, \"Dump fill Mkey = %u.\", priv->null_mr->lkey);\n+\tpriv->steer.domain = mlx5_glue->dr_create_domain(ctx,\n+\t\t\t\t\tMLX5DV_DR_DOMAIN_TYPE_NIC_RX);\n+\tif (!priv->steer.domain) {\n+\t\tDRV_LOG(ERR, \"Failed to create Rx domain.\");\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tpriv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);\n+\tif (!priv->steer.tbl) {\n+\t\tDRV_LOG(ERR, \"Failed to create table 0 with Rx domain.\");\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tif (mlx5_vdpa_err_event_setup(priv) != 0)\n+\t\treturn -rte_errno;\n+\tif (mlx5_vdpa_event_qp_global_prepare(priv))\n+\t\treturn -rte_errno;\n+\treturn 0;\n+}\n+\n static int\n mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \t\t    struct mlx5_kvargs_ctrl *mkvlist)\n {\n \tstruct mlx5_vdpa_priv *priv = NULL;\n \tstruct mlx5_hca_attr *attr = &cdev->config.hca_attr;\n-\tint retry;\n \n \tif (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {\n \t\tDRV_LOG(ERR, \"Not enough capabilities to support vdpa, maybe \"\n@@ -537,25 +613,10 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \tpriv->num_lag_ports = attr->num_lag_ports;\n \tif (attr->num_lag_ports == 0)\n \t\tpriv->num_lag_ports = 1;\n+\tpthread_mutex_init(&priv->vq_config_lock, NULL);\n \tpriv->cdev = cdev;\n-\tfor (retry = 0; retry < 7; retry++) {\n-\t\tpriv->var = mlx5_glue->dv_alloc_var(priv->cdev->ctx, 0);\n-\t\tif (priv->var != NULL)\n-\t\t\tbreak;\n-\t\tDRV_LOG(WARNING, \"Failed to allocate VAR, retry %d.\\n\", retry);\n-\t\t/* Wait Qemu release VAR during vdpa restart, 0.1 sec based. 
*/\n-\t\tusleep(100000U << retry);\n-\t}\n-\tif (!priv->var) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate VAR %u.\", errno);\n+\tif (mlx5_vdpa_create_dev_resources(priv))\n \t\tgoto error;\n-\t}\n-\tpriv->err_intr_handle =\n-\t\trte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);\n-\tif (priv->err_intr_handle == NULL) {\n-\t\tDRV_LOG(ERR, \"Fail to allocate intr_handle\");\n-\t\tgoto error;\n-\t}\n \tpriv->vdev = rte_vdpa_register_device(cdev->dev, &mlx5_vdpa_ops);\n \tif (priv->vdev == NULL) {\n \t\tDRV_LOG(ERR, \"Failed to register vDPA device.\");\n@@ -564,19 +625,13 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \t}\n \tmlx5_vdpa_config_get(mkvlist, priv);\n \tSLIST_INIT(&priv->mr_list);\n-\tpthread_mutex_init(&priv->vq_config_lock, NULL);\n \tpthread_mutex_lock(&priv_list_lock);\n \tTAILQ_INSERT_TAIL(&priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n \treturn 0;\n-\n error:\n-\tif (priv) {\n-\t\tif (priv->var)\n-\t\t\tmlx5_glue->dv_free_var(priv->var);\n-\t\trte_intr_instance_free(priv->err_intr_handle);\n-\t\trte_free(priv);\n-\t}\n+\tif (priv)\n+\t\tmlx5_vdpa_dev_release(priv);\n \treturn -rte_errno;\n }\n \n@@ -596,22 +651,48 @@ mlx5_vdpa_dev_remove(struct mlx5_common_device *cdev)\n \tif (found)\n \t\tTAILQ_REMOVE(&priv_list, priv, next);\n \tpthread_mutex_unlock(&priv_list_lock);\n-\tif (found) {\n-\t\tif (priv->state == MLX5_VDPA_STATE_CONFIGURED)\n-\t\t\tmlx5_vdpa_dev_close(priv->vid);\n-\t\tif (priv->var) {\n-\t\t\tmlx5_glue->dv_free_var(priv->var);\n-\t\t\tpriv->var = NULL;\n-\t\t}\n-\t\tif (priv->vdev)\n-\t\t\trte_vdpa_unregister_device(priv->vdev);\n-\t\tpthread_mutex_destroy(&priv->vq_config_lock);\n-\t\trte_intr_instance_free(priv->err_intr_handle);\n-\t\trte_free(priv);\n-\t}\n+\tif (found)\n+\t\tmlx5_vdpa_dev_release(priv);\n \treturn 0;\n }\n \n+static void\n+mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t i;\n+\n+\tmlx5_vdpa_event_qp_global_release(priv);\n+\tmlx5_vdpa_err_event_unset(priv);\n+\tif (priv->steer.tbl)\n+\t\tclaim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));\n+\tif (priv->steer.domain)\n+\t\tclaim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));\n+\tif (priv->null_mr)\n+\t\tclaim_zero(mlx5_glue->dereg_mr(priv->null_mr));\n+\tfor (i = 0; i < priv->num_lag_ports; i++) {\n+\t\tif (priv->tiss[i])\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));\n+\t}\n+\tif (priv->td)\n+\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->td));\n+\tif (priv->virtq_db_addr)\n+\t\tclaim_zero(munmap(priv->virtq_db_addr, priv->var->length));\n+\tif (priv->var)\n+\t\tmlx5_glue->dv_free_var(priv->var);\n+}\n+\n+static void\n+mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n+{\n+\tif (priv->state == MLX5_VDPA_STATE_CONFIGURED)\n+\t\tmlx5_vdpa_dev_close(priv->vid);\n+\tmlx5_vdpa_release_dev_resources(priv);\n+\tif (priv->vdev)\n+\t\trte_vdpa_unregister_device(priv->vdev);\n+\tpthread_mutex_destroy(&priv->vq_config_lock);\n+\trte_free(priv);\n+}\n+\n static const struct rte_pci_id mlx5_vdpa_pci_id_map[] = {\n \t{\n \t\tRTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex cc83d7cba3d..e0ba20b953c 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -233,6 +233,15 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n  */\n void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);\n \n+/**\n+ * Create all the event global resources.\n+ *\n+ * @param[in] priv\n+ *   The vdpa 
driver private structure.\n+ */\n+int\n+mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);\n+\n /**\n  * Release all the event global resources.\n  *\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex f8d910b33f8..7167a98db0f 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -40,11 +40,9 @@ mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)\n }\n \n /* Prepare all the global resources for all the event objects.*/\n-static int\n+int\n mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tif (priv->eventc)\n-\t\treturn 0;\n \tpriv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,\n \t\t\t   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);\n \tif (!priv->eventc) {\n@@ -389,22 +387,30 @@ mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)\n \tflags = fcntl(priv->err_chnl->fd, F_GETFL);\n \tret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);\n \tif (ret) {\n+\t\trte_errno = errno;\n \t\tDRV_LOG(ERR, \"Failed to change device event channel FD.\");\n \t\tgoto error;\n \t}\n-\n+\tpriv->err_intr_handle =\n+\t\trte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);\n+\tif (priv->err_intr_handle == NULL) {\n+\t\tDRV_LOG(ERR, \"Fail to allocate intr_handle\");\n+\t\tgoto error;\n+\t}\n \tif (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))\n \t\tgoto error;\n \n \tif (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))\n \t\tgoto error;\n \n-\tif (rte_intr_callback_register(priv->err_intr_handle,\n-\t\t\t\t       mlx5_vdpa_err_interrupt_handler,\n-\t\t\t\t       priv)) {\n+\tret = rte_intr_callback_register(priv->err_intr_handle,\n+\t\t\t\t\t mlx5_vdpa_err_interrupt_handler,\n+\t\t\t\t\t priv);\n+\tif (ret != 0) {\n \t\trte_intr_fd_set(priv->err_intr_handle, 0);\n \t\tDRV_LOG(ERR, \"Failed to register error interrupt for device %d.\",\n \t\t\tpriv->vid);\n+\t\trte_errno = -ret;\n \t\tgoto error;\n \t} else {\n \t\tDRV_LOG(DEBUG, \"Registered error interrupt for device%d.\",\n@@ -453,6 +459,7 @@ mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)\n \t\tmlx5_glue->devx_destroy_event_channel(priv->err_chnl);\n \t\tpriv->err_chnl = NULL;\n \t}\n+\trte_intr_instance_free(priv->err_intr_handle);\n }\n \n int\n@@ -575,8 +582,6 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \tuint16_t log_desc_n = rte_log2_u32(desc_n);\n \tuint32_t ret;\n \n-\tif (mlx5_vdpa_event_qp_global_prepare(priv))\n-\t\treturn -1;\n \tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))\n \t\treturn -1;\n \tattr.pd = priv->cdev->pdn;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\nindex 599079500b0..62f5530e91d 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n@@ -34,10 +34,6 @@ mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv)\n \tSLIST_INIT(&priv->mr_list);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n-\tif (priv->null_mr) {\n-\t\tclaim_zero(mlx5_glue->dereg_mr(priv->null_mr));\n-\t\tpriv->null_mr = NULL;\n-\t}\n \tif (priv->vmem) {\n \t\tfree(priv->vmem);\n \t\tpriv->vmem = NULL;\n@@ -196,13 +192,6 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \tif (!mem)\n \t\treturn -rte_errno;\n \tpriv->vmem = mem;\n-\tpriv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);\n-\tif (!priv->null_mr) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate null MR.\");\n-\t\tret = -errno;\n-\t\tgoto 
error;\n-\t}\n-\tDRV_LOG(DEBUG, \"Dump fill Mkey = %u.\", priv->null_mr->lkey);\n \tfor (i = 0; i < mem->nregions; i++) {\n \t\treg = &mem->regions[i];\n \t\tentry = rte_zmalloc(__func__, sizeof(*entry), 0);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\nindex a0fd2776e57..e42868486e7 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n@@ -45,14 +45,6 @@ void\n mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)\n {\n \tmlx5_vdpa_rss_flows_destroy(priv);\n-\tif (priv->steer.tbl) {\n-\t\tclaim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));\n-\t\tpriv->steer.tbl = NULL;\n-\t}\n-\tif (priv->steer.domain) {\n-\t\tclaim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));\n-\t\tpriv->steer.domain = NULL;\n-\t}\n \tif (priv->steer.rqt) {\n \t\tclaim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));\n \t\tpriv->steer.rqt = NULL;\n@@ -248,11 +240,7 @@ mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)\n \tint ret = mlx5_vdpa_rqt_prepare(priv);\n \n \tif (ret == 0) {\n-\t\tmlx5_vdpa_rss_flows_destroy(priv);\n-\t\tif (priv->steer.rqt) {\n-\t\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));\n-\t\t\tpriv->steer.rqt = NULL;\n-\t\t}\n+\t\tmlx5_vdpa_steer_unset(priv);\n \t} else if (ret < 0) {\n \t\treturn ret;\n \t} else if (!priv->steer.rss[0].flow) {\n@@ -269,17 +257,6 @@ int\n mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)\n {\n #ifdef HAVE_MLX5DV_DR\n-\tpriv->steer.domain = mlx5_glue->dr_create_domain(priv->cdev->ctx,\n-\t\t\t\t\t\t  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);\n-\tif (!priv->steer.domain) {\n-\t\tDRV_LOG(ERR, \"Failed to create Rx domain.\");\n-\t\tgoto error;\n-\t}\n-\tpriv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);\n-\tif (!priv->steer.tbl) {\n-\t\tDRV_LOG(ERR, \"Failed to create table 0 with Rx domain.\");\n-\t\tgoto error;\n-\t}\n \tif (mlx5_vdpa_steer_update(priv))\n \t\tgoto error;\n \treturn 0;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex b1d584ca8b0..6bda9f1814a 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -3,7 +3,6 @@\n  */\n #include <string.h>\n #include <unistd.h>\n-#include <sys/mman.h>\n #include <sys/eventfd.h>\n \n #include <rte_malloc.h>\n@@ -120,20 +119,6 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n \t\tif (virtq->counters)\n \t\t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->counters));\n \t}\n-\tfor (i = 0; i < priv->num_lag_ports; i++) {\n-\t\tif (priv->tiss[i]) {\n-\t\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));\n-\t\t\tpriv->tiss[i] = NULL;\n-\t\t}\n-\t}\n-\tif (priv->td) {\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->td));\n-\t\tpriv->td = NULL;\n-\t}\n-\tif (priv->virtq_db_addr) {\n-\t\tclaim_zero(munmap(priv->virtq_db_addr, priv->var->length));\n-\t\tpriv->virtq_db_addr = NULL;\n-\t}\n \tpriv->features = 0;\n \tmemset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);\n \tpriv->nr_virtqs = 0;\n@@ -462,8 +447,6 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)\n int\n mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tstruct mlx5_devx_tis_attr tis_attr = {0};\n-\tstruct ibv_context *ctx = priv->cdev->ctx;\n \tuint32_t i;\n \tuint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);\n \tint ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);\n@@ -485,33 +468,6 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\t(int)nr_vring);\n \t\treturn -1;\n \t}\n-\t/* Always map the entire page. 
*/\n-\tpriv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |\n-\t\t\t\t   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,\n-\t\t\t\t   priv->var->mmap_off);\n-\tif (priv->virtq_db_addr == MAP_FAILED) {\n-\t\tDRV_LOG(ERR, \"Failed to map doorbell page %u.\", errno);\n-\t\tpriv->virtq_db_addr = NULL;\n-\t\tgoto error;\n-\t} else {\n-\t\tDRV_LOG(DEBUG, \"VAR address of doorbell mapping is %p.\",\n-\t\t\tpriv->virtq_db_addr);\n-\t}\n-\tpriv->td = mlx5_devx_cmd_create_td(ctx);\n-\tif (!priv->td) {\n-\t\tDRV_LOG(ERR, \"Failed to create transport domain.\");\n-\t\treturn -rte_errno;\n-\t}\n-\ttis_attr.transport_domain = priv->td->id;\n-\tfor (i = 0; i < priv->num_lag_ports; i++) {\n-\t\t/* 0 is auto affinity, non-zero value to propose port. */\n-\t\ttis_attr.lag_tx_port_affinity = i + 1;\n-\t\tpriv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);\n-\t\tif (!priv->tiss[i]) {\n-\t\t\tDRV_LOG(ERR, \"Failed to create TIS %u.\", i);\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n \tpriv->nr_virtqs = nr_vring;\n \tfor (i = 0; i < nr_vring; i++)\n \t\tif (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))\n",
    "prefixes": [
        "4/7"
    ]
}
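
As a usage sketch (again assuming the Python "requests" package, and following the URLs embedded in the response above), the "mbox" field yields the raw patch email suitable for git am, and the "checks" field lists the individual CI results behind the aggregate "check": "success" value; the "context"/"state" field names follow the Patchwork check schema, and the local file name is only an example.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/108266/").json()

# Save the raw mbox so it can be applied with `git am 108266.mbox`.
mbox = requests.get(patch["mbox"])
with open("108266.mbox", "wb") as f:
    f.write(mbox.content)

# Inspect the individual CI checks behind the aggregate "check" field.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])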