get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces the writable fields of the resource).

GET /api/patches/103622/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103622,
    "url": "http://patches.dpdk.org/api/patches/103622/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211103075838.1486056-14-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211103075838.1486056-14-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211103075838.1486056-14-xuemingl@nvidia.com",
    "date": "2021-11-03T07:58:37",
    "name": "[v3,13/14] net/mlx5: support shared Rx queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4c4ef2f7d6e08c0615d1f8bd4c11c726c74f598a",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211103075838.1486056-14-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 20258,
            "url": "http://patches.dpdk.org/api/series/20258/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=20258",
            "date": "2021-11-03T07:58:24",
            "name": "net/mlx5: support shared Rx queue",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/20258/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/103622/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/103622/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3A5E0A0C53;\n\tWed,  3 Nov 2021 09:01:49 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0435C41176;\n\tWed,  3 Nov 2021 09:01:29 +0100 (CET)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2060.outbound.protection.outlook.com [40.107.93.60])\n by mails.dpdk.org (Postfix) with ESMTP id 4976E41147\n for <dev@dpdk.org>; Wed,  3 Nov 2021 09:01:26 +0100 (CET)",
            "from MWHPR18CA0037.namprd18.prod.outlook.com (2603:10b6:320:31::23)\n by BL0PR12MB2385.namprd12.prod.outlook.com (2603:10b6:207:4d::15)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4649.19; Wed, 3 Nov\n 2021 08:01:24 +0000",
            "from CO1NAM11FT023.eop-nam11.prod.protection.outlook.com\n (2603:10b6:320:31:cafe::b3) by MWHPR18CA0037.outlook.office365.com\n (2603:10b6:320:31::23) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.10 via Frontend\n Transport; Wed, 3 Nov 2021 08:01:24 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT023.mail.protection.outlook.com (10.13.175.35) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Wed, 3 Nov 2021 08:01:23 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 3 Nov\n 2021 08:00:18 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Vv5xz25844+ii3waETEHV/+51LGP0Ga2vrltBdCjJmewqGaEEfBLeiEXOIH81qTwBf3POEACBKUPTQfi8bYRgPytModmil4a675y66+e7ozHiKli80GqnuJHviQ4OUUaP+Jvibwzrl9wW5VkxbPMkNTLAqKm9OxautuaPOnMynkDMS85138PlTJTaScmXoWCIAqr7Z+40QSyzrf5XPPmPOh86WNkOpKNnBsRuOq1FEI1+yE3+26XYl4qT1iMz3eo8l5TjCXN0wpp7lJwk0PBBB3SxQi8dS8C3PM19oKTWrK8x7+CmpCt+qk4AaEvSyCljaO66OB24hC0/4AGZyLb6A==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=DnvdthN690zJ/dKKg1AcsVNbB0GxMSuZ62FqClRp+z0=;\n b=E/Ixow5uwTowiY65sSuT9Vusd0mpO3ZIB5bv8GRw+NKhaQA3MY30aabBar3K//8/t4GK6hIg25dFo1LvsRnrp65+J0sOeXmp3mmTItYcrU4bh/d7gr0EaxDenQVLMANP1sUGLd9dFfuX8jfLvXigQbMjTcxRag6dCp4BSNN1tMxlCIIvzzU2Q33lWp9Y6CebcXa921n1TQE2W89mOcwIjQvjQ/sWs1+TxRzV9gKeBtQufUQzcfEBb6uHTMVTMKfoiF5Lr0oHxQmaOr0431ZEYO52KgldE9+QZfbzLgTIlxhv2Y3y9MEaU+UeBGc4mPA5R4BPJ2+rbNIwu0A+8fBllA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=DnvdthN690zJ/dKKg1AcsVNbB0GxMSuZ62FqClRp+z0=;\n b=RZear9Hq+bvKlCOrlAxPypGxZIG4HQrQvrbOWUp1BaJlqh3wMNGt4+EmdvFFLfc2Pj05isaDbhxIDPSUjCSSfeRGd7pE6S1/u4U9/gVPY55ZCooM8aNQf2jOzAhxHugSAZI+5hWYXE16WlbosQfun+12pRwUxflyxiRMqo0L30re//qMEi2mnJ1v7BqSClerQjRt3iXUlV7Gtc5dVobd2CYP1JuWwaXYVlGRXfKg5Rf1dPyGAFyyjLqFdMZbOhcMzD0drvSKAscWEC/49QIXBGi/L61Z4ilYrwunm2yh0BDZ+Y5e7naZ/M59QFiy2yVeTB3k2W/4qkbdFfAhmSQHAg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Wed, 3 Nov 2021 15:58:37 +0800",
        "Message-ID": "<20211103075838.1486056-14-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211103075838.1486056-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211103075838.1486056-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "4eefd152-6896-4082-47b9-08d99ea02134",
        "X-MS-TrafficTypeDiagnostic": "BL0PR12MB2385:",
        "X-Microsoft-Antispam-PRVS": "\n <BL0PR12MB23852C28A792BBF0BE29799FA18C9@BL0PR12MB2385.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:1091;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 4R77FXm3MgLz80CAMEWQThDeUoZ/Q/IXi7ys9m/0rS8XONi3JujBkqZYY+YwVxZNuK3+x0SRbShZYuvG2qaXXt33PjO7fK2qU8X1QWTTCLNKLbLsimMNIbMdwYzT0wM6dBjsqkFKYgDYJgnutkj0cRA6LrscxXCUYivPcItpbRNjoNq84RZIJFVAOYHFm5hujE9yAUsv6cyYQ84BKCmS5eUpzc7lnpzOgpEfBaTwlxj/N3E9/ykRuC5hhVXzMDktoQy8wWwufdgF5ApCXzIw95YTzyQHy4ArQ1OizuiaqrdY2DTObDKhyF4SfyciCP0z/GlpVSu0L/enhHppoSgxrN0E3qEpRttHOlDHnhydt42aPwEN0S5jIsEVZwsteCbZ1KZAyhwyIIT5Sf7lLKIgXfHyGReWHx6N/zrDr44+Aaf4Lzcyn0NnGY8jxck8505SD6qNKV3uNyFw69vXF1S4Og1j5KLmBWipSCY6Urp8hPSR0ClzleDMdk/AefZUWocrPZ31/GU88I+kLtsI9iAsxIddCXD52YbBkBMeAaTL91VQHidVsKCs4gJAzlV1Lh/Mx6/F8i8okI6RWiiDKwVyHNZc5ru2utCLzILHGbUxczA60cK/MEqiJh+Bi6+Uldy/BtrUONcwSgnLEkFCd+IAj3Rjc11A2y3qj9m5OD2qyZkyR/nXBms/UQwYdVucJ2+KLzg+9b2o39Qe3YPbZ9o2YA==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(70206006)(4326008)(70586007)(30864003)(2906002)(47076005)(7696005)(36860700001)(8676002)(5660300002)(82310400003)(8936002)(36756003)(6666004)(186003)(55016002)(426003)(6916009)(336012)(6286002)(26005)(1076003)(7636003)(54906003)(107886003)(83380400001)(508600001)(16526019)(316002)(2616005)(86362001)(356005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "03 Nov 2021 08:01:23.7516 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 4eefd152-6896-4082-47b9-08d99ea02134",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT023.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BL0PR12MB2385",
        "Subject": "[dpdk-dev] [PATCH v3 13/14] net/mlx5: support shared Rx queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch introduces shared RxQ. All shared Rx queues with same group\nand queue ID share the same rxq_ctrl. Rxq_ctrl and rxq_data are shared,\nall queues from different member port share same WQ and CQ, essentially\none Rx WQ, mbufs are filled into this singleton WQ.\n\nShared rxq_data is set into device Rx queues of all member ports as\nRxQ object, used for receiving packets. Polling queue of any member\nports returns packets of any member, mbuf->port is used to identify\nsource port.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n doc/guides/nics/features/mlx5.ini   |   1 +\n doc/guides/nics/mlx5.rst            |   6 +\n drivers/net/mlx5/linux/mlx5_os.c    |   2 +\n drivers/net/mlx5/linux/mlx5_verbs.c |   8 +-\n drivers/net/mlx5/mlx5.h             |   2 +\n drivers/net/mlx5/mlx5_devx.c        |  46 +++--\n drivers/net/mlx5/mlx5_ethdev.c      |   5 +\n drivers/net/mlx5/mlx5_rx.h          |   3 +\n drivers/net/mlx5/mlx5_rxq.c         | 274 ++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_trigger.c     |  61 ++++---\n 10 files changed, 330 insertions(+), 78 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini\nindex 403f58cd7e2..7cbd11bb160 100644\n--- a/doc/guides/nics/features/mlx5.ini\n+++ b/doc/guides/nics/features/mlx5.ini\n@@ -11,6 +11,7 @@ Removal event        = Y\n Rx interrupt         = Y\n Fast mbuf free       = Y\n Queue start/stop     = Y\n+Shared Rx queue      = Y\n Burst mode info      = Y\n Power mgmt address monitor = Y\n MTU update           = Y\ndiff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\nindex bb92520dff4..824971d89ae 100644\n--- a/doc/guides/nics/mlx5.rst\n+++ b/doc/guides/nics/mlx5.rst\n@@ -113,6 +113,7 @@ Features\n - Connection tracking.\n - Sub-Function representors.\n - Sub-Function.\n+- Shared Rx queue.\n \n \n Limitations\n@@ -465,6 +466,11 @@ Limitations\n   - In order to achieve best insertion rate, application should manage the flows per lcore.\n   - Better to disable memory reclaim by setting ``reclaim_mem_mode`` to 0 to accelerate the flow object allocation and release with cache.\n \n+ Shared Rx queue:\n+\n+  - Counters of received packets and bytes number of devices in same share group are same.\n+  - Counters of received packets and bytes number of queues in same group and queue ID are same.\n+\n - HW hashed bonding\n \n   - TXQ affinity subjects to HW hash once enabled.\ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex dd4fc0c7165..48acae65133 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -410,6 +410,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\t\tmlx5_glue->dr_create_flow_action_default_miss();\n \tif (!sh->default_miss_action)\n \t\tDRV_LOG(WARNING, \"Default miss action is not supported.\");\n+\tLIST_INIT(&sh->shared_rxqs);\n \treturn 0;\n error:\n \t/* Rollback the created objects. 
*/\n@@ -484,6 +485,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)\n \tMLX5_ASSERT(sh && sh->refcnt);\n \tif (sh->refcnt > 1)\n \t\treturn;\n+\tMLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));\n #ifdef HAVE_MLX5DV_DR\n \tif (sh->rx_domain) {\n \t\tmlx5_glue->dr_destroy_domain(sh->rx_domain);\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex f78916c868f..9d299542614 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -424,14 +424,16 @@ mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)\n {\n \tstruct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;\n \n-\tMLX5_ASSERT(rxq_obj);\n-\tMLX5_ASSERT(rxq_obj->wq);\n-\tMLX5_ASSERT(rxq_obj->ibv_cq);\n+\tif (rxq_obj == NULL || rxq_obj->wq == NULL)\n+\t\treturn;\n \tclaim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));\n+\trxq_obj->wq = NULL;\n+\tMLX5_ASSERT(rxq_obj->ibv_cq);\n \tclaim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));\n \tif (rxq_obj->ibv_channel)\n \t\tclaim_zero(mlx5_glue->destroy_comp_channel\n \t\t\t\t\t\t\t(rxq_obj->ibv_channel));\n+\trxq->ctrl->started = false;\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 75c58b93f91..3950f0dabb0 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1172,6 +1172,7 @@ struct mlx5_dev_ctx_shared {\n \tstruct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];\n \t/* Flex parser profiles information. */\n \tvoid *devx_rx_uar; /* DevX UAR for Rx. */\n+\tLIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */\n \tstruct mlx5_aso_age_mng *aso_age_mng;\n \t/* Management data for aging mechanism using ASO Flow Hit. */\n \tstruct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;\n@@ -1239,6 +1240,7 @@ struct mlx5_rxq_obj {\n \t\t};\n \t\tstruct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */\n \t\tstruct {\n+\t\t\tstruct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */\n \t\t\tstruct mlx5_devx_cq cq_obj; /* DevX CQ object. 
*/\n \t\t\tvoid *devx_channel;\n \t\t};\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 668d47025e8..d3d189ab7f2 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -88,6 +88,8 @@ mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)\n \tdefault:\n \t\tbreak;\n \t}\n+\tif (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)\n+\t\treturn mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);\n \treturn mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);\n }\n \n@@ -156,18 +158,21 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,\n static void\n mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n-\tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;\n \n-\tMLX5_ASSERT(rxq != NULL);\n-\tMLX5_ASSERT(rxq_ctrl != NULL);\n+\tif (rxq_obj == NULL)\n+\t\treturn;\n \tif (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {\n-\t\tMLX5_ASSERT(rxq_obj->rq);\n+\t\tif (rxq_obj->rq == NULL)\n+\t\t\treturn;\n \t\tmlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);\n \t\tclaim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));\n \t} else {\n+\t\tif (rxq->devx_rq.rq == NULL)\n+\t\t\treturn;\n \t\tmlx5_devx_rq_destroy(&rxq->devx_rq);\n-\t\tmemset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));\n+\t\tif (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)\n+\t\t\treturn;\n \t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n \t\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n \t\tif (rxq_obj->devx_channel) {\n@@ -176,6 +181,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)\n \t\t\trxq_obj->devx_channel = NULL;\n \t\t}\n \t}\n+\trxq->ctrl->started = false;\n }\n \n /**\n@@ -271,6 +277,8 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)\n \t\t\t\t\t\tMLX5_WQ_END_PAD_MODE_NONE;\n \trq_attr.wq_attr.pd = cdev->pdn;\n \trq_attr.counter_set_id = priv->counter_set_id;\n+\tif (rxq_data->shared) /* 
Create RMP based RQ. */\n+\t\trxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;\n \t/* Create RQ using DevX API. */\n \treturn mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,\n \t\t\t\t   log_desc_n, &rq_attr, rxq_ctrl->socket);\n@@ -300,6 +308,8 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)\n \tuint16_t event_nums[1] = { 0 };\n \tint ret = 0;\n \n+\tif (rxq_ctrl->started)\n+\t\treturn 0;\n \tif (priv->config.cqe_comp && !rxq_data->hw_timestamp &&\n \t    !rxq_data->lro) {\n \t\tcq_attr.cqe_comp_en = 1u;\n@@ -365,6 +375,7 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)\n \trxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);\n \trxq_data->cqe_n = log_cqe_n;\n \trxq_data->cqn = cq_obj->cq->id;\n+\trxq_data->cq_ci = 0;\n \tif (rxq_ctrl->obj->devx_channel) {\n \t\tret = mlx5_os_devx_subscribe_devx_event\n \t\t\t\t\t      (rxq_ctrl->obj->devx_channel,\n@@ -463,7 +474,7 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)\n \tif (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)\n \t\treturn mlx5_rxq_obj_hairpin_new(rxq);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n-\tif (rxq_ctrl->irq) {\n+\tif (rxq_ctrl->irq && !rxq_ctrl->started) {\n \t\tint devx_ev_flag =\n \t\t\t  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;\n \n@@ -496,11 +507,19 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)\n \tret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret)\n \t\tgoto error;\n-\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n-\trxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;\n-\tmlx5_rxq_initialize(rxq_data);\n+\tif (!rxq_data->shared) {\n+\t\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n+\t\trxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;\n+\t} else if (!rxq_ctrl->started) {\n+\t\trxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;\n+\t\trxq_data->rq_db =\n+\t\t\t\t(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;\n+\t}\n+\tif (!rxq_ctrl->started) 
{\n+\t\tmlx5_rxq_initialize(rxq_data);\n+\t\trxq_ctrl->wqn = rxq->devx_rq.rq->id;\n+\t}\n \tpriv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;\n-\trxq_ctrl->wqn = rxq->devx_rq.rq->id;\n \treturn 0;\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n@@ -558,7 +577,10 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,\n \t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);\n \n \t\tMLX5_ASSERT(rxq != NULL);\n-\t\trqt_attr->rq_list[i] = rxq->devx_rq.rq->id;\n+\t\tif (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)\n+\t\t\trqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;\n+\t\telse\n+\t\t\trqt_attr->rq_list[i] = rxq->devx_rq.rq->id;\n \t}\n \tMLX5_ASSERT(i > 0);\n \tfor (j = 0; i != rqt_n; ++j, ++i)\ndiff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c\nindex bb38d5d2ade..dc647d5580c 100644\n--- a/drivers/net/mlx5/mlx5_ethdev.c\n+++ b/drivers/net/mlx5/mlx5_ethdev.c\n@@ -26,6 +26,7 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n #include \"mlx5_autoconf.h\"\n+#include \"mlx5_devx.h\"\n \n /**\n  * Get the interface index from device name.\n@@ -336,9 +337,13 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)\n \tinfo->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;\n \tmlx5_set_default_params(dev, info);\n \tmlx5_set_txlimit_params(dev, info);\n+\tif (priv->config.hca_attr.mem_rq_rmp &&\n+\t    priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)\n+\t\tinfo->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE;\n \tinfo->switch_info.name = dev->data->name;\n \tinfo->switch_info.domain_id = priv->domain_id;\n \tinfo->switch_info.port_id = priv->representor_id;\n+\tinfo->switch_info.rx_domain = 0; /* No sub Rx domains. 
*/\n \tif (priv->representor) {\n \t\tuint16_t port_id;\n \ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 413e36f6d8d..eda6eca8dea 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -96,6 +96,7 @@ struct mlx5_rxq_data {\n \tunsigned int lro:1; /* Enable LRO. */\n \tunsigned int dynf_meta:1; /* Dynamic metadata is configured. */\n \tunsigned int mcqe_format:3; /* CQE compression format. */\n+\tunsigned int shared:1; /* Shared RXQ. */\n \tvolatile uint32_t *rq_db;\n \tvolatile uint32_t *cq_db;\n \tuint16_t port_id;\n@@ -158,8 +159,10 @@ struct mlx5_rxq_ctrl {\n \tstruct mlx5_dev_ctx_shared *sh; /* Shared context. */\n \tenum mlx5_rxq_type type; /* Rxq type. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n+\tLIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */\n \tuint32_t share_group; /* Group ID of shared RXQ. */\n \tuint16_t share_qid; /* Shared RxQ ID in group. */\n+\tunsigned int started:1; /* Whether (shared) RXQ has been started. */\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n \tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex f3fc618ed2c..0f1f4660bc7 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -29,6 +29,7 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_autoconf.h\"\n+#include \"mlx5_devx.h\"\n \n \n /* Default RSS hash key also used for ConnectX-3. 
*/\n@@ -633,14 +634,19 @@ mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)\n  *   RX queue index.\n  * @param desc\n  *   Number of descriptors to configure in queue.\n+ * @param[out] rxq_ctrl\n+ *   Address of pointer to shared Rx queue control.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)\n+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,\n+\t\t\tstruct mlx5_rxq_ctrl **rxq_ctrl)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_priv *rxq;\n+\tbool empty;\n \n \tif (!rte_is_power_of_2(*desc)) {\n \t\t*desc = 1 << log2above(*desc);\n@@ -657,16 +663,143 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)\n \t\trte_errno = EOVERFLOW;\n \t\treturn -rte_errno;\n \t}\n-\tif (!mlx5_rxq_releasable(dev, idx)) {\n-\t\tDRV_LOG(ERR, \"port %u unable to release queue index %u\",\n-\t\t\tdev->data->port_id, idx);\n-\t\trte_errno = EBUSY;\n-\t\treturn -rte_errno;\n+\tif (rxq_ctrl == NULL || *rxq_ctrl == NULL)\n+\t\treturn 0;\n+\tif (!(*rxq_ctrl)->rxq.shared) {\n+\t\tif (!mlx5_rxq_releasable(dev, idx)) {\n+\t\t\tDRV_LOG(ERR, \"port %u unable to release queue index %u\",\n+\t\t\t\tdev->data->port_id, idx);\n+\t\t\trte_errno = EBUSY;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tmlx5_rxq_release(dev, idx);\n+\t} else {\n+\t\tif ((*rxq_ctrl)->obj != NULL)\n+\t\t\t/* Some port using shared Rx queue has been started. */\n+\t\t\treturn 0;\n+\t\t/* Release all owner RxQ to reconfigure Shared RxQ. 
*/\n+\t\tdo {\n+\t\t\trxq = LIST_FIRST(&(*rxq_ctrl)->owners);\n+\t\t\tLIST_REMOVE(rxq, owner_entry);\n+\t\t\tempty = LIST_EMPTY(&(*rxq_ctrl)->owners);\n+\t\t\tmlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);\n+\t\t} while (!empty);\n+\t\t*rxq_ctrl = NULL;\n \t}\n-\tmlx5_rxq_release(dev, idx);\n \treturn 0;\n }\n \n+/**\n+ * Get the shared Rx queue object that matches group and queue index.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param group\n+ *   Shared RXQ group.\n+ * @param share_qid\n+ *   Shared RX queue index.\n+ *\n+ * @return\n+ *   Shared RXQ object that matching, or NULL if not found.\n+ */\n+static struct mlx5_rxq_ctrl *\n+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tLIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {\n+\t\tif (rxq_ctrl->share_group == group &&\n+\t\t    rxq_ctrl->share_qid == share_qid)\n+\t\t\treturn rxq_ctrl;\n+\t}\n+\treturn NULL;\n+}\n+\n+/**\n+ * Check whether requested Rx queue configuration matches shared RXQ.\n+ *\n+ * @param rxq_ctrl\n+ *   Pointer to shared RXQ.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   Queue index.\n+ * @param desc\n+ *   Number of descriptors to configure in queue.\n+ * @param socket\n+ *   NUMA socket on which memory must be allocated.\n+ * @param[in] conf\n+ *   Thresholds parameters.\n+ * @param mp\n+ *   Memory pool for buffer allocations.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static bool\n+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,\n+\t\t      uint16_t idx, uint16_t desc, unsigned int socket,\n+\t\t      const struct rte_eth_rxconf *conf,\n+\t\t      struct rte_mempool *mp)\n+{\n+\tstruct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;\n+\tstruct mlx5_priv *priv = 
dev->data->dev_private;\n+\tunsigned int i;\n+\n+\tRTE_SET_USED(conf);\n+\tif (rxq_ctrl->socket != socket) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: socket mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (rxq_ctrl->rxq.elts_n != log2above(desc)) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: descriptor number mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->mtu != spriv->mtu) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: mtu mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->dev_data->dev_conf.intr_conf.rxq !=\n+\t    spriv->dev_data->dev_conf.intr_conf.rxq) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: interrupt mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (mp != NULL && rxq_ctrl->rxq.mp != mp) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: mempool mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t} else if (mp == NULL) {\n+\t\tfor (i = 0; i < conf->rx_nseg; i++) {\n+\t\t\tif (conf->rx_seg[i].split.mp !=\n+\t\t\t    rxq_ctrl->rxq.rxseg[i].mp ||\n+\t\t\t    conf->rx_seg[i].split.length !=\n+\t\t\t    rxq_ctrl->rxq.rxseg[i].length) {\n+\t\t\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: segment %u configuration mismatch\",\n+\t\t\t\t\tdev->data->port_id, idx, i);\n+\t\t\t\treturn false;\n+\t\t\t}\n+\t\t}\n+\t}\n+\tif (priv->config.hw_padding != spriv->config.hw_padding) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: padding mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->config.cqe_comp != spriv->config.cqe_comp ||\n+\t    (priv->config.cqe_comp &&\n+\t     priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared 
group: CQE compression mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\treturn true;\n+}\n+\n /**\n  *\n  * @param dev\n@@ -692,12 +825,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_priv *rxq;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n \tstruct rte_eth_rxseg_split *rx_seg =\n \t\t\t\t(struct rte_eth_rxseg_split *)conf->rx_seg;\n \tstruct rte_eth_rxseg_split rx_single = {.mp = mp};\n \tuint16_t n_seg = conf->rx_nseg;\n \tint res;\n+\tuint64_t offloads = conf->offloads |\n+\t\t\t    dev->data->dev_conf.rxmode.offloads;\n \n \tif (mp) {\n \t\t/*\n@@ -709,9 +844,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\tn_seg = 1;\n \t}\n \tif (n_seg > 1) {\n-\t\tuint64_t offloads = conf->offloads |\n-\t\t\t\t    dev->data->dev_conf.rxmode.offloads;\n-\n \t\t/* The offloads should be checked on rte_eth_dev layer. 
*/\n \t\tMLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);\n \t\tif (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n@@ -723,9 +855,46 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t}\n \t\tMLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);\n \t}\n-\tres = mlx5_rx_queue_pre_setup(dev, idx, &desc);\n+\tif (conf->share_group > 0) {\n+\t\tif (!priv->config.hca_attr.mem_rq_rmp) {\n+\t\t\tDRV_LOG(ERR, \"port %u queue index %u shared Rx queue not supported by fw\",\n+\t\t\t\t     dev->data->port_id, idx);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {\n+\t\t\tDRV_LOG(ERR, \"port %u queue index %u shared Rx queue needs DevX api\",\n+\t\t\t\t     dev->data->port_id, idx);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif (conf->share_qid >= priv->rxqs_n) {\n+\t\t\tDRV_LOG(ERR, \"port %u shared Rx queue index %u > number of Rx queues %u\",\n+\t\t\t\tdev->data->port_id, conf->share_qid,\n+\t\t\t\tpriv->rxqs_n);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif (priv->config.mprq.enabled) {\n+\t\t\tDRV_LOG(ERR, \"port %u shared Rx queue index %u: not supported when MPRQ enabled\",\n+\t\t\t\tdev->data->port_id, conf->share_qid);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* Try to reuse shared RXQ. */\n+\t\trxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,\n+\t\t\t\t\t       conf->share_qid);\n+\t\tif (rxq_ctrl != NULL &&\n+\t\t    !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,\n+\t\t\t\t\t   conf, mp)) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\tres = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);\n \tif (res)\n \t\treturn res;\n+\t/* Allocate RXQ. 
*/\n \trxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,\n \t\t\t  SOCKET_ID_ANY);\n \tif (!rxq) {\n@@ -737,15 +906,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \trxq->priv = priv;\n \trxq->idx = idx;\n \t(*priv->rxq_privs)[idx] = rxq;\n-\trxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);\n-\tif (!rxq_ctrl) {\n-\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u\",\n-\t\t\tdev->data->port_id, idx);\n-\t\tmlx5_free(rxq);\n-\t\t(*priv->rxq_privs)[idx] = NULL;\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n+\tif (rxq_ctrl != NULL) {\n+\t\t/* Join owner list. */\n+\t\tLIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);\n+\t\trxq->ctrl = rxq_ctrl;\n+\t} else {\n+\t\trxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,\n+\t\t\t\t\tn_seg);\n+\t\tif (rxq_ctrl == NULL) {\n+\t\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u\",\n+\t\t\t\tdev->data->port_id, idx);\n+\t\t\tmlx5_free(rxq);\n+\t\t\t(*priv->rxq_privs)[idx] = NULL;\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn -rte_errno;\n+\t\t}\n \t}\n+\tmlx5_rxq_ref(dev, idx);\n \tDRV_LOG(DEBUG, \"port %u adding Rx queue %u to list\",\n \t\tdev->data->port_id, idx);\n \tdev->data->rx_queues[idx] = &rxq_ctrl->rxq;\n@@ -776,7 +953,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tint res;\n \n-\tres = mlx5_rx_queue_pre_setup(dev, idx, &desc);\n+\tres = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);\n \tif (res)\n \t\treturn res;\n \tif (hairpin_conf->peer_count != 1) {\n@@ -1095,6 +1272,9 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)\n \tstruct mlx5_rxq_obj *rxq_obj;\n \n \tLIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {\n+\t\tif (rxq_obj->rxq_ctrl->rxq.shared &&\n+\t\t    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))\n+\t\t\tcontinue;\n \t\tDRV_LOG(DEBUG, \"port %u Rx queue %u still referenced\",\n \t\t\tdev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);\n 
\t\t++ret;\n@@ -1413,6 +1593,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n \t\treturn NULL;\n \t}\n \tLIST_INIT(&tmpl->owners);\n+\tif (conf->share_group > 0) {\n+\t\ttmpl->rxq.shared = 1;\n+\t\ttmpl->share_group = conf->share_group;\n+\t\tLIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);\n+\t}\n \trxq->ctrl = tmpl;\n \tLIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);\n \tMLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);\n@@ -1660,8 +1845,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n #ifndef RTE_ARCH_64\n \ttmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;\n #endif\n+\tif (conf->share_group > 0)\n+\t\ttmpl->share_qid = conf->share_qid;\n \ttmpl->rxq.idx = idx;\n-\tmlx5_rxq_ref(dev, idx);\n \tLIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);\n \treturn tmpl;\n error:\n@@ -1836,31 +2022,41 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_priv *rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tuint32_t refcnt;\n \n \tif (priv->rxq_privs == NULL)\n \t\treturn 0;\n \trxq = mlx5_rxq_get(dev, idx);\n-\tif (rxq == NULL)\n+\tif (rxq == NULL || rxq->refcnt == 0)\n \t\treturn 0;\n-\tif (mlx5_rxq_deref(dev, idx) > 1)\n-\t\treturn 1;\n \trxq_ctrl = rxq->ctrl;\n-\tif (rxq_ctrl->obj != NULL) {\n+\trefcnt = mlx5_rxq_deref(dev, idx);\n+\tif (refcnt > 1) {\n+\t\treturn 1;\n+\t} else if (refcnt == 1) { /* RxQ stopped. 
*/\n \t\tpriv->obj_ops.rxq_obj_release(rxq);\n-\t\tLIST_REMOVE(rxq_ctrl->obj, next);\n-\t\tmlx5_free(rxq_ctrl->obj);\n-\t\trxq_ctrl->obj = NULL;\n-\t}\n-\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\trxq_free_elts(rxq_ctrl);\n-\t\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;\n-\t}\n-\tif (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {\n-\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)\n-\t\t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n+\t\tif (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {\n+\t\t\tLIST_REMOVE(rxq_ctrl->obj, next);\n+\t\t\tmlx5_free(rxq_ctrl->obj);\n+\t\t\trxq_ctrl->obj = NULL;\n+\t\t}\n+\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n+\t\t\tif (!rxq_ctrl->started)\n+\t\t\t\trxq_free_elts(rxq_ctrl);\n+\t\t\tdev->data->rx_queue_state[idx] =\n+\t\t\t\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\t\t}\n+\t} else { /* Refcnt zero, closing device. */\n \t\tLIST_REMOVE(rxq, owner_entry);\n-\t\tLIST_REMOVE(rxq_ctrl, next);\n-\t\tmlx5_free(rxq_ctrl);\n+\t\tif (LIST_EMPTY(&rxq_ctrl->owners)) {\n+\t\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)\n+\t\t\t\tmlx5_mr_btree_free\n+\t\t\t\t\t(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n+\t\t\tif (rxq_ctrl->rxq.shared)\n+\t\t\t\tLIST_REMOVE(rxq_ctrl, share_entry);\n+\t\t\tLIST_REMOVE(rxq_ctrl, next);\n+\t\t\tmlx5_free(rxq_ctrl);\n+\t\t}\n \t\tdev->data->rx_queues[idx] = NULL;\n \t\tmlx5_free(rxq);\n \t\t(*priv->rxq_privs)[idx] = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 72475e4b5b5..a3e62e95335 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -176,6 +176,39 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)\n \t\tmlx5_rxq_release(dev, i);\n }\n \n+static int\n+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,\n+\t\t      unsigned int idx)\n+{\n+\tint ret = 0;\n+\n+\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n+\t\t/*\n+\t\t * Pre-register the mempools. 
Regardless of whether\n+\t\t * the implicit registration is enabled or not,\n+\t\t * Rx mempool destruction is tracked to free MRs.\n+\t\t */\n+\t\tif (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)\n+\t\t\treturn -rte_errno;\n+\t\tret = rxq_alloc_elts(rxq_ctrl);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tMLX5_ASSERT(!rxq_ctrl->obj);\n+\trxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n+\t\t\t\t    sizeof(*rxq_ctrl->obj), 0,\n+\t\t\t\t    rxq_ctrl->socket);\n+\tif (!rxq_ctrl->obj) {\n+\t\tDRV_LOG(ERR, \"Port %u Rx queue %u can't allocate resources.\",\n+\t\t\tdev->data->port_id, idx);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\tDRV_LOG(DEBUG, \"Port %u rxq %u updated with %p.\", dev->data->port_id,\n+\t\tidx, (void *)&rxq_ctrl->obj);\n+\treturn 0;\n+}\n+\n /**\n  * Start traffic on Rx queues.\n  *\n@@ -208,28 +241,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\tif (rxq == NULL)\n \t\t\tcontinue;\n \t\trxq_ctrl = rxq->ctrl;\n-\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\t\t/*\n-\t\t\t * Pre-register the mempools. 
Regardless of whether\n-\t\t\t * the implicit registration is enabled or not,\n-\t\t\t * Rx mempool destruction is tracked to free MRs.\n-\t\t\t */\n-\t\t\tif (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)\n-\t\t\t\tgoto error;\n-\t\t\tret = rxq_alloc_elts(rxq_ctrl);\n-\t\t\tif (ret)\n+\t\tif (!rxq_ctrl->started) {\n+\t\t\tif (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)\n \t\t\t\tgoto error;\n-\t\t}\n-\t\tMLX5_ASSERT(!rxq_ctrl->obj);\n-\t\trxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n-\t\t\t\t\t    sizeof(*rxq_ctrl->obj), 0,\n-\t\t\t\t\t    rxq_ctrl->socket);\n-\t\tif (!rxq_ctrl->obj) {\n-\t\t\tDRV_LOG(ERR,\n-\t\t\t\t\"Port %u Rx queue %u can't allocate resources.\",\n-\t\t\t\tdev->data->port_id, i);\n-\t\t\trte_errno = ENOMEM;\n-\t\t\tgoto error;\n+\t\t\tLIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);\n \t\t}\n \t\tret = priv->obj_ops.rxq_obj_new(rxq);\n \t\tif (ret) {\n@@ -237,9 +252,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\t\trxq_ctrl->obj = NULL;\n \t\t\tgoto error;\n \t\t}\n-\t\tDRV_LOG(DEBUG, \"Port %u rxq %u updated with %p.\",\n-\t\t\tdev->data->port_id, i, (void *)&rxq_ctrl->obj);\n-\t\tLIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);\n+\t\trxq_ctrl->started = true;\n \t}\n \treturn 0;\n error:\n",
    "prefixes": [
        "v3",
        "13/14"
    ]
}