get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/99687/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99687,
    "url": "http://patches.dpdk.org/api/patches/99687/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-5-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210926111904.237736-5-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210926111904.237736-5-xuemingl@nvidia.com",
    "date": "2021-09-26T11:18:57",
    "name": "[04/11] net/mlx5: split multiple packet Rq memory pool",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d6da1f7e5ca8b2d5e54c4e070446facf374e0446",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-5-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19166,
            "url": "http://patches.dpdk.org/api/series/19166/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19166",
            "date": "2021-09-26T11:18:53",
            "name": "net/mlx5: support shared Rx queue",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/19166/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/99687/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/99687/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DCAC5A0547;\n\tSun, 26 Sep 2021 13:19:51 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 001EA410F6;\n\tSun, 26 Sep 2021 13:19:38 +0200 (CEST)",
            "from NAM10-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam10on2053.outbound.protection.outlook.com [40.107.94.53])\n by mails.dpdk.org (Postfix) with ESMTP id A7A35410F4\n for <dev@dpdk.org>; Sun, 26 Sep 2021 13:19:37 +0200 (CEST)",
            "from BN9PR03CA0735.namprd03.prod.outlook.com (2603:10b6:408:110::20)\n by CO6PR12MB5473.namprd12.prod.outlook.com (2603:10b6:303:13e::8)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.13; Sun, 26 Sep\n 2021 11:19:34 +0000",
            "from BN8NAM11FT041.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:110:cafe::22) by BN9PR03CA0735.outlook.office365.com\n (2603:10b6:408:110::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.14 via Frontend\n Transport; Sun, 26 Sep 2021 11:19:34 +0000",
            "from mail.nvidia.com (216.228.112.32) by\n BN8NAM11FT041.mail.protection.outlook.com (10.13.177.18) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4544.13 via Frontend Transport; Sun, 26 Sep 2021 11:19:34 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL109.nvidia.com\n (172.20.187.15) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 04:19:33 -0700",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:19:31 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=d9KO5gAUL/AQxvFlgPBYH1fSH6zIQodPMKnBIU6gTWsvPG5VKaUA6Mhwf4y9ARHZva/cyf+bwQfJBX7VmChmEyrT1mNcgN2JqSNH3DXym52p04xSPgLduSYeyqXD2PXEm/7qjdkVCwFhgOCnVPSf0nihRbYTIIyKNnzhtIEoq7s7DMuvr3YdH4hophcObx+2CBGbRKEpGulBU/gOPTMFGju8ygwFLqRtlw+vfoEHe2VpdF6nCQZBkZ0zoZgMxDNs75P9s36ybKYaw1VEa2U7RV0Z/TVVoou4koZ5XqbCnuNVPpgU9ezfJSHrm/mM4GidLmmy5uR7FfQJjnvaLZsRcg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=T2HOQbAWANpYJ9TChzKqfYKFj4XrSJz1zULPDHkuTqY=;\n b=MLUSg4gy0IkvlBsnfAFzX/eU4ORvencQ7U36pp/OEV10j0dOiHrYXgx+pvmyfQny6fALGJRabyHhnC5vlReJ7oUPIUbDZ37qXLbCd36lR0fLESAmITQU4o5CqEGTJ2wxvnkYJwUsnEGKIHj1oe7v0qFLAV4VrzHUoZoxqbZIgNE5EUejoVB98INxMylz6+vzoh6EBZgKJhVe0JRPjXxPrp82RATPebN8gBzpAMmTanJBBTEysn4SEZ3wMKVIBBJqiJ0i/Sabn8T4fUzijzzeUWRnBj3Emm7FMj6YfVx3ZacXKkr8OcoLfr+KSkGh3lWgH8P1aP2M7kFB+4aaQrXCuw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.32) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=T2HOQbAWANpYJ9TChzKqfYKFj4XrSJz1zULPDHkuTqY=;\n b=ZwVs4L74lLcbLSAylLaYs24Ej4S+Hexd2Vew4RYMg/S4RXwOaKNH8gQb5/vpVtb3jt+xnZ3WVvLNXkFj/SZP3wFmGVWds0TYn76AmI/a3HeNl2ifhcdedwnQ18cp2BhzEIMPpFxC4xElFOFiyFS0JnRVQ+aNb1YF3pcQoU8UEPrK4HiFA5n8r7Gic5WvVD+BZwocLamWTMRH1wR3uEKjGM9HrY1K/9jgsgII+OSaEDIqWLIhdGgjBwMZgDth46ewb7hTQfADMUJy1yGugZrGHU7XCeDlfNUqntlnXP35sxs4Yf5o7CEjf6UUPI4tdPN3FGbm2bqLre9vkh3shf2Bpg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.32)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.32 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.32; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Sun, 26 Sep 2021 19:18:57 +0800",
        "Message-ID": "<20210926111904.237736-5-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "References": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "704253d3-2331-4c60-be61-08d980df84c2",
        "X-MS-TrafficTypeDiagnostic": "CO6PR12MB5473:",
        "X-Microsoft-Antispam-PRVS": "\n <CO6PR12MB5473BF4C9D7954A09651CD38A1A69@CO6PR12MB5473.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:233;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n dr1GPKjeZyo/zIBYe8k9HQywIwInsj/nKfuou7131oOsS/d+E4JYdFR+mZvYXYLMmiRxAnqVvhQUr/b1k8S0Iv/o2mG9dR4F4RaDQw2bvMgXfG8C9UrNmuAHg2aa83IQZhruF6dBMqw1rwRnmTMyuCeGrFYmGOtWavLKMm4ALMM2gWn9Cp2C90MMxk1gJ1rYlpuiX08B3P1jdSutW6G8oMZIpUjzgoVlX9wWTxr5IPZ8XB3B+BHFyFkTAQ/nJr4vTWlCOATG27vxRa1OyNFkv/ZiRP76G4EWyua++FrlQPN+cSJgkYRfnqqemfEtfr41/vMsDTlMLiQZbFmCFSwuIyGsnU61yDXueYUnWFafrJx9VvsKyrSouM8s0yRIkB+svGKXSeQ3CUv9tn3oyBsDBZCYxfZQoSS9SwNN/3OQFGlW4q7UGHTkEBs8Kn7vNIGh9zTF0dZJD+AeCJTLhKCMAPvmB9OAW/AQQS4M5oL5TwgXlQdduiFeiiX3Tl0tks8nVaZWtZxKQd+gILrAQ+0jHLXEp3qCnVnWmXaf2OM4oiLfOQK1Uv06LLdQBRKyqGK7pM07yERrvUoKUCeDq9Dyc0NHfX5mlDUQGGszfeq/rHNPSmjpwaCcSJcHcrPDq2uVhCGXLSaNtuA5bCQlR6bmgsdBjtG64R93rnbSnM6s76RK/o7GPi2kcMCtOh82/MddGFBww6p96gwqc901PBfvyA==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.32; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid01.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(4326008)(7696005)(36756003)(2616005)(70206006)(70586007)(82310400003)(26005)(336012)(6666004)(83380400001)(426003)(5660300002)(86362001)(2906002)(8676002)(1076003)(107886003)(6286002)(316002)(6916009)(36860700001)(186003)(508600001)(8936002)(55016002)(54906003)(16526019)(7636003)(47076005)(356005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "26 Sep 2021 11:19:34.1194 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 704253d3-2331-4c60-be61-08d980df84c2",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.32];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT041.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CO6PR12MB5473",
        "Subject": "[dpdk-dev] [PATCH 04/11] net/mlx5: split multiple packet Rq memory\n pool",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Port info is invisible from shared Rx queue, split MPR mempool from\ndevice to Rx queue, also changed pool flag to mp_sc.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c         |   1 -\n drivers/net/mlx5/mlx5_rx.h      |   4 +-\n drivers/net/mlx5/mlx5_rxq.c     | 109 ++++++++++++--------------------\n drivers/net/mlx5/mlx5_trigger.c |  10 ++-\n 4 files changed, 47 insertions(+), 77 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex f84e061fe71..3abb8c97e76 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1602,7 +1602,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t\tmlx5_drop_action_destroy(dev);\n \tif (priv->mreg_cp_tbl)\n \t\tmlx5_hlist_destroy(priv->mreg_cp_tbl);\n-\tmlx5_mprq_free_mp(dev);\n \tif (priv->sh->ct_mng)\n \t\tmlx5_flow_aso_ct_mng_close(priv->sh);\n \tmlx5_os_free_shared_dr(priv);\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex d44c8078dea..a8e0c3162b0 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -179,8 +179,8 @@ struct mlx5_rxq_ctrl {\n extern uint8_t rss_hash_default_key[];\n \n unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);\n-int mlx5_mprq_free_mp(struct rte_eth_dev *dev);\n-int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);\n+int mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);\n+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);\n int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);\n int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);\n int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 7e97cdd4bc0..14de8d0e6a4 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -1087,7 +1087,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,\n }\n \n /**\n- * Free mempool of Multi-Packet RQ.\n+ * Free RXQ mempool of Multi-Packet RQ.\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n@@ -1096,16 +1096,15 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,\n  *   0 on success, negative errno value on failure.\n  */\n int\n-mlx5_mprq_free_mp(struct rte_eth_dev *dev)\n+mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)\n {\n-\tstruct 
mlx5_priv *priv = dev->data->dev_private;\n-\tstruct rte_mempool *mp = priv->mprq_mp;\n-\tunsigned int i;\n+\tstruct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;\n+\tstruct rte_mempool *mp = rxq->mprq_mp;\n \n \tif (mp == NULL)\n \t\treturn 0;\n-\tDRV_LOG(DEBUG, \"port %u freeing mempool (%s) for Multi-Packet RQ\",\n-\t\tdev->data->port_id, mp->name);\n+\tDRV_LOG(DEBUG, \"port %u queue %hu freeing mempool (%s) for Multi-Packet RQ\",\n+\t\tdev->data->port_id, rxq->idx, mp->name);\n \t/*\n \t * If a buffer in the pool has been externally attached to a mbuf and it\n \t * is still in use by application, destroying the Rx queue can spoil\n@@ -1123,34 +1122,28 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)\n \t\treturn -rte_errno;\n \t}\n \trte_mempool_free(mp);\n-\t/* Unset mempool for each Rx queue. */\n-\tfor (i = 0; i != priv->rxqs_n; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[i];\n-\n-\t\tif (rxq == NULL)\n-\t\t\tcontinue;\n-\t\trxq->mprq_mp = NULL;\n-\t}\n-\tpriv->mprq_mp = NULL;\n+\trxq->mprq_mp = NULL;\n \treturn 0;\n }\n \n /**\n- * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the\n- * mempool. 
If already allocated, reuse it if there're enough elements.\n+ * Allocate RXQ a mempool for Multi-Packet RQ.\n+ * If already allocated, reuse it if there're enough elements.\n  * Otherwise, resize it.\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n+ * @param rxq_ctrl\n+ *   Pointer to RXQ.\n  *\n  * @return\n  *   0 on success, negative errno value on failure.\n  */\n int\n-mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct rte_mempool *mp = priv->mprq_mp;\n+\tstruct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;\n+\tstruct rte_mempool *mp = rxq->mprq_mp;\n \tchar name[RTE_MEMPOOL_NAMESIZE];\n \tunsigned int desc = 0;\n \tunsigned int buf_len;\n@@ -1158,28 +1151,15 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n \tunsigned int obj_size;\n \tunsigned int strd_num_n = 0;\n \tunsigned int strd_sz_n = 0;\n-\tunsigned int i;\n-\tunsigned int n_ibv = 0;\n \n-\tif (!mlx5_mprq_enabled(dev))\n+\tif (rxq_ctrl == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)\n \t\treturn 0;\n-\t/* Count the total number of descriptors configured. */\n-\tfor (i = 0; i != priv->rxqs_n; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[i];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl = container_of\n-\t\t\t(rxq, struct mlx5_rxq_ctrl, rxq);\n-\n-\t\tif (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)\n-\t\t\tcontinue;\n-\t\tn_ibv++;\n-\t\tdesc += 1 << rxq->elts_n;\n-\t\t/* Get the max number of strides. */\n-\t\tif (strd_num_n < rxq->strd_num_n)\n-\t\t\tstrd_num_n = rxq->strd_num_n;\n-\t\t/* Get the max size of a stride. */\n-\t\tif (strd_sz_n < rxq->strd_sz_n)\n-\t\t\tstrd_sz_n = rxq->strd_sz_n;\n-\t}\n+\t/* Number of descriptors configured. */\n+\tdesc = 1 << rxq->elts_n;\n+\t/* Get the max number of strides. */\n+\tstrd_num_n = rxq->strd_num_n;\n+\t/* Get the max size of a stride. 
*/\n+\tstrd_sz_n = rxq->strd_sz_n;\n \tMLX5_ASSERT(strd_num_n && strd_sz_n);\n \tbuf_len = (1 << strd_num_n) * (1 << strd_sz_n);\n \tobj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *\n@@ -1196,7 +1176,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n \t * this Mempool gets available again.\n \t */\n \tdesc *= 4;\n-\tobj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;\n+\tobj_num = desc + MLX5_MPRQ_MP_CACHE_SZ;\n \t/*\n \t * rte_mempool_create_empty() has sanity check to refuse large cache\n \t * size compared to the number of elements.\n@@ -1209,50 +1189,41 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n \t\tDRV_LOG(DEBUG, \"port %u mempool %s is being reused\",\n \t\t\tdev->data->port_id, mp->name);\n \t\t/* Reuse. */\n-\t\tgoto exit;\n-\t} else if (mp != NULL) {\n-\t\tDRV_LOG(DEBUG, \"port %u mempool %s should be resized, freeing it\",\n-\t\t\tdev->data->port_id, mp->name);\n+\t\treturn 0;\n+\t}\n+\tif (mp != NULL) {\n+\t\tDRV_LOG(DEBUG, \"port %u queue %u mempool %s should be resized, freeing it\",\n+\t\t\tdev->data->port_id, rxq->idx, mp->name);\n \t\t/*\n \t\t * If failed to free, which means it may be still in use, no way\n \t\t * but to keep using the existing one. 
On buffer underrun,\n \t\t * packets will be memcpy'd instead of external buffer\n \t\t * attachment.\n \t\t */\n-\t\tif (mlx5_mprq_free_mp(dev)) {\n+\t\tif (mlx5_mprq_free_mp(dev, rxq_ctrl) != 0) {\n \t\t\tif (mp->elt_size >= obj_size)\n-\t\t\t\tgoto exit;\n+\t\t\t\treturn 0;\n \t\t\telse\n \t\t\t\treturn -rte_errno;\n \t\t}\n \t}\n-\tsnprintf(name, sizeof(name), \"port-%u-mprq\", dev->data->port_id);\n+\tsnprintf(name, sizeof(name), \"port-%u-queue-%hu-mprq\",\n+\t\t dev->data->port_id, rxq->idx);\n \tmp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,\n \t\t\t\t0, NULL, NULL, mlx5_mprq_buf_init,\n-\t\t\t\t(void *)((uintptr_t)1 << strd_num_n),\n-\t\t\t\tdev->device->numa_node, 0);\n+\t\t\t\t(void *)(uintptr_t)(1 << strd_num_n),\n+\t\t\t\tdev->device->numa_node, MEMPOOL_F_SC_GET);\n \tif (mp == NULL) {\n \t\tDRV_LOG(ERR,\n-\t\t\t\"port %u failed to allocate a mempool for\"\n+\t\t\t\"port %u queue %hu failed to allocate a mempool for\"\n \t\t\t\" Multi-Packet RQ, count=%u, size=%u\",\n-\t\t\tdev->data->port_id, obj_num, obj_size);\n+\t\t\tdev->data->port_id, rxq->idx, obj_num, obj_size);\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n-\tpriv->mprq_mp = mp;\n-exit:\n-\t/* Set mempool for each Rx queue. 
*/\n-\tfor (i = 0; i != priv->rxqs_n; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[i];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl = container_of\n-\t\t\t(rxq, struct mlx5_rxq_ctrl, rxq);\n-\n-\t\tif (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)\n-\t\t\tcontinue;\n-\t\trxq->mprq_mp = mp;\n-\t}\n-\tDRV_LOG(INFO, \"port %u Multi-Packet RQ is configured\",\n-\t\tdev->data->port_id);\n+\trxq->mprq_mp = mp;\n+\tDRV_LOG(INFO, \"port %u queue %hu Multi-Packet RQ is configured\",\n+\t\tdev->data->port_id, rxq->idx);\n \treturn 0;\n }\n \n@@ -1717,8 +1688,10 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n \t\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;\n \t}\n \tif (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {\n-\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)\n+\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n \t\t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n+\t\t\tmlx5_mprq_free_mp(dev, rxq_ctrl);\n+\t\t}\n \t\tLIST_REMOVE(rxq_ctrl, next);\n \t\tmlx5_free(rxq_ctrl);\n \t\t(*priv->rxqs)[idx] = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex c3adf5082e6..0753dbad053 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -138,11 +138,6 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \tunsigned int i;\n \tint ret = 0;\n \n-\t/* Allocate/reuse/resize mempool for Multi-Packet RQ. */\n-\tif (mlx5_mprq_alloc_mp(dev)) {\n-\t\t/* Should not release Rx queues but return immediately. */\n-\t\treturn -rte_errno;\n-\t}\n \tDRV_LOG(DEBUG, \"Port %u device_attr.max_qp_wr is %d.\",\n \t\tdev->data->port_id, priv->sh->device_attr.max_qp_wr);\n \tDRV_LOG(DEBUG, \"Port %u device_attr.max_sge is %d.\",\n@@ -153,8 +148,11 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\tif (!rxq_ctrl)\n \t\t\tcontinue;\n \t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\t\t/* Pre-register Rx mempools. 
*/\n \t\t\tif (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {\n+\t\t\t\t/* Allocate/reuse/resize mempool for MPRQ. */\n+\t\t\t\tif (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)\n+\t\t\t\t\tgoto error;\n+\t\t\t\t/* Pre-register Rx mempools. */\n \t\t\t\tmlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,\n \t\t\t\t\t\t  rxq_ctrl->rxq.mprq_mp);\n \t\t\t} else {\n",
    "prefixes": [
        "04/11"
    ]
}