get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/103754/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103754,
    "url": "https://patches.dpdk.org/api/patches/103754/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20211104123320.1638915-8-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211104123320.1638915-8-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211104123320.1638915-8-xuemingl@nvidia.com",
    "date": "2021-11-04T12:33:13",
    "name": "[v4,07/14] net/mlx5: split Rx queue into shareable and private",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1a66da2207162b14b1b02c7af5115bfc29697801",
    "submitter": {
        "id": 1904,
        "url": "https://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20211104123320.1638915-8-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 20310,
            "url": "https://patches.dpdk.org/api/series/20310/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=20310",
            "date": "2021-11-04T12:33:06",
            "name": "net/mlx5: support shared Rx queue",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/20310/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/103754/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/103754/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CA27FA0548;\n\tThu,  4 Nov 2021 13:35:12 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CDAC64273F;\n\tThu,  4 Nov 2021 13:34:48 +0100 (CET)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2086.outbound.protection.outlook.com [40.107.223.86])\n by mails.dpdk.org (Postfix) with ESMTP id 44199411A4\n for <dev@dpdk.org>; Thu,  4 Nov 2021 13:34:46 +0100 (CET)",
            "from DS7PR03CA0203.namprd03.prod.outlook.com (2603:10b6:5:3b6::28)\n by DM4PR12MB5247.namprd12.prod.outlook.com (2603:10b6:5:39b::8) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4649.15; Thu, 4 Nov\n 2021 12:34:44 +0000",
            "from DM6NAM11FT016.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3b6:cafe::8d) by DS7PR03CA0203.outlook.office365.com\n (2603:10b6:5:3b6::28) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11 via Frontend\n Transport; Thu, 4 Nov 2021 12:34:44 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT016.mail.protection.outlook.com (10.13.173.139) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Thu, 4 Nov 2021 12:34:44 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Thu, 4 Nov\n 2021 12:34:20 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Kyto5C8DBtTeMkOx3caHqw5yp1Bz0QN+3s+Lj2ahCOljefgnpqxHehbS4rn+QMxiKsIK/xPjU331OPfQYSW+MM99ZFGxS6484IcoSk+o2yVWktZucsG+2gz7Go+f2x78Qh5aNUChsXi6WcitL/fqUy9AEtL6zy2wio2IPRnYVuKSbKj4CUDP7SGAxjgV6aiHmvUwKBEXP5U0XS+GSeqf2xVolzEVTdJ/uwGuorNFN8czDExA1P2oStw5QwR2v4wxAV41dv3hejVZjGHIb5grlWZ5YtR8m6HhZDRA4c4rWivPuze4L3IRosDeOa4BBVD+0BxJ/D/MeSvA9+kxOPx6gg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=aSfrL3cuKSUqURyXJXdMtx5L71UWOjYwnWulaZ0DyLo=;\n b=QUaEUzR/ihZY10Uew55gEOkBKBtusFfUdKD2hmLgGmRKQ1rBtvmascrhcMYDy2DbUaOn1Ju3moafP76yU04D7wrNuZ6O8lh66/ogKeNvIKksDGUQj4lUagEgmvDkZzoCQklmtSdhAUNfQwpy5mybTrCIYDdNja67kIPefZPjIchGT6kqrzprWcNxtJtMM6Odody3I2ZBBNpaeUkEhgBVHEVyhrClSAFNrD5TgrW4eMBZ9pdvbsGKHML994f919GXfnQi4FH6026a1Sg4MjHa29mNOpbTMsWXHnxjMagKXiLhdBoymO6L/kPHv/rzLVDMVadbz08ct334u4KCATwa8g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aSfrL3cuKSUqURyXJXdMtx5L71UWOjYwnWulaZ0DyLo=;\n b=VQxQGDO71fr9Hhz0aMAFxwqMe2SOiK/VV+nC4YkecJ3l8bBZirw3RfKc0mW8cWHi7uf1ZiK4UlfYGlSLZbobagSsZ0sT4q3pSvNz8awzOwLTqW/SMjCZFlNn06ST08AWjURp3LQlHwaFp01M1jYJgGxoLOvDj2ybN6YkXOU7eEAO4JaFKEkyTlYSgde146ofXB071SDk/G29pwjuQzZNfAcB+BmHZRR4/CddbfBuD1EVnBztdbvnIljPgNDsvFo9oJHCZOWR9hGoAXtP6FLfeB/aFH8HwLexA9oGCeRZxpdTQWPgOxkKevBpXT7ZJQR2OB2Xo89lX8cTtCkw2m1gsw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, \"Slava\n Ovsiienko\" <viacheslavo@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "Date": "Thu, 4 Nov 2021 20:33:13 +0800",
        "Message-ID": "<20211104123320.1638915-8-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211104123320.1638915-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211104123320.1638915-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "191112e8-3683-4ec8-0d14-08d99f8f7b3d",
        "X-MS-TrafficTypeDiagnostic": "DM4PR12MB5247:",
        "X-Microsoft-Antispam-PRVS": "\n <DM4PR12MB5247B5E830403ADD54A6665CA18D9@DM4PR12MB5247.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:4941;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n JlYs4mFOckJa8Pm3GxGGzcCSEx0aIlkddVom6JFpZU2KQkBYfzpmfPEZSu2Tm8uguvIJ7MrLSHrg8IVU1O4vK+ofqRqDbfcjzCEDtQQmPaO4Ib5l9I3BI501YLv5xVyK+nuRiRMjDprCeAPSwSd+b7vv7sQYlTpoLqjss+pGYJvodDJ+bZ21cC/GLIG5o/d//bdRvI/E8ZSWSibIM4WIBMt6VQSa0jnxmi57Zit2Omrc1LtvuKUiomJf02MpXMUnHyoJCO683eSQEMFcT209D88ws0o/2zndaXpZQ5sDcRiJZ5Q8mvi1eqEM55pq26H51e00s0Qa/YP7Ux7PYNLYaDSQL86eUZjMV3TXL5ZLH8MarkHqlbGpDxO+dvW6xXQXoPQbI4dycTEMOpZJeUTq0xWu11iJd9ZoIHo0Lrql6CjBREUsxfImYWNGvqmZv7DKnkH9OiL7AQC3WZoJCbhUU6HgqObq3akwwVGKGIZNgIWD14Ou6cjYBafYXZEKzw1BqYvprsTUVbb0zs6zagpQeNLFDxOnLya5dH67AMqhGxoL5oU5dSTbWNRsNuppIFaX7qIBUvOadiwP8vUnfLThXTmvr4iWQA1bpMFM9tloym4vLE6YfzMszno+/MUWv98onTMeyFUT6YZj3w/+7T5XetCrt9EUrctWwcZq+6Kj01HK8k0crbwNoki8oAdv49UDGI8+sm39mDbrH7XlMLkYrnYVXD0ZvVRpv9eXO0kwSFGCSbzc+hS7VWZVUx02tDqu",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(186003)(8676002)(107886003)(26005)(5660300002)(54906003)(36860700001)(6916009)(16526019)(316002)(83380400001)(55016002)(6286002)(7636003)(356005)(30864003)(2906002)(36756003)(2616005)(7696005)(47076005)(426003)(6666004)(70206006)(336012)(508600001)(1076003)(4326008)(8936002)(70586007)(82310400003)(86362001)(21314003)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "04 Nov 2021 12:34:44.4635 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 191112e8-3683-4ec8-0d14-08d99f8f7b3d",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT016.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB5247",
        "Subject": "[dpdk-dev] [PATCH v4 07/14] net/mlx5: split Rx queue into shareable\n and private",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "To prepare shared Rx queue, splits RxQ data into shareable and private.\nStruct mlx5_rxq_priv is per queue data.\nStruct mlx5_rxq_ctrl is shared queue resources and data.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nAcked-by: Slava Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c        |  4 +++\n drivers/net/mlx5/mlx5.h        |  5 ++-\n drivers/net/mlx5/mlx5_ethdev.c | 10 ++++++\n drivers/net/mlx5/mlx5_rx.h     | 17 +++++++--\n drivers/net/mlx5/mlx5_rxq.c    | 66 ++++++++++++++++++++++++++++------\n 5 files changed, 88 insertions(+), 14 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex dc15688f216..374cc9757aa 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1700,6 +1700,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t\tmlx5_free(dev->intr_handle);\n \t\tdev->intr_handle = NULL;\n \t}\n+\tif (priv->rxq_privs != NULL) {\n+\t\tmlx5_free(priv->rxq_privs);\n+\t\tpriv->rxq_privs = NULL;\n+\t}\n \tif (priv->txqs != NULL) {\n \t\t/* XXX race condition if mlx5_tx_burst() is still running. */\n \t\trte_delay_us_sleep(1000);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 74af88ec194..4e99fe7d068 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1345,6 +1345,8 @@ enum mlx5_txq_modify_type {\n \tMLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */\n };\n \n+struct mlx5_rxq_priv;\n+\n /* HW objects operations structure. */\n struct mlx5_obj_ops {\n \tint (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);\n@@ -1408,7 +1410,8 @@ struct mlx5_priv {\n \t/* RX/TX queues. */\n \tunsigned int rxqs_n; /* RX queues array size. */\n \tunsigned int txqs_n; /* TX queues array size. */\n-\tstruct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */\n+\tstruct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */\n+\tstruct mlx5_rxq_data *(*rxqs)[]; /* (Shared) RX queues. */\n \tstruct mlx5_txq_data *(*txqs)[]; /* TX queues. */\n \tstruct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */\n \tstruct rte_eth_rss_conf rss_conf; /* RSS configuration. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c\nindex 81fa8845bb5..cde505955df 100644\n--- a/drivers/net/mlx5/mlx5_ethdev.c\n+++ b/drivers/net/mlx5/mlx5_ethdev.c\n@@ -104,6 +104,16 @@ mlx5_dev_configure(struct rte_eth_dev *dev)\n \t       MLX5_RSS_HASH_KEY_LEN);\n \tpriv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;\n \tpriv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;\n+\tpriv->rxq_privs = mlx5_realloc(priv->rxq_privs,\n+\t\t\t\t       MLX5_MEM_RTE | MLX5_MEM_ZERO,\n+\t\t\t\t       sizeof(void *) * rxqs_n, 0,\n+\t\t\t\t       SOCKET_ID_ANY);\n+\tif (priv->rxq_privs == NULL) {\n+\t\tDRV_LOG(ERR, \"port %u cannot allocate rxq private data\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n \tpriv->rxqs = (void *)dev->data->rx_queues;\n \tpriv->txqs = (void *)dev->data->tx_queues;\n \tif (txqs_n != priv->txqs_n) {\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 69b1263339e..fa24f5cdf3a 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -150,10 +150,14 @@ struct mlx5_rxq_ctrl {\n \tstruct mlx5_rxq_data rxq; /* Data path structure. */\n \tLIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */\n \tuint32_t refcnt; /* Reference counter. */\n+\tLIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */\n \tstruct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */\n+\tstruct mlx5_dev_ctx_shared *sh; /* Shared context. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n \tenum mlx5_rxq_type type; /* Rxq type. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n+\tuint32_t share_group; /* Group ID of shared RXQ. */\n+\tuint16_t share_qid; /* Shared RxQ ID in group. */\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n \tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. 
*/\n@@ -163,6 +167,14 @@ struct mlx5_rxq_ctrl {\n \tuint32_t hairpin_status; /* Hairpin binding status. */\n };\n \n+/* RX queue private data. */\n+struct mlx5_rxq_priv {\n+\tuint16_t idx; /* Queue index. */\n+\tstruct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */\n+\tLIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */\n+\tstruct mlx5_priv *priv; /* Back pointer to private data. */\n+};\n+\n /* mlx5_rxq.c */\n \n extern uint8_t rss_hash_default_key[];\n@@ -186,13 +198,14 @@ void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);\n int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);\n-struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,\n+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,\n+\t\t\t\t   struct mlx5_rxq_priv *rxq,\n \t\t\t\t   uint16_t desc, unsigned int socket,\n \t\t\t\t   const struct rte_eth_rxconf *conf,\n \t\t\t\t   const struct rte_eth_rxseg_split *rx_seg,\n \t\t\t\t   uint16_t n_seg);\n struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new\n-\t(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+\t(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,\n \t const struct rte_eth_hairpin_conf *hairpin_conf);\n struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);\n int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex b2e4389ad60..00df245a5c6 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -674,6 +674,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t    struct rte_mempool *mp)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_priv *rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tstruct rte_eth_rxseg_split *rx_seg =\n \t\t\t\t(struct rte_eth_rxseg_split 
*)conf->rx_seg;\n@@ -708,10 +709,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \tres = mlx5_rx_queue_pre_setup(dev, idx, &desc);\n \tif (res)\n \t\treturn res;\n-\trxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);\n+\trxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,\n+\t\t\t  SOCKET_ID_ANY);\n+\tif (!rxq) {\n+\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u private data\",\n+\t\t\tdev->data->port_id, idx);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\trxq->priv = priv;\n+\trxq->idx = idx;\n+\t(*priv->rxq_privs)[idx] = rxq;\n+\trxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);\n \tif (!rxq_ctrl) {\n-\t\tDRV_LOG(ERR, \"port %u unable to allocate queue index %u\",\n+\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u\",\n \t\t\tdev->data->port_id, idx);\n+\t\tmlx5_free(rxq);\n+\t\t(*priv->rxq_privs)[idx] = NULL;\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n@@ -741,6 +755,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\t    const struct rte_eth_hairpin_conf *hairpin_conf)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_priv *rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tint res;\n \n@@ -776,14 +791,27 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\treturn -rte_errno;\n \t\t}\n \t}\n-\trxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);\n+\trxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,\n+\t\t\t  SOCKET_ID_ANY);\n+\tif (!rxq) {\n+\t\tDRV_LOG(ERR, \"port %u unable to allocate hairpin rx queue index %u private data\",\n+\t\t\tdev->data->port_id, idx);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\trxq->priv = priv;\n+\trxq->idx = idx;\n+\t(*priv->rxq_privs)[idx] = rxq;\n+\trxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);\n \tif (!rxq_ctrl) {\n-\t\tDRV_LOG(ERR, \"port %u unable to 
allocate queue index %u\",\n+\t\tDRV_LOG(ERR, \"port %u unable to allocate hairpin queue index %u\",\n \t\t\tdev->data->port_id, idx);\n+\t\tmlx5_free(rxq);\n+\t\t(*priv->rxq_privs)[idx] = NULL;\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n-\tDRV_LOG(DEBUG, \"port %u adding Rx queue %u to list\",\n+\tDRV_LOG(DEBUG, \"port %u adding hairpin Rx queue %u to list\",\n \t\tdev->data->port_id, idx);\n \t(*priv->rxqs)[idx] = &rxq_ctrl->rxq;\n \treturn 0;\n@@ -1319,8 +1347,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n- * @param idx\n- *   RX queue index.\n+ * @param rxq\n+ *   RX queue private data.\n  * @param desc\n  *   Number of descriptors to configure in queue.\n  * @param socket\n@@ -1330,10 +1358,12 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,\n  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.\n  */\n struct mlx5_rxq_ctrl *\n-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n+\t     uint16_t desc,\n \t     unsigned int socket, const struct rte_eth_rxconf *conf,\n \t     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)\n {\n+\tuint16_t idx = rxq->idx;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_ctrl *tmpl;\n \tunsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);\n@@ -1377,6 +1407,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n \t}\n+\tLIST_INIT(&tmpl->owners);\n+\trxq->ctrl = tmpl;\n+\tLIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);\n \tMLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);\n \t/*\n \t * Build the array of actual buffer offsets and lengths.\n@@ -1610,6 +1643,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \ttmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&\n 
\t\t(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));\n \ttmpl->rxq.port_id = dev->data->port_id;\n+\ttmpl->sh = priv->sh;\n \ttmpl->priv = priv;\n \ttmpl->rxq.mp = rx_seg[0].mp;\n \ttmpl->rxq.elts_n = log2above(desc);\n@@ -1637,8 +1671,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n- * @param idx\n- *   RX queue index.\n+ * @param rxq\n+ *   RX queue.\n  * @param desc\n  *   Number of descriptors to configure in queue.\n  * @param hairpin_conf\n@@ -1648,9 +1682,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.\n  */\n struct mlx5_rxq_ctrl *\n-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n+\t\t     uint16_t desc,\n \t\t     const struct rte_eth_hairpin_conf *hairpin_conf)\n {\n+\tuint16_t idx = rxq->idx;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_ctrl *tmpl;\n \n@@ -1660,10 +1696,14 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n \t}\n+\tLIST_INIT(&tmpl->owners);\n+\trxq->ctrl = tmpl;\n+\tLIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);\n \ttmpl->type = MLX5_RXQ_TYPE_HAIRPIN;\n \ttmpl->socket = SOCKET_ID_ANY;\n \ttmpl->rxq.rss_hash = 0;\n \ttmpl->rxq.port_id = dev->data->port_id;\n+\ttmpl->sh = priv->sh;\n \ttmpl->priv = priv;\n \ttmpl->rxq.mp = NULL;\n \ttmpl->rxq.elts_n = log2above(desc);\n@@ -1717,6 +1757,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];\n \n \tif (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)\n \t\treturn 0;\n@@ -1736,9 +1777,12 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t 
idx)\n \tif (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {\n \t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)\n \t\t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n+\t\tLIST_REMOVE(rxq, owner_entry);\n \t\tLIST_REMOVE(rxq_ctrl, next);\n \t\tmlx5_free(rxq_ctrl);\n \t\t(*priv->rxqs)[idx] = NULL;\n+\t\tmlx5_free(rxq);\n+\t\t(*priv->rxq_privs)[idx] = NULL;\n \t}\n \treturn 0;\n }\n",
    "prefixes": [
        "v4",
        "07/14"
    ]
}