get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update (replace) a patch.
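
For illustration, the read-only call shown below can be reproduced with a short Python sketch (an assumption: the third-party requests library is installed; any HTTP client works). Without the ?format=api suffix the endpoint serves plain JSON matching the example response document.

import requests

# Retrieve patch 137477 as JSON; the response body matches the
# example document shown below in this page.
resp = requests.get("http://patches.dpdk.org/api/patches/137477/")
resp.raise_for_status()
patch = resp.json()
print(patch["state"])              # e.g. "accepted"
print(patch["submitter"]["name"])  # e.g. "Dariusz Sosnowski"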

GET /api/patches/137477/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137477,
    "url": "http://patches.dpdk.org/api/patches/137477/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240229115157.201671-9-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240229115157.201671-9-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240229115157.201671-9-dsosnowski@nvidia.com",
    "date": "2024-02-29T11:51:53",
    "name": "[v2,08/11] net/mlx5: use flow as operation container",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "97f9866973559546cda2fd17af16b21e32277c47",
    "submitter": {
        "id": 2386,
        "url": "http://patches.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240229115157.201671-9-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31292,
            "url": "http://patches.dpdk.org/api/series/31292/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31292",
            "date": "2024-02-29T11:51:45",
            "name": "net/mlx5: flow insertion performance improvements",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/31292/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/137477/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/137477/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BFBD643C35;\n\tThu, 29 Feb 2024 12:53:49 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A392242DEF;\n\tThu, 29 Feb 2024 12:52:59 +0100 (CET)",
            "from NAM10-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam10on2067.outbound.protection.outlook.com [40.107.94.67])\n by mails.dpdk.org (Postfix) with ESMTP id BBDB042686\n for <dev@dpdk.org>; Thu, 29 Feb 2024 12:52:54 +0100 (CET)",
            "from BLAPR05CA0008.namprd05.prod.outlook.com (2603:10b6:208:36e::11)\n by SJ2PR12MB7894.namprd12.prod.outlook.com (2603:10b6:a03:4c6::13)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.41; Thu, 29 Feb\n 2024 11:52:50 +0000",
            "from BL6PEPF0001AB4B.namprd04.prod.outlook.com\n (2603:10b6:208:36e:cafe::6c) by BLAPR05CA0008.outlook.office365.com\n (2603:10b6:208:36e::11) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7362.12 via Frontend\n Transport; Thu, 29 Feb 2024 11:52:50 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n BL6PEPF0001AB4B.mail.protection.outlook.com (10.167.242.69) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Thu, 29 Feb 2024 11:52:49 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 29 Feb\n 2024 03:52:28 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Thu, 29 Feb\n 2024 03:52:26 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=kP8kU/P94cgc1bfzc0oe9DH8IaJXAL+3+xVHi7/GmtYuYYZYA1jGPOCdgUa1B3iA4wJ6auigIBaBRD1wd0GXFxd6xkFYenwl3biDQSzTxYjhvwPzNGv01KbbsmiLci71hHWzsXBmbNWX1qA1Tg7v0QG5Q3JAuR91bzlFW2z+kkZjYjh5PrqTe9WECsS+ido3u27K+d5dTlRU4ylP90gt/uWwIHHwAY5rp1M91lyfpKDd9ZwmUYvyHyruCW5tavT3wGmMN+PatbbCBUKhR99JXcd/CMmhTJM7s655IfgudVAA4ukGDZK6wCtgtzy2C6y6wbqHPBZyb/FQrJgQxLZbFQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=QnObfUJxp1xs7eap3AxCgqzfSXDAqEavcCVHWdVOzIU=;\n b=b4uSbLwOM+M+ULCpwL5vomA7Q0+udV9eGAY3c0Vsy6/v1q4ac4ZyvoA0k6lNVY9g3wLQJdPS3VxtUJ63xbxcSoQRnB+w+LxcVnKIBAGIE/8O4c3GFgToGGz/xtQF0rZkvKTcBf/rFNMrYKzgJNaso2I58ctrUlSOh8DxJwtjiivKxtxriA9BhdrrD1rrGIZCWLdSp0h65Sq/G7Lgp/IXkDQtMLq24ctT1wSBTTCdoPnGsPDhozFPqRFnkyV7uVmSS34H6vRrrCdQX5cu250196OhOpX0RiT35LGjglOg2HnOZYzkKMTQVWnocEBKAtQcz7xR/jhlyTut9wYsU2X7Bw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=QnObfUJxp1xs7eap3AxCgqzfSXDAqEavcCVHWdVOzIU=;\n b=rzyaHNcRkR+M+6jy1Cb04FsdhYQ/XdfWKXXuTh1iWo5xK3KmMZ+0YVCcJQyiDjHHytaHoEFMtDB0dXSHYLhKXQ9w/Vn/RBYO6fDqf2gKXOaqYITaptxoXZKSCKCBHKLG/2noZ7zgTGiKjLBZTf6EqCQTWCsrO3wSEWBvl85Lkkmh3CuNnbDF9jRK/VIkrB7Zd0ggmGzz4AuN7armusnY8PTifPF9UHD4dOIpXSneSjDhnU1RhDT54h3VgohDYWPIl+QXFRKnK4RWevNaTRyLoHUHyN7FYU5w5sb8iOjZtjngDikJaqsx62VVlA8Ddip2iwnkas065ZC81ixGg3tJJg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH v2 08/11] net/mlx5: use flow as operation container",
        "Date": "Thu, 29 Feb 2024 12:51:53 +0100",
        "Message-ID": "<20240229115157.201671-9-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240229115157.201671-1-dsosnowski@nvidia.com>",
        "References": "<20240228170046.176600-1-dsosnowski@nvidia.com>\n <20240229115157.201671-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BL6PEPF0001AB4B:EE_|SJ2PR12MB7894:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "54fb1340-8a1f-4e9c-1f83-08dc391cf45d",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n sJsRc6UBVZznU4EoRl/52qMk89jmPfikZNkHmqLsj22G1XytVsV5vTO9nPsVOyBe0fNow1yCixYGZZVqicQmJLrTQ6thke3GSQZqY1UKuH3rT02t6husjTMGm+NhnrcIHuFGtR4Y4agz/ZTIPQWH7N2JLGzoPeCVknPQqYYEuGjMux/R/gHhvdJp4+kHSGoLG3HLU9bS5MOXmuqkhH52CXs01MXaHtfYAaYh4c7eJnI/wuuqQ7DhFiFD8rUwV8hVPu/GQ7ViVqVIT1jWj2gTv+fdJVMRUvvhpIsHdYhS0Bf2ZScCu07ZE1NgmevO7cd8KI3rcJKih+EJgpADen3Il0We1bc3ql6FuUR+EuqpHYT3+NOJ1k82giOBmsSmvRXcgo6B5/eeQKaJLZORN3qldT9uYsosUnEFErRG8obIaaoqwVNJcbNnoM6hgFLb5tA7RkYetfWGYWsR13Z9FdshbZ5UL2OCr5X1lbaoDRI/Hd+yIvpQcbvh/PsjuWf8A9NpkK9Xp9NCidEGB87LgWWEH+KFdhVF+XgM4M4viW3BV6w/DEb7xCoE4NTCeO/pm30iu9QL6LiGoxAGCOhIufeZsisFjO33iN6tXx3h02ipjamPU10caNFvKr0ah8Z9K3lABk6Wg94CQxrEYUIuMibnPs7srPyPRt1tBVpe4VcUO2PDBOr918nB9Z/Os8k12JM20oObX6nYH9BpfHBI49Cf77A7LgQTXw+jBkU9xz7ygriHWs+SVFCSoldSbIsSy/0n",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(82310400014)(36860700004); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "29 Feb 2024 11:52:49.8722 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 54fb1340-8a1f-4e9c-1f83-08dc391cf45d",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BL6PEPF0001AB4B.namprd04.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ2PR12MB7894",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "While processing async flow operations in mlx5 PMD,\nmlx5_hw_q_job struct is used to hold the following data\nrelated to the ongoing operation.\n\n- operation type,\n- user data,\n- flow reference.\n\nJob itself is then passed to mlx5dr layer as its \"user data\".\nOther types of data required during flow operation processing\nare accessed through the flow itself.\n\nSince most of the accessed fields are in the flow struct itself,\nthe operation type and user data can be moved to the flow struct.\nThis removes unnecessary memory indirection and reduces memory\nfootprint of flow operations processing. It decreases cache stress\nand as a result can increase processing throughput.\n\nThis patch removes the mlx5_hw_q_job from async flow operations\nprocessing and from now on the flow itself can represent the ongoing\noperation. Async operations on indirect actions still use jobs.\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h         |   8 +-\n drivers/net/mlx5/mlx5_flow.h    |  13 ++\n drivers/net/mlx5/mlx5_flow_hw.c | 210 +++++++++++++++-----------------\n 3 files changed, 116 insertions(+), 115 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 2e2504f20f..8acb79e7bb 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -396,10 +396,7 @@ enum mlx5_hw_indirect_type {\n struct mlx5_hw_q_job {\n \tuint32_t type; /* Job type. */\n \tuint32_t indirect_type;\n-\tunion {\n-\t\tstruct rte_flow_hw *flow; /* Flow attached to the job. */\n-\t\tconst void *action; /* Indirect action attached to the job. */\n-\t};\n+\tconst void *action; /* Indirect action attached to the job. */\n \tvoid *user_data; /* Job user data. */\n \tstruct {\n \t\t/* User memory for query output */\n@@ -412,7 +409,8 @@ struct mlx5_hw_q_job {\n /* HW steering job descriptor LIFO pool. */\n struct mlx5_hw_q {\n \tuint32_t job_idx; /* Free job index. */\n-\tuint32_t size; /* LIFO size. */\n+\tuint32_t size; /* Job LIFO queue size. */\n+\tuint32_t ongoing_flow_ops; /* Number of ongoing flow operations. */\n \tstruct mlx5_hw_q_job **job; /* LIFO header. */\n \tstruct rte_ring *indir_cq; /* Indirect action SW completion queue. */\n \tstruct rte_ring *indir_iq; /* Indirect action SW in progress queue. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex a204f94624..46d8ce1775 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1269,6 +1269,16 @@ typedef uint32_t cnt_id_t;\n \n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \n+enum {\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_NONE,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_CREATE,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY,\n+\tMLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE,\n+};\n+\n #ifdef PEDANTIC\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n@@ -1290,6 +1300,9 @@ struct rte_flow_hw {\n \tcnt_id_t cnt_id;\n \tuint32_t mtr_id;\n \tuint32_t rule_idx;\n+\tuint8_t operation_type; /**< Ongoing flow operation type. */\n+\tvoid *user_data; /**< Application's private data passed to enqueued flow operation. */\n+\tuint8_t padding[1]; /**< Padding for proper alignment of mlx5dr rule struct. */\n \tuint8_t rule[]; /* HWS layer data struct. 
*/\n } __rte_packed;\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex cbbf87b999..dc0b4bff3d 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -312,6 +312,31 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {\n \t.hdr.ether_type = 0,\n };\n \n+static inline uint32_t\n+flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)\n+{\n+\tstruct mlx5_hw_q *q = &priv->hw_q[queue];\n+\n+\tMLX5_ASSERT(q->size >= q->job_idx);\n+\treturn (q->size - q->job_idx) + q->ongoing_flow_ops;\n+}\n+\n+static inline void\n+flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)\n+{\n+\tstruct mlx5_hw_q *q = &priv->hw_q[queue];\n+\n+\tq->ongoing_flow_ops++;\n+}\n+\n+static inline void\n+flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)\n+{\n+\tstruct mlx5_hw_q *q = &priv->hw_q[queue];\n+\n+\tq->ongoing_flow_ops--;\n+}\n+\n static __rte_always_inline struct mlx5_hw_q_job *\n flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)\n {\n@@ -3426,20 +3451,15 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tstruct mlx5_flow_hw_action_params ap;\n \tstruct mlx5_flow_hw_pattern_params pp;\n \tstruct rte_flow_hw *flow = NULL;\n-\tstruct mlx5_hw_q_job *job = NULL;\n \tconst struct rte_flow_item *rule_items;\n \tuint32_t flow_idx = 0;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n \tif (unlikely((!dev->data->dev_started))) {\n-\t\trte_errno = EINVAL;\n-\t\tgoto error;\n-\t}\n-\tjob = flow_hw_job_get(priv, queue);\n-\tif (!job) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"Port must be started before enqueueing flow operations\");\n+\t\treturn NULL;\n \t}\n \tflow = mlx5_ipool_zmalloc(table->flow, &flow_idx);\n \tif (!flow)\n@@ -3461,13 +3481,12 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\tflow->res_idx = flow_idx;\n \t}\n \t/*\n-\t * Set the job type here in order to know if the flow memory\n+\t * Set the flow operation type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n \t */\n-\tjob->type = MLX5_HW_Q_JOB_TYPE_CREATE;\n-\tjob->flow = flow;\n-\tjob->user_data = user_data;\n-\trule_attr.user_data = job;\n+\tflow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;\n+\tflow->user_data = user_data;\n+\trule_attr.user_data = flow;\n \t/*\n \t * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices\n \t * for rule insertion hints.\n@@ -3501,7 +3520,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t} else {\n \t\tuint32_t selector;\n \n-\t\tjob->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;\n+\t\tflow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;\n \t\trte_rwlock_read_lock(&table->matcher_replace_rwlk);\n \t\tselector = table->matcher_selector;\n \t\tret = mlx5dr_rule_create(table->matcher_info[selector].matcher,\n@@ -3512,15 +3531,15 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\trte_rwlock_read_unlock(&table->matcher_replace_rwlk);\n \t\tflow->matcher_selector = selector;\n \t}\n-\tif (likely(!ret))\n+\tif (likely(!ret)) {\n+\t\tflow_hw_q_inc_flow_ops(priv, queue);\n \t\treturn (struct rte_flow *)flow;\n+\t}\n error:\n \tif (table->resource && res_idx)\n \t\tmlx5_ipool_free(table->resource, res_idx);\n \tif (flow_idx)\n \t\tmlx5_ipool_free(table->flow, flow_idx);\n-\tif (job)\n-\t\tflow_hw_job_put(priv, job, queue);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3575,19 +3594,14 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tstruct mlx5dr_rule_action *rule_acts;\n \tstruct mlx5_flow_hw_action_params ap;\n \tstruct rte_flow_hw *flow = NULL;\n-\tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t flow_idx = 0;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n \tif (unlikely(rule_index >= table->cfg.attr.nb_flows)) {\n-\t\trte_errno = EINVAL;\n-\t\tgoto error;\n-\t}\n-\tjob = flow_hw_job_get(priv, queue);\n-\tif (!job) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"Flow rule index exceeds table size\");\n+\t\treturn NULL;\n \t}\n \tflow = mlx5_ipool_zmalloc(table->flow, &flow_idx);\n \tif (!flow)\n@@ -3609,13 +3623,12 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\tflow->res_idx = flow_idx;\n \t}\n \t/*\n-\t * Set the job type here in order to know if the flow memory\n+\t * Set the flow operation type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n \t */\n-\tjob->type = MLX5_HW_Q_JOB_TYPE_CREATE;\n-\tjob->flow = flow;\n-\tjob->user_data = user_data;\n-\trule_attr.user_data = job;\n+\tflow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;\n+\tflow->user_data = user_data;\n+\trule_attr.user_data = flow;\n \t/* Set the rule index. */\n \tflow->rule_idx = rule_index;\n \trule_attr.rule_idx = flow->rule_idx;\n@@ -3640,7 +3653,7 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t} else {\n \t\tuint32_t selector;\n \n-\t\tjob->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;\n+\t\tflow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;\n \t\trte_rwlock_read_lock(&table->matcher_replace_rwlk);\n \t\tselector = table->matcher_selector;\n \t\tret = mlx5dr_rule_create(table->matcher_info[selector].matcher,\n@@ -3649,15 +3662,15 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\t\t\t\t (struct mlx5dr_rule *)flow->rule);\n \t\trte_rwlock_read_unlock(&table->matcher_replace_rwlk);\n \t}\n-\tif (likely(!ret))\n+\tif (likely(!ret)) {\n+\t\tflow_hw_q_inc_flow_ops(priv, queue);\n \t\treturn (struct rte_flow *)flow;\n+\t}\n error:\n \tif (table->resource && res_idx)\n \t\tmlx5_ipool_free(table->resource, res_idx);\n \tif (flow_idx)\n \t\tmlx5_ipool_free(table->flow, flow_idx);\n-\tif (job)\n-\t\tflow_hw_job_put(priv, job, queue);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3713,15 +3726,9 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tstruct rte_flow_hw *nf;\n \tstruct rte_flow_hw_aux *aux;\n \tstruct rte_flow_template_table *table = of->table;\n-\tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n-\tjob = flow_hw_job_get(priv, queue);\n-\tif (!job) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n \taux = mlx5_flow_hw_aux(dev->data->port_id, of);\n \tnf = &aux->upd_flow;\n \tmemset(nf, 0, sizeof(struct rte_flow_hw));\n@@ -3741,14 +3748,6 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t} else {\n \t\tnf->res_idx = of->res_idx;\n \t}\n-\t/*\n-\t * Set the job type here in order to know if the flow memory\n-\t * should be freed or not when get the result from dequeue.\n-\t */\n-\tjob->type = MLX5_HW_Q_JOB_TYPE_UPDATE;\n-\tjob->flow = nf;\n-\tjob->user_data = user_data;\n-\trule_attr.user_data = job;\n \t/*\n \t * Indexed pool 
returns 1-based indices, but mlx5dr expects 0-based indices\n \t * for rule insertion hints.\n@@ -3770,18 +3769,22 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n-\t/* Switch to the old flow. New flow will retrieved from the table on completion. */\n-\tjob->flow = of;\n+\t/*\n+\t * Set the flow operation type here in order to know if the flow memory\n+\t * should be freed or not when get the result from dequeue.\n+\t */\n+\tof->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;\n+\tof->user_data = user_data;\n+\trule_attr.user_data = of;\n \tret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,\n \t\t\t\t\taction_template_index, rule_acts, &rule_attr);\n-\tif (likely(!ret))\n+\tif (likely(!ret)) {\n+\t\tflow_hw_q_inc_flow_ops(priv, queue);\n \t\treturn 0;\n+\t}\n error:\n \tif (table->resource && res_idx)\n \t\tmlx5_ipool_free(table->resource, res_idx);\n-\t/* Flow created fail, return the descriptor and flow memory. */\n-\tif (job)\n-\t\tflow_hw_job_put(priv, job, queue);\n \treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t  \"fail to update rte flow\");\n@@ -3825,27 +3828,23 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,\n \t\t.burst = attr->postpone,\n \t};\n \tstruct rte_flow_hw *fh = (struct rte_flow_hw *)flow;\n-\tstruct mlx5_hw_q_job *job;\n+\tbool resizable = rte_flow_template_table_resizable(dev->data->port_id,\n+\t\t\t\t\t\t\t   &fh->table->cfg.attr);\n \tint ret;\n \n-\tjob = flow_hw_job_get(priv, queue);\n-\tif (!job)\n-\t\treturn rte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t  \"fail to destroy rte flow: flow queue full\");\n-\tjob->type = !rte_flow_template_table_resizable(dev->data->port_id, &fh->table->cfg.attr) ?\n-\t\t    MLX5_HW_Q_JOB_TYPE_DESTROY : MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY;\n-\tjob->user_data = user_data;\n-\tjob->flow = fh;\n-\trule_attr.user_data = job;\n+\tfh->operation_type = !resizable ?\n+\t\t\t     MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :\n+\t\t\t     MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;\n+\tfh->user_data = user_data;\n+\trule_attr.user_data = fh;\n \trule_attr.rule_idx = fh->rule_idx;\n \tret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);\n \tif (ret) {\n-\t\tflow_hw_job_put(priv, job, queue);\n \t\treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"fail to destroy rte flow\");\n \t}\n+\tflow_hw_q_inc_flow_ops(priv, queue);\n \treturn 0;\n }\n \n@@ -3950,16 +3949,16 @@ mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,\n \t\t\t\tuint16_t n_res)\n {\n \tuint32_t size, i;\n-\tstruct mlx5_hw_q_job *job = NULL;\n+\tstruct rte_flow_hw *flow = NULL;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;\n \n \tsize = RTE_MIN(rte_ring_count(ring), n_res);\n \tfor (i = 0; i < size; i++) {\n \t\tres[i].status = RTE_FLOW_OP_SUCCESS;\n-\t\trte_ring_dequeue(ring, (void **)&job);\n-\t\tres[i].user_data = job->user_data;\n-\t\tflow_hw_job_put(priv, job, queue);\n+\t\trte_ring_dequeue(ring, (void **)&flow);\n+\t\tres[i].user_data = flow->user_data;\n+\t\tflow_hw_q_dec_flow_ops(priv, queue);\n \t}\n \treturn (int)size;\n }\n@@ -4016,12 +4015,11 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n \n static __rte_always_inline void\n hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,\n-\t\t\t       struct 
mlx5_hw_q_job *job,\n+\t\t\t       struct rte_flow_hw *flow,\n \t\t\t       uint32_t queue, struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n-\tstruct rte_flow_hw *flow = job->flow;\n \tstruct rte_flow_template_table *table = flow->table;\n \t/* Release the original resource index in case of update. */\n \tuint32_t res_idx = flow->res_idx;\n@@ -4037,12 +4035,10 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,\n \t\tmlx5_ipool_free(pool->idx_pool,\tflow->mtr_id);\n \t\tflow->mtr_id = 0;\n \t}\n-\tif (job->type != MLX5_HW_Q_JOB_TYPE_UPDATE) {\n-\t\tif (table) {\n-\t\t\tif (table->resource)\n-\t\t\t\tmlx5_ipool_free(table->resource, res_idx);\n-\t\t\tmlx5_ipool_free(table->flow, flow->idx);\n-\t\t}\n+\tif (flow->operation_type != MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE) {\n+\t\tif (table->resource)\n+\t\t\tmlx5_ipool_free(table->resource, res_idx);\n+\t\tmlx5_ipool_free(table->flow, flow->idx);\n \t} else {\n \t\tstruct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);\n \t\tstruct rte_flow_hw *upd_flow = &aux->upd_flow;\n@@ -4055,28 +4051,27 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,\n \n static __rte_always_inline void\n hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,\n-\t\t      struct mlx5_hw_q_job *job,\n+\t\t      struct rte_flow_hw *flow,\n \t\t      uint32_t queue, enum rte_flow_op_status status,\n \t\t      struct rte_flow_error *error)\n {\n-\tstruct rte_flow_hw *flow = job->flow;\n \tstruct rte_flow_template_table *table = flow->table;\n \tuint32_t selector = flow->matcher_selector;\n \tuint32_t other_selector = (selector + 1) & 1;\n \n-\tswitch (job->type) {\n-\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:\n+\tswitch (flow->operation_type) {\n+\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:\n \t\trte_atomic_fetch_add_explicit\n \t\t\t(&table->matcher_info[selector].refcnt, 1,\n \t\t\t rte_memory_order_relaxed);\n \t\tbreak;\n-\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:\n+\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:\n \t\trte_atomic_fetch_sub_explicit\n \t\t\t(&table->matcher_info[selector].refcnt, 1,\n \t\t\t rte_memory_order_relaxed);\n-\t\thw_cmpl_flow_update_or_destroy(dev, job, queue, error);\n+\t\thw_cmpl_flow_update_or_destroy(dev, flow, queue, error);\n \t\tbreak;\n-\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:\n+\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:\n \t\tif (status == RTE_FLOW_OP_SUCCESS) {\n \t\t\trte_atomic_fetch_sub_explicit\n \t\t\t\t(&table->matcher_info[selector].refcnt, 1,\n@@ -4120,7 +4115,6 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hw_q_job *job;\n \tint ret, i;\n \n \t/* 1. Pull the flow completion. */\n@@ -4130,23 +4124,24 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\"fail to query flow queue\");\n \tfor (i = 0; i <  ret; i++) {\n-\t\tjob = (struct mlx5_hw_q_job *)res[i].user_data;\n+\t\tstruct rte_flow_hw *flow = res[i].user_data;\n+\n \t\t/* Restore user data. 
*/\n-\t\tres[i].user_data = job->user_data;\n-\t\tswitch (job->type) {\n-\t\tcase MLX5_HW_Q_JOB_TYPE_DESTROY:\n-\t\tcase MLX5_HW_Q_JOB_TYPE_UPDATE:\n-\t\t\thw_cmpl_flow_update_or_destroy(dev, job, queue, error);\n+\t\tres[i].user_data = flow->user_data;\n+\t\tswitch (flow->operation_type) {\n+\t\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:\n+\t\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:\n+\t\t\thw_cmpl_flow_update_or_destroy(dev, flow, queue, error);\n \t\t\tbreak;\n-\t\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:\n-\t\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:\n-\t\tcase MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:\n-\t\t\thw_cmpl_resizable_tbl(dev, job, queue, res[i].status, error);\n+\t\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:\n+\t\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:\n+\t\tcase MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:\n+\t\t\thw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);\n \t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n-\t\tflow_hw_job_put(priv, job, queue);\n+\t\tflow_hw_q_dec_flow_ops(priv, queue);\n \t}\n \t/* 2. Pull indirect action comp. */\n \tif (ret < n_res)\n@@ -4190,7 +4185,7 @@ __flow_hw_push_action(struct rte_eth_dev *dev,\n \t\t\tmlx5_aso_push_wqe(priv->sh,\n \t\t\t\t\t  &priv->hws_mpool->sq[queue]);\n \t}\n-\treturn priv->hw_q[queue].size - priv->hw_q[queue].job_idx;\n+\treturn flow_hw_q_pending(priv, queue);\n }\n \n static int\n@@ -10204,6 +10199,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \tfor (i = 0; i < nb_q_updated; i++) {\n \t\tpriv->hw_q[i].job_idx = _queue_attr[i]->size;\n \t\tpriv->hw_q[i].size = _queue_attr[i]->size;\n+\t\tpriv->hw_q[i].ongoing_flow_ops = 0;\n \t\tif (i == 0)\n \t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n \t\t\t\t\t    &priv->hw_q[nb_q_updated];\n@@ -12635,7 +12631,6 @@ flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,\n {\n \tint ret;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hw_q_job *job;\n \tstruct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;\n \tstruct rte_flow_template_table *table = hw_flow->table;\n \tuint32_t table_selector = table->matcher_selector;\n@@ -12661,31 +12656,26 @@ flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"no active table resize\");\n-\tjob = flow_hw_job_get(priv, queue);\n-\tif (!job)\n-\t\treturn rte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t  \"queue is full\");\n-\tjob->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE;\n-\tjob->user_data = user_data;\n-\tjob->flow = hw_flow;\n-\trule_attr.user_data = job;\n+\thw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;\n+\thw_flow->user_data = user_data;\n+\trule_attr.user_data = hw_flow;\n \tif (rule_selector == table_selector) {\n \t\tstruct rte_ring *ring = !attr->postpone ?\n \t\t\t\t\tpriv->hw_q[queue].flow_transfer_completed :\n \t\t\t\t\tpriv->hw_q[queue].flow_transfer_pending;\n-\t\trte_ring_enqueue(ring, job);\n+\t\trte_ring_enqueue(ring, hw_flow);\n+\t\tflow_hw_q_inc_flow_ops(priv, queue);\n \t\treturn 0;\n \t}\n \tret = mlx5dr_matcher_resize_rule_move(other_matcher,\n \t\t\t\t\t      (struct mlx5dr_rule *)hw_flow->rule,\n \t\t\t\t\t      &rule_attr);\n \tif (ret) {\n-\t\tflow_hw_job_put(priv, job, queue);\n \t\treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"flow transfer failed\");\n 
\t}\n+\tflow_hw_q_inc_flow_ops(priv, queue);\n \treturn 0;\n }\n \n",
    "prefixes": [
        "v2",
        "08/11"
    ]
}
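
For completeness, the write path (the patch/put methods listed at the top) can be sketched the same way. This is a minimal illustration only: it assumes a Patchwork API token belonging to a project maintainer, the token value shown is a placeholder, and only maintainer-editable fields such as state, archived, and delegate are expected to be accepted.

import requests

# Partially update the patch via HTTP PATCH. The Authorization token
# below is a placeholder, not a real credential; the field names come
# from the response document above.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/137477/",
    headers={"Authorization": "Token 0123456789abcdef"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()      # expect HTTP 200 OK on success
print(resp.json()["state"])  # "accepted"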