get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces the existing resource).

GET /api/patches/137444/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137444,
    "url": "http://patches.dpdk.org/api/patches/137444/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240228170046.176600-7-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240228170046.176600-7-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240228170046.176600-7-dsosnowski@nvidia.com",
    "date": "2024-02-28T17:00:41",
    "name": "[06/11] net/mlx5: remove flow pattern from job",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ecbd943e7cb4fe402b4f376d6e7e50ee90028cf2",
    "submitter": {
        "id": 2386,
        "url": "http://patches.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240228170046.176600-7-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31278,
            "url": "http://patches.dpdk.org/api/series/31278/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31278",
            "date": "2024-02-28T17:00:35",
            "name": "net/mlx5: flow insertion performance improvements",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/31278/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/137444/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/137444/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 59E9B43C2C;\n\tWed, 28 Feb 2024 18:02:16 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0841642F29;\n\tWed, 28 Feb 2024 18:01:50 +0100 (CET)",
            "from NAM04-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam04on2070.outbound.protection.outlook.com [40.107.101.70])\n by mails.dpdk.org (Postfix) with ESMTP id D02D242F11\n for <dev@dpdk.org>; Wed, 28 Feb 2024 18:01:48 +0100 (CET)",
            "from SJ0PR03CA0035.namprd03.prod.outlook.com (2603:10b6:a03:33e::10)\n by SJ0PR12MB7066.namprd12.prod.outlook.com (2603:10b6:a03:4ae::5)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.41; Wed, 28 Feb\n 2024 17:01:42 +0000",
            "from SJ1PEPF00001CDD.namprd05.prod.outlook.com\n (2603:10b6:a03:33e:cafe::d) by SJ0PR03CA0035.outlook.office365.com\n (2603:10b6:a03:33e::10) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7339.28 via Frontend\n Transport; Wed, 28 Feb 2024 17:01:41 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n SJ1PEPF00001CDD.mail.protection.outlook.com (10.167.242.5) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Wed, 28 Feb 2024 17:01:41 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 28 Feb\n 2024 09:01:18 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Wed, 28 Feb\n 2024 09:01:15 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=QZ1DqJU3RT7LTFJIzBU2FyIHQpsp+d/iHiaLmpFHLqyeUUz/wWKPEHfhe972/hXPucUTxTPwhxTAQwTQd/LOtSFNiVdquKUoUqZ9cI6SLHHhlZRpa3EkgZdkEKNBAxl81V7xCCOMa4LHcDI98fVus3jaZc2SLbkRpSqQVMIV6SKM7CpsCJ38RFs0rtm3Q1FljhHvp81jChdMCm2y3PrzQIgNICm2dSldLLAWsUQQMi/ngAkOS216qT5vASUMbPoUCmLP5w/8TBIZIPZRdFo4v9b9Z4tHKKnbHZs3oL25XouW3dh49xKeH1P1WSRii8AziEq4H3rzn0ojtLzbNnw4TQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=K0KaAdoKOSfUSgANE0UgnPTPeyM2dzhck5m+f6TWG3M=;\n b=nvZ2qpccjfhmtvpkyiXy3CctJs64CLHyn681mFL9OobjrdEuv7WYh5cnCpoII54n+e2FEFe7IX20cN27r1PiBluUCpzOs7dbcjqdmRiBJP7WHE9olsMeYYi9PPdA+M5TEjvMNKLOH3zicj5AEoAb889J2+UGmHtpX+806gP0VMhgo3/oaj1I18Sq4wo90oCD4HkucSpZmN55Ls6s0j/N4yRB0oW+IO20YW4PwzhweyLBCda/RgzwPo/72lfIiZKIekaxxs9TQEyMAMyIcUClB3BkYh0YNTTAgb6M9qPJf+HnfQC7Wx4IiGQdythgDD9JUkh7hXm2xXWKv7nmg8uZ4w==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=K0KaAdoKOSfUSgANE0UgnPTPeyM2dzhck5m+f6TWG3M=;\n b=MDh8OYt7PzhaNOcEFLcryfilR4Y/Nkt1NVo9vmDw+lkN479kDuP67KvMlzWBTUuFzPP6JP5o7WjSmQOYOMW84w/oDxC45MDaZE5BhsCCh6kVDjBSb084ZOrW3Y/mlFn/QImYUeKQB0dofwkAxeiVtYVegmQkiG8h/JD+1mTz0uuld2QLWE4+WJemqcVsbtRRwnJ98KWDO7IU5UlWn2x1uPM/egV3uzOW7Wu19QaoLkbRddSYuQmyQWwok+1ba+salyq2+bccTYnhHVXX3lB2R4Ixan3tNi66agjQzKSFLCb19/ATXlGjMjJFJKEDoxSPc3GXwYSZCn0w0A102ico8w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH 06/11] net/mlx5: remove flow pattern from job",
        "Date": "Wed, 28 Feb 2024 18:00:41 +0100",
        "Message-ID": "<20240228170046.176600-7-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "References": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SJ1PEPF00001CDD:EE_|SJ0PR12MB7066:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "45ea4c72-5fbc-4696-1d9e-08dc387eefb9",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n gzSjx8jSZ9uhoRqzeTErfP6jqM9I36LaonweJ9e9pgZitPINd/C/GSQCcE78KewrSGVaeh0BecUMRxxA2osKeXTQ1Ny3t3WDETZQ4zBHhDxAqtv8O9oEKKWDiMDeZIyRe5Ikt5o59zm0l7nV0fEbEVLUj86f9PU6k4q5WuEEJnu+zCCuR1MCm1jz6aWexCdee6jQIoEz/JcOrJOpi5KUmlYugoLC7cv2/u/uUwUmQ4pBXa8d8ljNV/b8COC+vX0RBFvDEyJXn6vh/m9/f6MkHTBlPOvzvWisWDg1Fr6XP9RcZAe957zJc4whicXuJorSAH3TInD8XWkPF7UMPY8yx1yvUU7peCrTrkVwlbIZlxdb/61wIi/RDeE1LeBhiizE4bDlSXIn3lhWJCrYyd7e+V1s3Ii4FS3rLiF8wGfwOfM7vZGBmkOV1D4PiQV6DIl9rbBIY9OqQhYnZy64HTnSHsf1d7vL/5R+B1u8rfjAid3UUs5tQRiov28gKf6jkCe55+gKpsV3aYofi5T8qCYKhIgd7LKtlsdPIwG6NjeuGkcRVuRR7z7c6TqRBuMS/+u686GPirG51uMmbqT2Lnnw2EoYAEqpRmKygBnbSX7FYT6UBXxBD2JPveDmBEjhey45QN8DtCwQA8a42FCh/wbTJ9jWb0MrnG51B+VT3mNoUFXAwlHwfVHGN6BWJl8r4+0FDnva9zNJ/mD7pQ40jE7DrZaonqqKs4L1bCoCiHAFb8zJkV9Uw4UNbttUzPzWGwc3",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(36860700004)(82310400014); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Feb 2024 17:01:41.7907 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 45ea4c72-5fbc-4696-1d9e-08dc387eefb9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n SJ1PEPF00001CDD.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ0PR12MB7066",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "mlx5_hw_q_job struct held a reference to temporary flow rule pattern\nand contained temporary REPRESENTED_PORT and TAG items structs.\nThey are used whenever it is required to prepend a flow rule pattern,\nprovided by the application with one of such items.\nIf prepending is required, then flow rule pattern is copied over to\ntemporary buffer and a new item added internally in PMD.\nSuch constructed buffer is passed to the HWS layer when flow create\noperation is being enqueued.\nAfter operation is enqueued, temporary flow pattern can be safely\ndiscarded, so there is no need to store it during\nthe whole lifecycle of mlx5_hw_q_job.\n\nThis patch removes all references to flow rule pattern and items stored\ninside mlx5_hw_q_job and removes relevant allocations to reduce job\nmemory footprint.\nTemporary pattern and items stored per job are replaced with stack\nallocated ones, contained in mlx5_flow_hw_pattern_params struct.\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h         | 17 ++++-------\n drivers/net/mlx5/mlx5_flow.h    | 10 +++++++\n drivers/net/mlx5/mlx5_flow_hw.c | 51 ++++++++++++++-------------------\n 3 files changed, 37 insertions(+), 41 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex bd0846d6bf..fc3d28e6f2 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -401,17 +401,12 @@ struct mlx5_hw_q_job {\n \t\tconst void *action; /* Indirect action attached to the job. */\n \t};\n \tvoid *user_data; /* Job user data. */\n-\tstruct rte_flow_item *items;\n-\tunion {\n-\t\tstruct {\n-\t\t\t/* User memory for query output */\n-\t\t\tvoid *user;\n-\t\t\t/* Data extracted from hardware */\n-\t\t\tvoid *hw;\n-\t\t} __rte_packed query;\n-\t\tstruct rte_flow_item_ethdev port_spec;\n-\t\tstruct rte_flow_item_tag tag_spec;\n-\t} __rte_packed;\n+\tstruct {\n+\t\t/* User memory for query output */\n+\t\tvoid *user;\n+\t\t/* Data extracted from hardware */\n+\t\tvoid *hw;\n+\t} query;\n \tstruct rte_flow_hw *upd_flow; /* Flow with updated values. */\n };\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex df1c913017..96b43ce61e 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1304,6 +1304,16 @@ struct mlx5_flow_hw_action_params {\n \tuint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];\n };\n \n+/** Container for dynamically generated flow items used during flow rule creation. */\n+struct mlx5_flow_hw_pattern_params {\n+\t/** Array of dynamically generated flow items. */\n+\tstruct rte_flow_item items[MLX5_HW_MAX_ITEMS];\n+\t/** Temporary REPRESENTED_PORT item generated by PMD. */\n+\tstruct rte_flow_item_ethdev port_spec;\n+\t/** Temporary TAG item generated by PMD. */\n+\tstruct rte_flow_item_tag tag_spec;\n+};\n+\n /* rte flow action translate to DR action struct. 
*/\n struct mlx5_action_construct_data {\n \tLIST_ENTRY(mlx5_action_construct_data) next;\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 7160477c83..c3d9eef999 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -3253,44 +3253,44 @@ flow_hw_get_rule_items(struct rte_eth_dev *dev,\n \t\t       const struct rte_flow_template_table *table,\n \t\t       const struct rte_flow_item items[],\n \t\t       uint8_t pattern_template_index,\n-\t\t       struct mlx5_hw_q_job *job)\n+\t\t       struct mlx5_flow_hw_pattern_params *pp)\n {\n \tstruct rte_flow_pattern_template *pt = table->its[pattern_template_index];\n \n \t/* Only one implicit item can be added to flow rule pattern. */\n \tMLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);\n-\t/* At least one item was allocated in job descriptor for items. */\n+\t/* At least one item was allocated in pattern params for items. */\n \tMLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);\n \tif (pt->implicit_port) {\n \t\tif (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {\n \t\t\trte_errno = ENOMEM;\n \t\t\treturn NULL;\n \t\t}\n-\t\t/* Set up represented port item in job descriptor. */\n-\t\tjob->port_spec = (struct rte_flow_item_ethdev){\n+\t\t/* Set up represented port item in pattern params. 
*/\n+\t\tpp->port_spec = (struct rte_flow_item_ethdev){\n \t\t\t.port_id = dev->data->port_id,\n \t\t};\n-\t\tjob->items[0] = (struct rte_flow_item){\n+\t\tpp->items[0] = (struct rte_flow_item){\n \t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n-\t\t\t.spec = &job->port_spec,\n+\t\t\t.spec = &pp->port_spec,\n \t\t};\n-\t\trte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);\n-\t\treturn job->items;\n+\t\trte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);\n+\t\treturn pp->items;\n \t} else if (pt->implicit_tag) {\n \t\tif (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {\n \t\t\trte_errno = ENOMEM;\n \t\t\treturn NULL;\n \t\t}\n-\t\t/* Set up tag item in job descriptor. */\n-\t\tjob->tag_spec = (struct rte_flow_item_tag){\n+\t\t/* Set up tag item in pattern params. */\n+\t\tpp->tag_spec = (struct rte_flow_item_tag){\n \t\t\t.data = flow_hw_tx_tag_regc_value(dev),\n \t\t};\n-\t\tjob->items[0] = (struct rte_flow_item){\n+\t\tpp->items[0] = (struct rte_flow_item){\n \t\t\t.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,\n-\t\t\t.spec = &job->tag_spec,\n+\t\t\t.spec = &pp->tag_spec,\n \t\t};\n-\t\trte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);\n-\t\treturn job->items;\n+\t\trte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);\n+\t\treturn pp->items;\n \t} else {\n \t\treturn items;\n \t}\n@@ -3345,6 +3345,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t};\n \tstruct mlx5dr_rule_action *rule_acts;\n \tstruct mlx5_flow_hw_action_params ap;\n+\tstruct mlx5_flow_hw_pattern_params pp;\n \tstruct rte_flow_hw *flow = NULL;\n \tstruct mlx5_hw_q_job *job = NULL;\n \tconst struct rte_flow_item *rule_items;\n@@ -3409,7 +3410,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\tgoto error;\n \t}\n \trule_items = flow_hw_get_rule_items(dev, table, items,\n-\t\t\t\t\t    pattern_template_index, job);\n+\t\t\t\t\t    pattern_template_index, &pp);\n \tif (!rule_items)\n \t\tgoto 
error;\n \tif (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {\n@@ -9990,11 +9991,8 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\tgoto err;\n \t\t}\n \t\tmem_size += (sizeof(struct mlx5_hw_q_job *) +\n-\t\t\t    sizeof(struct mlx5_hw_q_job) +\n-\t\t\t    sizeof(struct rte_flow_item) *\n-\t\t\t    MLX5_HW_MAX_ITEMS +\n-\t\t\t\tsizeof(struct rte_flow_hw)) *\n-\t\t\t    _queue_attr[i]->size;\n+\t\t\t     sizeof(struct mlx5_hw_q_job) +\n+\t\t\t     sizeof(struct rte_flow_hw)) * _queue_attr[i]->size;\n \t}\n \tpriv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n \t\t\t\t 64, SOCKET_ID_ANY);\n@@ -10003,7 +10001,6 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tgoto err;\n \t}\n \tfor (i = 0; i < nb_q_updated; i++) {\n-\t\tstruct rte_flow_item *items = NULL;\n \t\tstruct rte_flow_hw *upd_flow = NULL;\n \n \t\tpriv->hw_q[i].job_idx = _queue_attr[i]->size;\n@@ -10016,12 +10013,8 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\t\t&job[_queue_attr[i - 1]->size - 1].upd_flow[1];\n \t\tjob = (struct mlx5_hw_q_job *)\n \t\t      &priv->hw_q[i].job[_queue_attr[i]->size];\n-\t\titems = (struct rte_flow_item *)\n-\t\t\t &job[_queue_attr[i]->size];\n-\t\tupd_flow = (struct rte_flow_hw *)\n-\t\t\t&items[_queue_attr[i]->size * MLX5_HW_MAX_ITEMS];\n+\t\tupd_flow = (struct rte_flow_hw *)&job[_queue_attr[i]->size];\n \t\tfor (j = 0; j < _queue_attr[i]->size; j++) {\n-\t\t\tjob[j].items = &items[j * MLX5_HW_MAX_ITEMS];\n \t\t\tjob[j].upd_flow = &upd_flow[j];\n \t\t\tpriv->hw_q[i].job[j] = &job[j];\n \t\t}\n@@ -12193,14 +12186,12 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,\n \t\t\t uint32_t *hash, struct rte_flow_error *error)\n {\n \tconst struct rte_flow_item *items;\n-\t/* Temp job to allow adding missing items */\n-\tstatic struct rte_flow_item tmp_items[MLX5_HW_MAX_ITEMS];\n-\tstatic struct mlx5_hw_q_job job = {.items = tmp_items};\n+\tstruct mlx5_flow_hw_pattern_params pp;\n \tint res;\n \n \titems = 
flow_hw_get_rule_items(dev, table, pattern,\n \t\t\t\t       pattern_template_index,\n-\t\t\t\t       &job);\n+\t\t\t\t       &pp);\n \tres = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,\n \t\t\t\t\t pattern_template_index,\n \t\t\t\t\t MLX5DR_RULE_HASH_CALC_MODE_RAW,\n",
    "prefixes": [
        "06/11"
    ]
}