get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
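
These endpoints can be driven from any HTTP client. Below is a minimal sketch using Python's requests library; the token value and the state transition are hypothetical, and write access (PATCH/PUT) requires a Patchwork API token with maintainer rights on the project:

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical; real tokens come from your Patchwork profile

# GET needs no authentication: fetch this patch as JSON.
patch = requests.get(f"{BASE}/patches/134409/").json()
print(patch["name"], "->", patch["state"])

# PATCH sends only the fields being changed; PUT replaces the writable fields.
resp = requests.patch(
    f"{BASE}/patches/134409/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()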

GET /api/patches/134409/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 134409,
    "url": "http://patches.dpdk.org/api/patches/134409/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231116080833.336377-2-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231116080833.336377-2-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231116080833.336377-2-getelson@nvidia.com",
    "date": "2023-11-16T08:08:32",
    "name": "[1/2] net/mlx5: fix sync queue completion processing",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a29fca430bc2762c58e147a94c856e82bf399484",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231116080833.336377-2-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 30309,
            "url": "http://patches.dpdk.org/api/series/30309/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=30309",
            "date": "2023-11-16T08:08:31",
            "name": "net/mlx5: fix completions processing",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/30309/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/134409/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/134409/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BD63243341;\n\tThu, 16 Nov 2023 09:09:10 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A9ABF402D6;\n\tThu, 16 Nov 2023 09:09:10 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2078.outbound.protection.outlook.com [40.107.243.78])\n by mails.dpdk.org (Postfix) with ESMTP id E42CD402D4;\n Thu, 16 Nov 2023 09:09:08 +0100 (CET)",
            "from DS7PR03CA0200.namprd03.prod.outlook.com (2603:10b6:5:3b6::25)\n by IA1PR12MB6433.namprd12.prod.outlook.com (2603:10b6:208:3af::11) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7002.21; Thu, 16 Nov\n 2023 08:09:05 +0000",
            "from DS2PEPF0000343E.namprd02.prod.outlook.com\n (2603:10b6:5:3b6:cafe::83) by DS7PR03CA0200.outlook.office365.com\n (2603:10b6:5:3b6::25) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7002.21 via Frontend\n Transport; Thu, 16 Nov 2023 08:09:05 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n DS2PEPF0000343E.mail.protection.outlook.com (10.167.18.41) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7002.20 via Frontend Transport; Thu, 16 Nov 2023 08:09:05 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 16 Nov\n 2023 00:08:52 -0800",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Thu, 16 Nov\n 2023 00:08:48 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=ikxHQlwymGjfVKEoHiLO0dgncd7A+Y5gq4GfC0h5swnM1MPqRm89wQjPVN8jrogqHvnXGh8JUJXtH65Mw+RpitIcsSQpkt2ukPUfwY9IUa3UgH1ZUE6TPKd1q30TcsmrU3LQTrRxEnpJWd90ckogXvMUJsL0IJ5v6v1rAfEtoIT0eIAjP5W+ZcS1E6TXo44dt1sHWp3Sas1rbROyO/lYROU5N7Ig2M+vXcpiBgNjKYfglXAaH4GFHj5F4gAtbdr6bY7l+YzH4E77hJoL4YfqVXa39fHl5F2isT9nm78tVxbAJBscXgxzxUu+tgHaTN9faUIM+R0fJQ9hB5uhKSR+tQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=CEo1ndda8cw7jTgb05KnICyHo6tONxTo2Ovo9mB8xRU=;\n b=RCDAOxeQ/KB4Y1IbZsvnX4T+5sddlnQEm+bfAgS+q6YdFrvBODewZAxzKSQTTAW7doMrw9PrsFbmesyKsqDfqWNtG4waQt69y/e3MrQV/+5KPbTUc+cNzbgEFi/BCKuiGAAFRCgxUzxmQBXQtXY5HxocDOUq1+xoT1aps86HjtwCLoxqUQWOHU0wh0wgzd6OuBCZo5gWe3XmAO/jYCZUBuLCrFkt9s70w4ZH6kJAD0yQhV4svmlHKAGP6dMidSbOJ7onrwsmMxTh+n+XQuiootpxBMJYzBZHPilkA8sAsazgysYj9zeSVrqcEkoAd5/c4J7CjNML3qHY4bQ9uL0FBw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=CEo1ndda8cw7jTgb05KnICyHo6tONxTo2Ovo9mB8xRU=;\n b=VDlr3qqXIIGpl4OwBpij24oa4xgqoxcdjFbHR9AxfebqroGO4o2yZ0jxRNc2pCuZAa9pNkNbVPP8dldrv97ypiMfEEoQ58iH5VwHD2uxvFQy5uH929ShjODA8XIibkUUmIxxRYysQb7+TCDARln7ttISNXYxldaa9X7kjajHhmLhHAlxwJOrRN0CfGNwZnq+MHC4PkhcMiw11iCrhj2jjw3/4Jh6vgnPjlpYwJpJYPg9s8on23unAmZzxI+4lD431glKvnE1mPAarNMOIaayvaMBxBFkULf3D1lp4PLdttfsXl4KR9ZCqs/JldW9Jkbr/p0jcTliHvW1YYbhiAyxjw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <mkashani@nvidia.com>, <rasland@nvidia.com>,\n <stable@dpdk.org>, Ori Kam <orika@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>, \"Suanming\n Mou\" <suanmingm@nvidia.com>, Alexander Kozyrev <akozyrev@nvidia.com>",
        "Subject": "[PATCH 1/2] net/mlx5: fix sync queue completion processing",
        "Date": "Thu, 16 Nov 2023 10:08:32 +0200",
        "Message-ID": "<20231116080833.336377-2-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20231116080833.336377-1-getelson@nvidia.com>",
        "References": "<20231116080833.336377-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DS2PEPF0000343E:EE_|IA1PR12MB6433:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "51f9c0ca-9f02-4af7-dcbb-08dbe67b4d31",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n lhqmJySEmA8UaKSeVJl+FZaFDYCRTy9pRVDhxPX62XDnXgDwqKiJxJt4VpVozZb67rQzNfwIcP+9r8XwtC9CzhtTJvOwe3UnwsQTeenhwp1MuyTliF3sMHw/k+7W+UIzJ4KPPzCiBODX5rT+FLHybCkTz5R+m7/zSzp3QVU88J6Logra9JTkCvg59SfoSg39GTDRbY1c5HGVUPf7PA8pxxJCWzoKdPG/9ji6Ty8SF81y7dcb5FcA9TtDvSCzyAQdq2yJcEhXb5nL0zHNWqwB8c+LW8zxSd3C01QS/+1yF7dRM71pvxcX8aB8wlrhgG7Tx9mKEhAbw8tb/xd8PsB0IVMjyIuQHrGPd5HQ23z/LeBEo1BCk9HhK2KEGcbeR4reufAQHK6KauCBOf247JwK1UzdBHFfXrNccE9niDpRDOomzfOIa8qdu4YO3uTBwXjuzH1RBn+dGor0JBotSW6Gbf+4mExiiHLaddVxaXngui1ScVxEuhKRqHBYek/UkzqPneHoTyNXvumtEuwAmMwH0IDt9cwmwrRpKT32H5l5HYhW1PhoefpjhHUEjvflgRkhpuq9UoY7z7n62gNCjypfP4lSmFWOWTAtsW5VttzYZHd29jvT/XO+LQ9Ww+n2Zc24HcZuf7CDxLLUdg7JHBi+1RDvUkEAiH57xsUa0zD4JxNW3ohtIdUaYjhk4SyaUiBUmYxaFajmojfu8N4gZJAwvzGr0RluQewvQdreiQt5sxS/LQlzS6CNq8QnEyFGZIkTV1LbVOYKe46IM2Xo8V9ynQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(39860400002)(136003)(376002)(346002)(396003)(230922051799003)(230273577357003)(230173577357003)(451199024)(186009)(82310400011)(1800799009)(64100799003)(36840700001)(46966006)(40470700004)(70206006)(70586007)(41300700001)(6916009)(54906003)(316002)(450100002)(86362001)(5660300002)(30864003)(2906002)(40460700003)(4326008)(8676002)(8936002)(47076005)(7636003)(356005)(36860700001)(336012)(426003)(82740400003)(55016003)(83380400001)(6666004)(40480700001)(478600001)(36756003)(1076003)(26005)(6286002)(16526019)(107886003)(2616005)(7696005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "16 Nov 2023 08:09:05.1973 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 51f9c0ca-9f02-4af7-dcbb-08dbe67b4d31",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DS2PEPF0000343E.namprd02.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "IA1PR12MB6433",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Indirect **SYNC** METER_MARK and CT update actions\ndo not remove completion after WQE post.\nThat implementation speeds up update time by avoiding HW timeout.\nThe completion is remoted before the following WQE post.\nHowever, HWS queue updates do not reflect that behaviour.\nTherefore, during port destruction sync queue may have\npending completions although the queue reports empty status.\n\nThe patch validates that number of pushed WQEs will not exceed queue\ncapacity. As the result, it allows to process more completions than\nexpected.\n\nFixes: 48fbb0e93d06 (\"net/mlx5: support flow meter mark indirect action with HWS\")\nCc: stable@dpdk.org\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Ori Kam <orika@nvidia.com>\n\n---\n drivers/net/mlx5/mlx5_flow_hw.c | 267 +++++++++++++++++---------------\n 1 file changed, 142 insertions(+), 125 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex d72f0a66fb..fb2e6bf67b 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -273,6 +273,22 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {\n \t.hdr.src_addr.addr_bytes = \"\\x00\\x00\\x00\\x00\\x00\\x00\",\n \t.hdr.ether_type = 0,\n };\n+\n+static __rte_always_inline struct mlx5_hw_q_job *\n+flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)\n+{\n+\tMLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);\n+\treturn priv->hw_q[queue].job_idx ?\n+\t       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;\n+}\n+\n+static __rte_always_inline void\n+flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)\n+{\n+\tMLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);\n+\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+}\n+\n static inline enum mlx5dr_matcher_insert_mode\n flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)\n {\n@@ -3297,10 +3313,10 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\t.burst = attr->postpone,\n \t};\n \tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n-\tstruct rte_flow_hw *flow;\n-\tstruct mlx5_hw_q_job *job;\n+\tstruct rte_flow_hw *flow = NULL;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tconst struct rte_flow_item *rule_items;\n-\tuint32_t flow_idx;\n+\tuint32_t flow_idx = 0;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n@@ -3308,7 +3324,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n-\tif (unlikely(!priv->hw_q[queue].job_idx)) {\n+\tjob = flow_hw_job_get(priv, queue);\n+\tif (!job) {\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -3317,16 +3334,15 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\tgoto error;\n \tmlx5_ipool_malloc(table->resource, &res_idx);\n \tif (!res_idx)\n-\t\tgoto flow_free;\n+\t\tgoto error;\n \t/*\n \t * Set the table here in order to know the destination table\n-\t * when free the flow afterwards.\n+\t * when free the flow afterward.\n \t */\n \tflow->table = table;\n \tflow->mt_idx = pattern_template_index;\n \tflow->idx = flow_idx;\n \tflow->res_idx = res_idx;\n-\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n \t/*\n \t * Set the job type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n@@ -3354,25 +3370,25 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\t\t\t      pattern_template_index, actions,\n \t\t\t\t      rule_acts, queue, error)) {\n \t\trte_errno = EINVAL;\n-\t\tgoto free;\n+\t\tgoto error;\n \t}\n \trule_items = flow_hw_get_rule_items(dev, table, items,\n \t\t\t\t\t    pattern_template_index, job);\n \tif (!rule_items)\n-\t\tgoto free;\n+\t\tgoto error;\n \tret = mlx5dr_rule_create(table->matcher,\n \t\t\t\t pattern_template_index, rule_items,\n \t\t\t\t action_template_index, rule_acts,\n \t\t\t\t &rule_attr, (struct mlx5dr_rule *)flow->rule);\n \tif (likely(!ret))\n \t\treturn (struct rte_flow *)flow;\n-free:\n-\t/* Flow created fail, return the descriptor and flow memory. 
*/\n-\tpriv->hw_q[queue].job_idx++;\n-\tmlx5_ipool_free(table->resource, res_idx);\n-flow_free:\n-\tmlx5_ipool_free(table->flow, flow_idx);\n error:\n+\tif (job)\n+\t\tflow_hw_job_put(priv, job, queue);\n+\tif (flow_idx)\n+\t\tmlx5_ipool_free(table->flow, flow_idx);\n+\tif (res_idx)\n+\t\tmlx5_ipool_free(table->resource, res_idx);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3425,9 +3441,9 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\t.burst = attr->postpone,\n \t};\n \tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n-\tstruct rte_flow_hw *flow;\n-\tstruct mlx5_hw_q_job *job;\n-\tuint32_t flow_idx;\n+\tstruct rte_flow_hw *flow = NULL;\n+\tstruct mlx5_hw_q_job *job = NULL;\n+\tuint32_t flow_idx = 0;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n@@ -3435,7 +3451,8 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n-\tif (unlikely(!priv->hw_q[queue].job_idx)) {\n+\tjob = flow_hw_job_get(priv, queue);\n+\tif (!job) {\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -3444,7 +3461,7 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\tgoto error;\n \tmlx5_ipool_malloc(table->resource, &res_idx);\n \tif (!res_idx)\n-\t\tgoto flow_free;\n+\t\tgoto error;\n \t/*\n \t * Set the table here in order to know the destination table\n \t * when free the flow afterwards.\n@@ -3453,7 +3470,6 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tflow->mt_idx = 0;\n \tflow->idx = flow_idx;\n \tflow->res_idx = res_idx;\n-\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n \t/*\n \t * Set the job type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n@@ -3478,20 +3494,20 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\t\t\t      &table->ats[action_template_index],\n \t\t\t\t      0, actions, rule_acts, queue, error)) {\n \t\trte_errno = EINVAL;\n-\t\tgoto free;\n+\t\tgoto error;\n \t}\n \tret = mlx5dr_rule_create(table->matcher,\n \t\t\t\t 0, items, action_template_index, rule_acts,\n \t\t\t\t &rule_attr, (struct mlx5dr_rule *)flow->rule);\n \tif (likely(!ret))\n \t\treturn (struct rte_flow *)flow;\n-free:\n-\t/* Flow created fail, return the descriptor and flow memory. 
*/\n-\tpriv->hw_q[queue].job_idx++;\n-\tmlx5_ipool_free(table->resource, res_idx);\n-flow_free:\n-\tmlx5_ipool_free(table->flow, flow_idx);\n error:\n+\tif (job)\n+\t\tflow_hw_job_put(priv, job, queue);\n+\tif (res_idx)\n+\t\tmlx5_ipool_free(table->resource, res_idx);\n+\tif (flow_idx)\n+\t\tmlx5_ipool_free(table->flow, flow_idx);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3545,18 +3561,18 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tstruct rte_flow_hw *of = (struct rte_flow_hw *)flow;\n \tstruct rte_flow_hw *nf;\n \tstruct rte_flow_template_table *table = of->table;\n-\tstruct mlx5_hw_q_job *job;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t res_idx = 0;\n \tint ret;\n \n-\tif (unlikely(!priv->hw_q[queue].job_idx)) {\n+\tjob = flow_hw_job_get(priv, queue);\n+\tif (!job) {\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n \tmlx5_ipool_malloc(table->resource, &res_idx);\n \tif (!res_idx)\n \t\tgoto error;\n-\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n \tnf = job->upd_flow;\n \tmemset(nf, 0, sizeof(struct rte_flow_hw));\n \t/*\n@@ -3594,7 +3610,7 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\t\t\t      nf->mt_idx, actions,\n \t\t\t\t      rule_acts, queue, error)) {\n \t\trte_errno = EINVAL;\n-\t\tgoto free;\n+\t\tgoto error;\n \t}\n \t/*\n \t * Switch the old flow and the new flow.\n@@ -3605,11 +3621,12 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\t\t\t\taction_template_index, rule_acts, &rule_attr);\n \tif (likely(!ret))\n \t\treturn 0;\n-free:\n-\t/* Flow created fail, return the descriptor and flow memory. */\n-\tpriv->hw_q[queue].job_idx++;\n-\tmlx5_ipool_free(table->resource, res_idx);\n error:\n+\t/* Flow created fail, return the descriptor and flow memory. 
*/\n+\tif (job)\n+\t\tflow_hw_job_put(priv, job, queue);\n+\tif (res_idx)\n+\t\tmlx5_ipool_free(table->resource, res_idx);\n \treturn rte_flow_error_set(error, rte_errno,\n \t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\"fail to update rte flow\");\n@@ -3656,24 +3673,24 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,\n \tstruct mlx5_hw_q_job *job;\n \tint ret;\n \n-\tif (unlikely(!priv->hw_q[queue].job_idx)) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\tjob = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n+\tjob = flow_hw_job_get(priv, queue);\n+\tif (!job)\n+\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"fail to destroy rte flow: flow queue full\");\n \tjob->type = MLX5_HW_Q_JOB_TYPE_DESTROY;\n \tjob->user_data = user_data;\n \tjob->flow = fh;\n \trule_attr.user_data = job;\n \trule_attr.rule_idx = fh->rule_idx;\n \tret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);\n-\tif (likely(!ret))\n-\t\treturn 0;\n-\tpriv->hw_q[queue].job_idx++;\n-error:\n-\treturn rte_flow_error_set(error, rte_errno,\n-\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\"fail to destroy rte flow\");\n+\tif (ret) {\n+\t\tflow_hw_job_put(priv, job, queue);\n+\t\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"fail to destroy rte flow\");\n+\t}\n+\treturn 0;\n }\n \n /**\n@@ -3732,7 +3749,7 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_ring *r = priv->hw_q[queue].indir_cq;\n-\tstruct mlx5_hw_q_job *job;\n+\tstruct mlx5_hw_q_job *job = NULL;\n \tvoid *user_data = NULL;\n \tuint32_t type, idx;\n \tstruct mlx5_aso_mtr *aso_mtr;\n@@ -3792,8 +3809,16 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\tjob->query.hw);\n \t\t\t\taso_ct->state = ASO_CONNTRACK_READY;\n \t\t\t}\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * rte_flow_op_result::user data can point to\n+\t\t\t * struct mlx5_aso_mtr object as well\n+\t\t\t */\n+\t\t\tif (queue == CTRL_QUEUE_ID(priv))\n+\t\t\t\tcontinue;\n+\t\t\tMLX5_ASSERT(false);\n \t\t}\n-\t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+\t\tflow_hw_job_put(priv, job, queue);\n \t}\n \treturn ret_comp;\n }\n@@ -3865,7 +3890,7 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\t\t\tmlx5_ipool_free(job->flow->table->resource, res_idx);\n \t\t\t}\n \t\t}\n-\t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\n+\t\tflow_hw_job_put(priv, job, queue);\n \t}\n \t/* 2. Pull indirect action comp. 
*/\n \tif (ret < n_res)\n@@ -3874,7 +3899,7 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n-static inline void\n+static inline uint32_t\n __flow_hw_push_action(struct rte_eth_dev *dev,\n \t\t    uint32_t queue)\n {\n@@ -3889,10 +3914,35 @@ __flow_hw_push_action(struct rte_eth_dev *dev,\n \t\trte_ring_dequeue(iq, &job);\n \t\trte_ring_enqueue(cq, job);\n \t}\n-\tif (priv->hws_ctpool)\n-\t\tmlx5_aso_push_wqe(priv->sh, &priv->ct_mng->aso_sqs[queue]);\n-\tif (priv->hws_mpool)\n-\t\tmlx5_aso_push_wqe(priv->sh, &priv->hws_mpool->sq[queue]);\n+\tif (!priv->shared_host) {\n+\t\tif (priv->hws_ctpool)\n+\t\t\tmlx5_aso_push_wqe(priv->sh,\n+\t\t\t\t\t  &priv->ct_mng->aso_sqs[queue]);\n+\t\tif (priv->hws_mpool)\n+\t\t\tmlx5_aso_push_wqe(priv->sh,\n+\t\t\t\t\t  &priv->hws_mpool->sq[queue]);\n+\t}\n+\treturn priv->hw_q[queue].size - priv->hw_q[queue].job_idx;\n+}\n+\n+static int\n+__flow_hw_push(struct rte_eth_dev *dev,\n+\t       uint32_t queue,\n+\t       struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tint ret, num;\n+\n+\tnum = __flow_hw_push_action(dev, queue);\n+\tret = mlx5dr_send_queue_action(priv->dr_ctx, queue,\n+\t\t\t\t       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);\n+\tif (ret) {\n+\t\trte_flow_error_set(error, rte_errno,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"fail to push flows\");\n+\t\treturn ret;\n+\t}\n+\treturn num;\n }\n \n /**\n@@ -3912,22 +3962,11 @@ __flow_hw_push_action(struct rte_eth_dev *dev,\n  */\n static int\n flow_hw_push(struct rte_eth_dev *dev,\n-\t     uint32_t queue,\n-\t     struct rte_flow_error *error)\n+\t     uint32_t queue, struct rte_flow_error *error)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tint ret;\n+\tint ret = __flow_hw_push(dev, queue, error);\n \n-\t__flow_hw_push_action(dev, queue);\n-\tret = mlx5dr_send_queue_action(priv->dr_ctx, queue,\n-\t\t\t\t       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);\n-\tif (ret) {\n-\t\trte_flow_error_set(error, rte_errno,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t   \"fail to push flows\");\n-\t\treturn ret;\n-\t}\n-\treturn 0;\n+\treturn ret >= 0 ? 
0 : ret;\n }\n \n /**\n@@ -3937,8 +3976,6 @@ flow_hw_push(struct rte_eth_dev *dev,\n  *   Pointer to the rte_eth_dev structure.\n  * @param[in] queue\n  *   The queue to pull the flow.\n- * @param[in] pending_rules\n- *   The pending flow number.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -3947,24 +3984,24 @@ flow_hw_push(struct rte_eth_dev *dev,\n  */\n static int\n __flow_hw_pull_comp(struct rte_eth_dev *dev,\n-\t\t    uint32_t queue,\n-\t\t    uint32_t pending_rules,\n-\t\t    struct rte_flow_error *error)\n+\t\t    uint32_t queue, struct rte_flow_error *error)\n {\n \tstruct rte_flow_op_result comp[BURST_THR];\n \tint ret, i, empty_loop = 0;\n+\tuint32_t pending_rules;\n \n-\tret = flow_hw_push(dev, queue, error);\n+\tret = __flow_hw_push(dev, queue, error);\n \tif (ret < 0)\n \t\treturn ret;\n+\tpending_rules = ret;\n \twhile (pending_rules) {\n \t\tret = flow_hw_pull(dev, queue, comp, BURST_THR, error);\n \t\tif (ret < 0)\n \t\t\treturn -1;\n \t\tif (!ret) {\n-\t\t\trte_delay_us_sleep(20000);\n+\t\t\trte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n \t\t\tif (++empty_loop > 5) {\n-\t\t\t\tDRV_LOG(WARNING, \"No available dequeue, quit.\");\n+\t\t\t\tDRV_LOG(WARNING, \"No available dequeue %u, quit.\", pending_rules);\n \t\t\t\tbreak;\n \t\t\t}\n \t\t\tcontinue;\n@@ -3973,13 +4010,16 @@ __flow_hw_pull_comp(struct rte_eth_dev *dev,\n \t\t\tif (comp[i].status == RTE_FLOW_OP_ERROR)\n \t\t\t\tDRV_LOG(WARNING, \"Flow flush get error CQE.\");\n \t\t}\n-\t\tif ((uint32_t)ret > pending_rules) {\n-\t\t\tDRV_LOG(WARNING, \"Flow flush get extra CQE.\");\n-\t\t\treturn rte_flow_error_set(error, ERANGE,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t\"get extra CQE\");\n-\t\t}\n-\t\tpending_rules -= ret;\n+\t\t/*\n+\t\t * Indirect **SYNC** METER_MARK and CT actions do not\n+\t\t * remove completion after WQE post.\n+\t\t * That implementation avoids HW timeout.\n+\t\t * The completion is removed before the following WQE post.\n+\t\t * However, HWS queue updates do not reflect that behaviour.\n+\t\t * Therefore, during port destruction sync queue may have\n+\t\t * pending completions.\n+\t\t */\n+\t\tpending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);\n \t\tempty_loop = 0;\n \t}\n \treturn 0;\n@@ -4001,7 +4041,7 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n \t\t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hw_q *hw_q;\n+\tstruct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];\n \tstruct rte_flow_template_table *tbl;\n \tstruct rte_flow_hw *flow;\n \tstruct rte_flow_op_attr attr = {\n@@ -4020,13 +4060,10 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n \t * be minus value.\n \t */\n \tfor (queue = 0; queue < priv->nb_queue; queue++) {\n-\t\thw_q = &priv->hw_q[queue];\n-\t\tif (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,\n-\t\t\t\t\terror))\n+\t\tif (__flow_hw_pull_comp(dev, queue, error))\n \t\t\treturn -1;\n \t}\n \t/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */\n-\thw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];\n \tLIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {\n \t\tif (!tbl->cfg.external)\n \t\t\tcontinue;\n@@ -4042,8 +4079,8 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n \t\t\t/* Drain completion with queue size. 
*/\n \t\t\tif (pending_rules >= hw_q->size) {\n \t\t\t\tif (__flow_hw_pull_comp(dev,\n-\t\t\t\t\t\tMLX5_DEFAULT_FLUSH_QUEUE,\n-\t\t\t\t\t\tpending_rules, error))\n+\t\t\t\t\t\t\tMLX5_DEFAULT_FLUSH_QUEUE,\n+\t\t\t\t\t\t\terror))\n \t\t\t\t\treturn -1;\n \t\t\t\tpending_rules = 0;\n \t\t\t}\n@@ -4051,8 +4088,7 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n \t}\n \t/* Drain left completion. */\n \tif (pending_rules &&\n-\t    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,\n-\t\t\t\terror))\n+\t    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))\n \t\treturn -1;\n \treturn 0;\n }\n@@ -9911,18 +9947,6 @@ flow_hw_action_push(const struct rte_flow_op_attr *attr)\n \treturn attr ? !attr->postpone : true;\n }\n \n-static __rte_always_inline struct mlx5_hw_q_job *\n-flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)\n-{\n-\treturn priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];\n-}\n-\n-static __rte_always_inline void\n-flow_hw_job_put(struct mlx5_priv *priv, uint32_t queue)\n-{\n-\tpriv->hw_q[queue].job_idx++;\n-}\n-\n static __rte_always_inline struct mlx5_hw_q_job *\n flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n \t\t\tconst struct rte_flow_action_handle *handle,\n@@ -9933,13 +9957,13 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,\n \tstruct mlx5_hw_q_job *job;\n \n \tMLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);\n-\tif (unlikely(!priv->hw_q[queue].job_idx)) {\n+\tjob = flow_hw_job_get(priv, queue);\n+\tif (!job) {\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,\n \t\t\t\t   \"Action destroy failed due to queue full.\");\n \t\treturn NULL;\n \t}\n-\tjob = flow_hw_job_get(priv, queue);\n \tjob->type = type;\n \tjob->action = handle;\n \tjob->user_data = user_data;\n@@ -9953,16 +9977,21 @@ flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\tbool push, bool aso, bool status)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (queue == MLX5_HW_INV_QUEUE)\n+\t\tqueue = CTRL_QUEUE_ID(priv);\n \tif (likely(status)) {\n-\t\tif (push)\n-\t\t\t__flow_hw_push_action(dev, queue);\n+\t\t/* 1. add new job to a queue */\n \t\tif (!aso)\n \t\t\trte_ring_enqueue(push ?\n \t\t\t\t\t priv->hw_q[queue].indir_cq :\n \t\t\t\t\t priv->hw_q[queue].indir_iq,\n \t\t\t\t\t job);\n+\t\t/* 2. 
send pending jobs */\n+\t\tif (push)\n+\t\t\t__flow_hw_push_action(dev, queue);\n \t} else {\n-\t\tflow_hw_job_put(priv, queue);\n+\t\tflow_hw_job_put(priv, job, queue);\n \t}\n }\n \n@@ -11584,13 +11613,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,\n \t\tret = -rte_errno;\n \t\tgoto error;\n \t}\n-\tret = flow_hw_push(proxy_dev, queue, NULL);\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"port %u failed to drain control flow queue\",\n-\t\t\tproxy_dev->data->port_id);\n-\t\tgoto error;\n-\t}\n-\tret = __flow_hw_pull_comp(proxy_dev, queue, 1, NULL);\n+\tret = __flow_hw_pull_comp(proxy_dev, queue, NULL);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"port %u failed to insert control flow\",\n \t\t\tproxy_dev->data->port_id);\n@@ -11651,13 +11674,7 @@ flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t\t\" flow operation\", dev->data->port_id);\n \t\tgoto exit;\n \t}\n-\tret = flow_hw_push(dev, queue, NULL);\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"port %u failed to drain control flow queue\",\n-\t\t\tdev->data->port_id);\n-\t\tgoto exit;\n-\t}\n-\tret = __flow_hw_pull_comp(dev, queue, 1, NULL);\n+\tret = __flow_hw_pull_comp(dev, queue, NULL);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"port %u failed to destroy control flow\",\n \t\t\tdev->data->port_id);\n",
    "prefixes": [
        "1/2"
    ]
}
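
The response links its related resources (series, comments, checks, mbox) by URL, so a client can navigate from a patch to everything attached to it. A minimal consumer sketch, again with Python's requests; the field names are taken from the response above, the per-check fields ("context", "state") follow the Patchwork checks API, and the output filename is arbitrary:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/134409/").json()

# "check" aggregates CI results; per-check detail sits behind the "checks" URL.
print(patch["state"], patch["check"])
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])

# "mbox" serves the raw patch email, suitable for feeding to `git am`.
with open("patch-134409.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)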