get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).

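For scripted access, a minimal sketch in Python is shown below (it assumes the third-party requests package and a hypothetical API token; following Patchwork REST API conventions, GET needs no credentials, while PATCH and PUT expect a token with maintainer rights on the project, sent as an "Authorization: Token ..." header, and only writable fields such as state, archived and delegate can be changed):

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "..."  # hypothetical token; only PATCH/PUT need authentication

# get: show a patch (read-only, no credentials needed).
resp = requests.get(f"{BASE}/patches/114817/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# patch: partially update a patch, e.g. its state and archived flag
# (assumes the token belongs to a maintainer of the project).
resp = requests.patch(
    f"{BASE}/patches/114817/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

The sample GET exchange below shows the full shape of the resource these calls operate on.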
GET /api/patches/114817/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 114817,
    "url": "https://patches.dpdk.org/api/patches/114817/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220811055058.29957-1-viacheslavo@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220811055058.29957-1-viacheslavo@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220811055058.29957-1-viacheslavo@nvidia.com",
    "date": "2022-08-11T05:50:58",
    "name": "net/mlx5: fix check for orphan wait descriptor",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "56ea4f1cc758962ea807bf350389a5e16c4823de",
    "submitter": {
        "id": 1926,
        "url": "https://patches.dpdk.org/api/people/1926/?format=api",
        "name": "Slava Ovsiienko",
        "email": "viacheslavo@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220811055058.29957-1-viacheslavo@nvidia.com/mbox/",
    "series": [
        {
            "id": 24254,
            "url": "https://patches.dpdk.org/api/series/24254/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=24254",
            "date": "2022-08-11T05:50:58",
            "name": "net/mlx5: fix check for orphan wait descriptor",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/24254/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/114817/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/114817/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9FC1CA0548;\n\tThu, 11 Aug 2022 07:51:22 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 59F4340DDA;\n\tThu, 11 Aug 2022 07:51:22 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2068.outbound.protection.outlook.com [40.107.243.68])\n by mails.dpdk.org (Postfix) with ESMTP id 0ED9240A87;\n Thu, 11 Aug 2022 07:51:20 +0200 (CEST)",
            "from MW4PR03CA0277.namprd03.prod.outlook.com (2603:10b6:303:b5::12)\n by PH7PR12MB5903.namprd12.prod.outlook.com (2603:10b6:510:1d7::14)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5504.19; Thu, 11 Aug\n 2022 05:51:18 +0000",
            "from CO1NAM11FT061.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:b5:cafe::ef) by MW4PR03CA0277.outlook.office365.com\n (2603:10b6:303:b5::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5525.11 via Frontend\n Transport; Thu, 11 Aug 2022 05:51:17 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n CO1NAM11FT061.mail.protection.outlook.com (10.13.175.200) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5525.11 via Frontend Transport; Thu, 11 Aug 2022 05:51:17 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Thu, 11 Aug 2022 05:51:14 +0000",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Wed, 10 Aug\n 2022 22:51:12 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=TGYW5/HQS79p+lcnj7bvLXoMrB2fvGxPjSm38AnkvrJwnR+4Iix4fBu7abxakDPKGC4kLZm4n5LOd3XaiGxew8xxzTMRweRuMFP0YAa/mEl0PKOKc2OLKNspbfPt/peRYmhSYj0B8YW4Vz6KJIwPh4DtaD4s1ZHnEKDc2CF+GOR07oQPFL+tcCn58jsm2ixhxUGP0y7kFU9Qoez7v3xI5IM8rT4JB2NoSN8+dTUDoMsxr/B4VtGrL0IwFH9sN/dGunGrEtXeBjz+bYIthiw5seAyKUkpssiyfzD3VRoiHeBcgkF3PQ1/cw59HHK83dPiwY3fYILIQm372WMNWA37pw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=EIhSPyIIOf0BshqaQ2CMWvm6LANQIagB+M0XU+ALWS8=;\n b=givTtCEKIbqa+bbAVb9siB7nXs6s78J/lriIoUL21t+C9GYORYwnMzL77v2JlgS24r7dEQN/o6AB2vOfv2aq/PR0tTG1pFSt9Sr1tG24sXrrBNi5e+0KdVD3MgPp0NIs7savdGSnTy75Yh3vHgO+hC/g8DVI4uA3z0+OhUQNmnXeWlRzCgwrxiKzLKE5tiMFLcdABj3yHpgZuZiZ9tbh1NTDlHaNJi2Th0n6MP5F+a7k5ed/f3nV4wvEmgxKgoWnZcATOeK7pzhd30X1kL0JtBEtNW2b5My/cR433KgQ3Glqc3PW508ZAMoiOs4hc/As+wuzl06W1/Cg7jVejkv9Vw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=EIhSPyIIOf0BshqaQ2CMWvm6LANQIagB+M0XU+ALWS8=;\n b=A5vxuYpyGWGXpeg5LViNBJWiIfKw4bHPqx7WyTUP0b3WnphZVt0eaoObhb/+t/EirDxckxfi+mqHl++Pq8tJ3k7X3R07eVn75ak6HhTXIZU24alCD+cyqBMf3PsQuPTrRo8INo3Wkx74me8YT1D6bmF34MfEyobmncNS9RKPjTKvFdLd43VZGNtrWdrTMLVEU1DOoAWyitP9VLdCGTnbmt/tE2qk53mk6Ug5lYGnrmFK6IvRNGdEVGl4KvIbRTeSaoMeWOcwAKvhe2fQv/NcXbjRK8cxRQhpueP844z2+jw3CCAuVg6Fx72DtSOTtQPa/fOcemPo0w2k+Rr3d98ukg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com; pr=C",
        "From": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n <stable@dpdk.org>",
        "Subject": "[PATCH] net/mlx5: fix check for orphan wait descriptor",
        "Date": "Thu, 11 Aug 2022 08:50:58 +0300",
        "Message-ID": "<20220811055058.29957-1-viacheslavo@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "aec8dfa5-11db-463d-ec4b-08da7b5d8269",
        "X-MS-TrafficTypeDiagnostic": "PH7PR12MB5903:EE_",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n DJv8pturmZ9QJmk6UXQpJ7k8TGtAj4nXMP4/B/EYi4fdgOMJ8zWfsTf/lkC4LUuDxfcyZE/15Nb9+vrFGIJCcR2e2cuc3+2FuMGxuWfNerSnvE744gWy91JWzaucqPvXHFkEHb8SgawlgqPrz/UZj+3NQTaYs4Pui/mQX7iE3LDMcLPNPTtDqcrj06+rCLuqDjE60Z7P/Z7agWDrWbm4XTzcbk1soJ0Hc6wU4/BJI0bSjveBHhe6ftuZ4GGjAuo4eSF09Vp7xsZEhNt14Hv/PjtRwCtu6s39wC9vCMZNdt9IMM/GL8vX3uRU4XTcRIewBbe0beJhDqUpTqONmOtbrdY6YwZSFdpQ1u3ZF18OTY1H0CzvnpxJPZlMarTYYkFYyjwt2zsNp8jsqzypCcXD1OKM2R1ovWzf0uM9qEDzlKEoMXjCgiP8o4KkRwlnWYGVEHUFQurTYi8/ET/2dGlvGca0FC6JuojEVdwA4C86W4T83lLyEQXcs2jJjNHkDxWB7UI3EwxivbQs8hgsOStA29RvQJaz8WHL8AvsW0rdpIAnq6QNfBclskoJ1So69i04/UTVfeRPuM+FQf5X8qsO6tatPQ8NiefLoPsv/kIzE0ZThdwnT/ts/me+noa8h3HpwOtDsui0F6mE6T56zOxGmvqKkMjRrb9xbfcuFe7UiYoN/S5Z1U2cWlCbnA+EyC9jG2fLVjembRWQWmFoo8F2+5s1plX5naYZ/2AsxVdh4EpLUKyNJW864c89B/NetIsxBsTqJyXPdKzxehiWarPGsbtG9d+Hy81XMD1Pj1Eyv8lnmXowtiCMlNQD7ozysZdmJ5gx6MclSLOluuAlb88SxA==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(39860400002)(136003)(396003)(346002)(376002)(36840700001)(40470700004)(46966006)(6666004)(7696005)(26005)(6286002)(41300700001)(82740400003)(1076003)(186003)(83380400001)(40460700003)(336012)(356005)(16526019)(47076005)(426003)(36860700001)(5660300002)(8936002)(81166007)(2616005)(86362001)(70586007)(70206006)(55016003)(4326008)(40480700001)(8676002)(2906002)(36756003)(82310400005)(54906003)(316002)(6916009)(478600001)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "11 Aug 2022 05:51:17.5407 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n aec8dfa5-11db-463d-ec4b-08da7b5d8269",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT061.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH7PR12MB5903",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The mlx5 PMD supports send scheduling feature, it allows\nto send packets at specified moment of time, to do that\nPMD pushes special wait descriptor (WQE) to the hardware\nqueue and then pushes descriptor for packet data as usual.\nIf queue is close to be full or there is no enough elts\nbuffers to store mbufs being sent the data descriptors might\nbe not pushed and the orphan wait WQE (not followed by the\ndata) might reside in queue on tx_burst routine exit.\n\nTo avoid orphan wait WQEs there was the check for enough\nfree space in the queue WQE buffer and enough amount of the\nfree elts in queue mbuf storage. This check was incomplete\nand did not cover all the cases for Enhanced Multi-Packet\nWrite descriptors.\n\nFixes: 2f827f5ea6e1 (\"net/mlx5: support scheduling on send routine template\")\nCc: stable@dpdk.org\n\nSigned-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5_tx.h | 74 +++++++++++++++++++++-----------------\n 1 file changed, 41 insertions(+), 33 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h\nindex 20776919c2..f081921ffc 100644\n--- a/drivers/net/mlx5/mlx5_tx.h\n+++ b/drivers/net/mlx5/mlx5_tx.h\n@@ -1642,6 +1642,9 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,\n  *   Pointer to TX queue structure.\n  * @param loc\n  *   Pointer to burst routine local context.\n+ * @param elts\n+ *   Number of free elements in elts buffer to be checked, for zero\n+ *   value the check is optimized out by compiler.\n  * @param olx\n  *   Configured Tx offloads mask. It is fully defined at\n  *   compile time and may be used for optimization.\n@@ -1655,6 +1658,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,\n static __rte_always_inline enum mlx5_txcmp_code\n mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,\n \t\t      struct mlx5_txq_local *restrict loc,\n+\t\t      uint16_t elts,\n \t\t      unsigned int olx)\n {\n \tif (MLX5_TXOFF_CONFIG(TXPP) &&\n@@ -1669,7 +1673,7 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,\n \t\t * to the queue and we won't get the orphan WAIT WQE.\n \t\t */\n \t\tif (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||\n-\t\t    loc->elts_free < NB_SEGS(loc->mbuf))\n+\t\t    loc->elts_free < elts)\n \t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\t/* Convert the timestamp into completion to wait. */\n \t\tts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);\n@@ -1735,11 +1739,12 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,\n \tstruct mlx5_wqe *__rte_restrict wqe;\n \tunsigned int ds, dlen, inlen, ntcp, vlan = 0;\n \n+\tMLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));\n \tif (MLX5_TXOFF_CONFIG(TXPP)) {\n \t\tenum mlx5_txcmp_code wret;\n \n \t\t/* Generate WAIT for scheduling if requested. */\n-\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n+\t\twret = mlx5_tx_schedule_send(txq, loc, 0, olx);\n \t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n \t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n@@ -1833,11 +1838,12 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,\n \tunsigned int ds, nseg;\n \n \tMLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);\n+\tMLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));\n \tif (MLX5_TXOFF_CONFIG(TXPP)) {\n \t\tenum mlx5_txcmp_code wret;\n \n \t\t/* Generate WAIT for scheduling if requested. */\n-\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n+\t\twret = mlx5_tx_schedule_send(txq, loc, 0, olx);\n \t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n \t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n@@ -1948,16 +1954,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,\n \n \tMLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));\n \tMLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);\n-\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n-\t\tenum mlx5_txcmp_code wret;\n-\n-\t\t/* Generate WAIT for scheduling if requested. 
*/\n-\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n-\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n-\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n-\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n-\t\t\treturn MLX5_TXCMP_CODE_ERROR;\n-\t}\n+\tMLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));\n \t/*\n \t * First calculate data length to be inlined\n \t * to estimate the required space for WQE.\n@@ -2063,6 +2060,16 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,\n \t * supposing no any mbufs is being freed during inlining.\n \t */\n do_build:\n+\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n+\t\tenum mlx5_txcmp_code wret;\n+\n+\t\t/* Generate WAIT for scheduling if requested. */\n+\t\twret = mlx5_tx_schedule_send(txq, loc, 0, olx);\n+\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n+\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n+\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n+\t\t\treturn MLX5_TXCMP_CODE_ERROR;\n+\t}\n \tMLX5_ASSERT(inlen <= txq->inlen_send);\n \tds = NB_SEGS(loc->mbuf) + 2 + (inlen -\n \t\t\t\t       MLX5_ESEG_MIN_INLINE_SIZE +\n@@ -2223,7 +2230,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,\n \t\t\tenum mlx5_txcmp_code wret;\n \n \t\t\t/* Generate WAIT for scheduling if requested. */\n-\t\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n+\t\t\twret = mlx5_tx_schedule_send(txq, loc, 1, olx);\n \t\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n \t\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n@@ -2601,16 +2608,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,\n \n next_empw:\n \t\tMLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);\n-\t\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n-\t\t\tenum mlx5_txcmp_code wret;\n-\n-\t\t\t/* Generate WAIT for scheduling if requested. */\n-\t\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n-\t\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n-\t\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n-\t\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n-\t\t\t\treturn MLX5_TXCMP_CODE_ERROR;\n-\t\t}\n \t\tpart = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?\n \t\t\t\t       MLX5_MPW_MAX_PACKETS :\n \t\t\t\t       MLX5_EMPW_MAX_PACKETS);\n@@ -2621,6 +2618,16 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,\n \t\t\t/* But we still able to send at least minimal eMPW. */\n \t\t\tpart = loc->elts_free;\n \t\t}\n+\t\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n+\t\t\tenum mlx5_txcmp_code wret;\n+\n+\t\t\t/* Generate WAIT for scheduling if requested. */\n+\t\t\twret = mlx5_tx_schedule_send(txq, loc, 0, olx);\n+\t\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n+\t\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n+\t\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n+\t\t\t\treturn MLX5_TXCMP_CODE_ERROR;\n+\t\t}\n \t\t/* Check whether we have enough WQEs */\n \t\tif (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {\n \t\t\tif (unlikely(loc->wqe_free <\n@@ -2775,23 +2782,23 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,\n \t\tunsigned int slen = 0;\n \n \t\tMLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);\n+\t\t/*\n+\t\t * Limits the amount of packets in one WQE\n+\t\t * to improve CQE latency generation.\n+\t\t */\n+\t\tnlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?\n+\t\t\t\t       MLX5_MPW_INLINE_MAX_PACKETS :\n+\t\t\t\t       MLX5_EMPW_MAX_PACKETS);\n \t\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n \t\t\tenum mlx5_txcmp_code wret;\n \n \t\t\t/* Generate WAIT for scheduling if requested. 
*/\n-\t\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n+\t\t\twret = mlx5_tx_schedule_send(txq, loc, nlim, olx);\n \t\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n \t\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n \t\t\t\treturn MLX5_TXCMP_CODE_ERROR;\n \t\t}\n-\t\t/*\n-\t\t * Limits the amount of packets in one WQE\n-\t\t * to improve CQE latency generation.\n-\t\t */\n-\t\tnlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?\n-\t\t\t\t       MLX5_MPW_INLINE_MAX_PACKETS :\n-\t\t\t\t       MLX5_EMPW_MAX_PACKETS);\n \t\t/* Check whether we have minimal amount WQEs */\n \t\tif (unlikely(loc->wqe_free <\n \t\t\t    ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))\n@@ -3074,11 +3081,12 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,\n \t\tenum mlx5_txcmp_code ret;\n \n \t\tMLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);\n+\t\tMLX5_ASSERT(loc->elts_free);\n \t\tif (MLX5_TXOFF_CONFIG(TXPP)) {\n \t\t\tenum mlx5_txcmp_code wret;\n \n \t\t\t/* Generate WAIT for scheduling if requested. */\n-\t\t\twret = mlx5_tx_schedule_send(txq, loc, olx);\n+\t\t\twret = mlx5_tx_schedule_send(txq, loc, 0, olx);\n \t\t\tif (wret == MLX5_TXCMP_CODE_EXIT)\n \t\t\t\treturn MLX5_TXCMP_CODE_EXIT;\n \t\t\tif (wret == MLX5_TXCMP_CODE_ERROR)\n",
    "prefixes": []
}
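Most tooling needs only a few of the fields above: "mbox" points at the complete message (mail headers, commit log and diff) that git am can apply, while "diff" carries just the patch body inline. A short sketch of both uses, assuming the requests package and a DPDK git checkout as the working directory:

import requests
import subprocess

patch = requests.get("https://patches.dpdk.org/api/patches/114817/").json()

# Save the raw diff carried inline in the API response.
with open("114817.diff", "w") as f:
    f.write(patch["diff"])

# Fetch the mbox referenced by the response and apply it to the tree.
mbox = requests.get(patch["mbox"]).text
with open("114817.mbox", "w") as f:
    f.write(mbox)
subprocess.run(["git", "am", "114817.mbox"], check=True)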