get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/124354/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 124354,
    "url": "http://patches.dpdk.org/api/patches/124354/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230222122628.29627-4-jiaweiw@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230222122628.29627-4-jiaweiw@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230222122628.29627-4-jiaweiw@nvidia.com",
    "date": "2023-02-22T12:26:28",
    "name": "[v2,3/3] net/mlx5: enhance the Tx queue affinity",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1c57e3ae44b523498fc5a5540677c4ec83836499",
    "submitter": {
        "id": 1939,
        "url": "http://patches.dpdk.org/api/people/1939/?format=api",
        "name": "Jiawei Wang",
        "email": "jiaweiw@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230222122628.29627-4-jiaweiw@nvidia.com/mbox/",
    "series": [
        {
            "id": 27138,
            "url": "http://patches.dpdk.org/api/series/27138/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=27138",
            "date": "2023-02-22T12:26:25",
            "name": "Add Tx queue mapping of aggregated ports in MLX5 PMD",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/27138/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/124354/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/124354/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C790B41D3C;\n\tWed, 22 Feb 2023 13:27:19 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 86D5A42FF4;\n\tWed, 22 Feb 2023 13:27:12 +0100 (CET)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2060.outbound.protection.outlook.com [40.107.223.60])\n by mails.dpdk.org (Postfix) with ESMTP id 13B5542FA2\n for <dev@dpdk.org>; Wed, 22 Feb 2023 13:27:08 +0100 (CET)",
            "from BN1PR10CA0010.namprd10.prod.outlook.com (2603:10b6:408:e0::15)\n by SA1PR12MB5669.namprd12.prod.outlook.com (2603:10b6:806:237::6)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6134.19; Wed, 22 Feb\n 2023 12:27:06 +0000",
            "from BN8NAM11FT007.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:e0:cafe::49) by BN1PR10CA0010.outlook.office365.com\n (2603:10b6:408:e0::15) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6134.19 via Frontend\n Transport; Wed, 22 Feb 2023 12:27:06 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BN8NAM11FT007.mail.protection.outlook.com (10.13.177.109) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6134.19 via Frontend Transport; Wed, 22 Feb 2023 12:27:05 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.36; Wed, 22 Feb\n 2023 04:27:00 -0800",
            "from nvidia.com (10.126.230.37) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.36; Wed, 22 Feb\n 2023 04:26:58 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=UwfW+1UyVLQJV4E96EXnSHoURywBgFln9O2h2HPCtmPImmZHAGdtd7bNZxt65n15c1MFFWnoYwlC2dNj08duMjAUfHfmfCVDuDKUgFwiidqLR18+TsvxtGTtRTVE9lPoBf8U0x/BwQlXXygH1pun77y/Hw5h4lW5OfpE8poOMOJcL27PhPtmfUJcx9fZoq62tpI0yl4eIkgZwtVEelAn8lYmJ8coAORRrrK8SqubBxjDJ6fJjU8HRk01QsjSkP+f6hAq/sn65H4YIrwwYerxDsdxWmArgUW9iUIjgg/b2oZ8bGOqBC3RqtHlR5AR2L5AmpHT7D040QsI0t76Z0An0w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=RQyVv+FuPBmt/uE6wi418wL+s6vbMPe9feDUKMDiMXY=;\n b=hyAALNiO7knMKfqTM+rEqglSYSqFHSJ1A9u1ydQ/yn1nwXAV2tzEf1mnVs9rxxGG5WqlHPUeQHfKzQnT8T7xQPn0Qu2HDdTXJtdjeGApEi3/RkteARcbZ2hcVnUbLJrMipPz7m4UC01S2iY5K3s6wtfI2kbpgnalz1rVhZ3mtByEWUCLHSdj/9B6mNfrXL0ycQzZG2CxlwvfWL0y3LCcvl6KRKT8r1oXTy0wSwL8Pwzw3IyzqiNh7IfGM3BRsrOrVG0cPKFwNtH6i50lmZXSAD0z4jRWW22Ybtp3PE3zdZeW8HaS9JoX4ZT0h8F/aec0b8ZZR+lO3rokfvS29VA8cw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=RQyVv+FuPBmt/uE6wi418wL+s6vbMPe9feDUKMDiMXY=;\n b=Pl63BU64X9w8+wXu6k4m36XYRWK8DQeHnYk5FyFHTYg63ibOCWmzEXawxHTTZJhnhPvc4ODysfA1WB3zn5rukJUQM7C4lzgqGkTiZGbhKtgk1zd3Mnba2ZacKNvJxKgJcBjIfK0P1ihmk5JDkpLSSPP+MTkgA47pKy7f2qfogfU70RvLJuOkEqRB/VKWQvt9o9FnJjjaPc4Fc9J5UCty2qCtvvwrWXuVwPMax/v+eTNnnQfPqgmue5+oOpxAKJffuGQnJKTieMxZ/YDYx2S21rrqBCA9OcNTCoHHIXkNvbGd4tdvPbtWEe7fNTO8zONwUqoE0fc0NZTQM/TG+ISUPA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Jiawei Wang <jiaweiw@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <orika@nvidia.com>, Matan Azrad\n <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>",
        "Subject": "[PATCH v2 3/3] net/mlx5: enhance the Tx queue affinity",
        "Date": "Wed, 22 Feb 2023 14:26:28 +0200",
        "Message-ID": "<20230222122628.29627-4-jiaweiw@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20230222122628.29627-1-jiaweiw@nvidia.com>",
        "References": "<20230203052107.48293-1-jiaweiw@nvidia.com>\n <20230222122628.29627-1-jiaweiw@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.37]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BN8NAM11FT007:EE_|SA1PR12MB5669:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "3c009e18-16f3-407f-1d64-08db14d01c17",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n lpyouFs73andNy7HjwVOANavLBqdHX4VN61yRR4i5w+CyMxSK56KcoNdpOuo9h22dPcFO8PHl6WHJw7Ku1JKSdJgzUzr0DLLn5tClZuYUGBxum/lO1vHQIwAFHTtZsOjta9ARqwNZS5bVwzIys1JaCFWXlADpzYWf6/FXBkJPzCftxiOlyi6nOj9g5MPBvjJY31rCFXGUpmUpt6CGwuOt2vKwu4Ult+dLbgaybk9KBL+jnK1Py/VJjjdg/+hM2fMWxLGYpifzXDnz2bqI6YNdoYxptzrpjJzIoD5T4VScld1INfpW0P+ZuUa8jtsQtgaw4jYq4S8vOIdxwlY55e9elo73kspjQynK/EI6utL/6FFOzYyt7p+4ebKTqRlnI1gojPlLAsEKqPomrz7PES4OhhgdtLmRprBWJdsKSO5RjskXJ/ynPcy5dmzl5dZA/927f3QCBPfvnKe23qPyCzc6tW7WatRgyHrKEyvzQbQ+4M3S1uj9so1fmQbpk4ALCmzq6vT5dW1YHYZ2X9GpkFCMha8bl/YuYZKkC0bs5qhwHkoBy13xoq1pnsS61fR91r8/CzPjrkQG3jA7W5Dfz41nJ7qAKCcgja53B/8keGNJFPV9KcY9L0+8fJoE5M6Wa/EaH+Oe4N82GyMBf+X90zHEs4sJOJtr1fNoZluWn1mlzVQyqemreTouGRn/OBHrm9SQ8N4yLeit0LZNegnKBsfL1Wo+Yp6FwjaewZH/i+7NUk=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230025)(4636009)(346002)(396003)(39860400002)(376002)(136003)(451199018)(36840700001)(46966006)(40470700004)(7636003)(40480700001)(82740400003)(2906002)(336012)(34020700004)(36860700001)(40460700003)(426003)(83380400001)(2616005)(47076005)(55016003)(36756003)(1076003)(186003)(6286002)(107886003)(26005)(6666004)(16526019)(110136005)(316002)(6636002)(54906003)(5660300002)(41300700001)(86362001)(4326008)(70206006)(70586007)(8676002)(7696005)(356005)(82310400005)(478600001)(8936002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "22 Feb 2023 12:27:05.7899 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 3c009e18-16f3-407f-1d64-08db14d01c17",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT007.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SA1PR12MB5669",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The rte_eth_dev_map_aggr_tx_affinity() was introduced in\nethdev lib, it was used to set the affinity value per Tx queue.\n\nThis patch adds the MLX5 PMD support for two device ops:\n - map_aggr_tx_affinity\n - count_aggr_ports\n\nAfter maps a Tx queue with an aggregated port by call\nmap_aggr_tx_affinity() and starts sending traffic, the MLX5 PMD\nupdates TIS creation with tx_aggr_affinity value of Tx queue.\nTIS index 1 goes to first physical port, TIS index 2 goes to second\nphysical port, and so on, TIS index 0 is reserved for default\nHW hash mode.\n\nSigned-off-by: Jiawei Wang <jiaweiw@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/common/mlx5/mlx5_prm.h |  8 ------\n drivers/net/mlx5/mlx5.c        | 49 +++++++++++++++++-----------------\n drivers/net/mlx5/mlx5_devx.c   | 24 +++++++++--------\n drivers/net/mlx5/mlx5_tx.h     |  4 +++\n drivers/net/mlx5/mlx5_txq.c    | 38 ++++++++++++++++++++++++++\n 5 files changed, 80 insertions(+), 43 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h\nindex 26a1f0717d..2f5aeecaa9 100644\n--- a/drivers/common/mlx5/mlx5_prm.h\n+++ b/drivers/common/mlx5/mlx5_prm.h\n@@ -2363,14 +2363,6 @@ struct mlx5_ifc_query_nic_vport_context_in_bits {\n \tu8 reserved_at_68[0x18];\n };\n \n-/*\n- * lag_tx_port_affinity: 0 auto-selection, 1 PF1, 2 PF2 vice versa.\n- * Each TIS binds to one PF by setting lag_tx_port_affinity (>0).\n- * Once LAG enabled, we create multiple TISs and bind each one to\n- * different PFs, then TIS[i] gets affinity i+1 and goes to PF i+1.\n- */\n-#define MLX5_IFC_LAG_MAP_TIS_AFFINITY(index, num) ((num) ? \\\n-\t\t\t\t\t\t    (index) % (num) + 1 : 0)\n struct mlx5_ifc_tisc_bits {\n \tu8 strict_lag_tx_port_affinity[0x1];\n \tu8 reserved_at_1[0x3];\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex f55c1caca0..8c8f71d508 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1257,9 +1257,9 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)\n static int\n mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)\n {\n-\tint i;\n \tstruct mlx5_devx_lag_context lag_ctx = { 0 };\n \tstruct mlx5_devx_tis_attr tis_attr = { 0 };\n+\tint i;\n \n \ttis_attr.transport_domain = sh->td->id;\n \tif (sh->bond.n_port) {\n@@ -1273,35 +1273,30 @@ mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)\n \t\t\tDRV_LOG(ERR, \"Failed to query lag affinity.\");\n \t\t\treturn -1;\n \t\t}\n-\t\tif (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {\n-\t\t\tfor (i = 0; i < sh->bond.n_port; i++) {\n-\t\t\t\ttis_attr.lag_tx_port_affinity =\n-\t\t\t\t\tMLX5_IFC_LAG_MAP_TIS_AFFINITY(i,\n-\t\t\t\t\t\t\tsh->bond.n_port);\n-\t\t\t\tsh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,\n-\t\t\t\t\t\t&tis_attr);\n-\t\t\t\tif (!sh->tis[i]) {\n-\t\t\t\t\tDRV_LOG(ERR, \"Failed to TIS %d/%d for bonding device\"\n-\t\t\t\t\t\t\" %s.\", i, sh->bond.n_port,\n-\t\t\t\t\t\tsh->ibdev_name);\n-\t\t\t\t\treturn 
-1;\n-\t\t\t\t}\n-\t\t\t}\n+\t\tif (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS)\n \t\t\tDRV_LOG(DEBUG, \"LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\\n\",\n \t\t\t\tsh->bond.n_port, lag_ctx.tx_remap_affinity_1,\n \t\t\t\tlag_ctx.tx_remap_affinity_2);\n-\t\t\treturn 0;\n-\t\t}\n-\t\tif (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)\n+\t\telse if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)\n \t\t\tDRV_LOG(INFO, \"Device %s enabled HW hash based LAG.\",\n \t\t\t\t\tsh->ibdev_name);\n \t}\n-\ttis_attr.lag_tx_port_affinity = 0;\n-\tsh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);\n-\tif (!sh->tis[0]) {\n-\t\tDRV_LOG(ERR, \"Failed to TIS 0 for bonding device\"\n-\t\t\t\" %s.\", sh->ibdev_name);\n-\t\treturn -1;\n+\tfor (i = 0; i <= sh->bond.n_port; i++) {\n+\t\t/*\n+\t\t * lag_tx_port_affinity: 0 auto-selection, 1 PF1, 2 PF2 vice versa.\n+\t\t * Each TIS binds to one PF by setting lag_tx_port_affinity (> 0).\n+\t\t * Once LAG enabled, we create multiple TISs and bind each one to\n+\t\t * different PFs, then TIS[i+1] gets affinity i+1 and goes to PF i+1.\n+\t\t * TIS[0] is reserved for HW Hash mode.\n+\t\t */\n+\t\ttis_attr.lag_tx_port_affinity = i;\n+\t\tsh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);\n+\t\tif (!sh->tis[i]) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create TIS %d/%d for [bonding] device\"\n+\t\t\t\t\" %s.\", i, sh->bond.n_port,\n+\t\t\t\tsh->ibdev_name);\n+\t\t\treturn -1;\n+\t\t}\n \t}\n \treturn 0;\n }\n@@ -2335,6 +2330,8 @@ const struct eth_dev_ops mlx5_dev_ops = {\n \t.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,\n \t.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,\n \t.get_monitor_addr = mlx5_get_monitor_addr,\n+\t.count_aggr_ports = mlx5_count_aggr_ports,\n+\t.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,\n };\n \n /* Available operations from secondary process. 
*/\n@@ -2358,6 +2355,8 @@ const struct eth_dev_ops mlx5_dev_sec_ops = {\n \t.tx_burst_mode_get = mlx5_tx_burst_mode_get,\n \t.get_module_info = mlx5_get_module_info,\n \t.get_module_eeprom = mlx5_get_module_eeprom,\n+\t.count_aggr_ports = mlx5_count_aggr_ports,\n+\t.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,\n };\n \n /* Available operations in flow isolated mode. */\n@@ -2422,6 +2421,8 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {\n \t.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,\n \t.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,\n \t.get_monitor_addr = mlx5_get_monitor_addr,\n+\t.count_aggr_ports = mlx5_count_aggr_ports,\n+\t.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,\n };\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex f6e1943fd7..d02cedb202 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -1190,17 +1190,19 @@ static uint32_t\n mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tint tis_idx;\n-\n-\tif (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==\n-\t\t\tMLX5_LAG_MODE_TIS) {\n-\t\ttis_idx = (priv->lag_affinity_idx + queue_idx) %\n-\t\t\tpriv->sh->bond.n_port;\n-\t\tDRV_LOG(INFO, \"port %d txq %d gets affinity %d and maps to PF %d.\",\n-\t\t\tdev->data->port_id, queue_idx, tis_idx + 1,\n-\t\t\tpriv->sh->lag.tx_remap_affinity[tis_idx]);\n-\t} else {\n-\t\ttis_idx = 0;\n+\tstruct mlx5_txq_data *txq_data = (*priv->txqs)[queue_idx];\n+\tint tis_idx = 0;\n+\n+\tif (priv->sh->bond.n_port) {\n+\t\tif (txq_data->tx_aggr_affinity) {\n+\t\t\ttis_idx = txq_data->tx_aggr_affinity;\n+\t\t} else if (priv->sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {\n+\t\t\ttis_idx = (priv->lag_affinity_idx + queue_idx) %\n+\t\t\t\tpriv->sh->bond.n_port + 1;\n+\t\t\tDRV_LOG(INFO, \"port %d txq %d gets affinity %d and maps to PF %d.\",\n+\t\t\t\tdev->data->port_id, queue_idx, 
tis_idx,\n+\t\t\t\tpriv->sh->lag.tx_remap_affinity[tis_idx - 1]);\n+\t\t}\n \t}\n \tMLX5_ASSERT(priv->sh->tis[tis_idx]);\n \treturn priv->sh->tis[tis_idx]->id;\ndiff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h\nindex a056be7ca8..d0c6303a2d 100644\n--- a/drivers/net/mlx5/mlx5_tx.h\n+++ b/drivers/net/mlx5/mlx5_tx.h\n@@ -144,6 +144,7 @@ struct mlx5_txq_data {\n \tuint16_t inlen_send; /* Ordinary send data inline size. */\n \tuint16_t inlen_empw; /* eMPW max packet size to inline. */\n \tuint16_t inlen_mode; /* Minimal data length to inline. */\n+\tuint8_t tx_aggr_affinity; /* TxQ affinity configuration. */\n \tuint32_t qp_num_8s; /* QP number shifted by 8. */\n \tuint64_t offloads; /* Offloads for Tx Queue. */\n \tstruct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */\n@@ -218,6 +219,9 @@ void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);\n void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);\n uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);\n void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);\n+int mlx5_count_aggr_ports(struct rte_eth_dev *dev);\n+int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\t\t      uint8_t affinity);\n \n /* mlx5_tx.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 419e913559..1e0e61a620 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -1365,3 +1365,41 @@ mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)\n \t\t\t\t     ts_mask : 0;\n \t}\n }\n+\n+int mlx5_count_aggr_ports(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\treturn priv->sh->bond.n_port;\n+}\n+\n+int mlx5_map_aggr_tx_affinity(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\t\t      uint8_t affinity)\n+{\n+\tstruct mlx5_txq_ctrl *txq_ctrl;\n+\tstruct mlx5_txq_data *txq;\n+\tstruct mlx5_priv *priv;\n+\n+\tpriv = dev->data->dev_private;\n+\ttxq = (*priv->txqs)[tx_queue_id];\n+\tif 
(!txq)\n+\t\treturn -1;\n+\ttxq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);\n+\tif (tx_queue_id >= priv->txqs_n) {\n+\t\tDRV_LOG(ERR, \"port %u Tx queue index out of range (%u >= %u)\",\n+\t\t\tdev->data->port_id, tx_queue_id, priv->txqs_n);\n+\t\trte_errno = EOVERFLOW;\n+\t\treturn -rte_errno;\n+\t}\n+\tif (affinity > priv->num_lag_ports) {\n+\t\tDRV_LOG(ERR, \"port %u unable to setup Tx queue index %u\"\n+\t\t\t\" affinity is %u exceeds the maximum %u\", dev->data->port_id,\n+\t\t\ttx_queue_id, affinity, priv->num_lag_ports);\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\tDRV_LOG(DEBUG, \"port %u configuring queue %u for aggregated affinity %u\",\n+\t\tdev->data->port_id, tx_queue_id, affinity);\n+\ttxq_ctrl->txq.tx_aggr_affinity = affinity;\n+\treturn 0;\n+}\n",
    "prefixes": [
        "v2",
        "3/3"
    ]
}