get:
Show a patch.

patch:
Partially update a patch; only the fields supplied are changed.

put:
Update a patch; all writable fields are replaced.

GET /api/patches/82184/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 82184,
    "url": "https://patches.dpdk.org/api/patches/82184/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1603710656-32187-2-git-send-email-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1603710656-32187-2-git-send-email-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1603710656-32187-2-git-send-email-xuemingl@nvidia.com",
    "date": "2020-10-26T11:10:56",
    "name": "[2/2] vdpa/mlx5: specify lag port affinity",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": true,
    "hash": "ce859c351433b0588700c2e55b84afdcb0b1fc09",
    "submitter": {
        "id": 1904,
        "url": "https://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1603710656-32187-2-git-send-email-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 13323,
            "url": "https://patches.dpdk.org/api/series/13323/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13323",
            "date": "2020-10-26T11:10:55",
            "name": "[1/2] common/mlx5: get number of ports that can be bonded",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/13323/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/82184/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/82184/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1B88DA04B5;\n\tMon, 26 Oct 2020 12:11:40 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 86F1E2BFE;\n\tMon, 26 Oct 2020 12:11:25 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id EDAD82BFE\n for <dev@dpdk.org>; Mon, 26 Oct 2020 12:11:23 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n xuemingl@nvidia.com) with SMTP; 26 Oct 2020 13:11:20 +0200",
            "from nvidia.com (pegasus05.mtr.labs.mlnx [10.210.16.100])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09QBBC6U027455;\n Mon, 26 Oct 2020 13:11:20 +0200"
        ],
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Cc": "dev@dpdk.org, xuemingl@nvidia.com, Asaf Penso <asafp@nvidia.com>,\n stable@dpdk.org",
        "Date": "Mon, 26 Oct 2020 11:10:56 +0000",
        "Message-Id": "<1603710656-32187-2-git-send-email-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1603710656-32187-1-git-send-email-xuemingl@nvidia.com>",
        "References": "<1603710656-32187-1-git-send-email-xuemingl@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH 2/2] vdpa/mlx5: specify lag port affinity",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "If set TIS lag port affinity to auto, firmware assign port affinity on\neach creation with Round Robin. In case of 2 PFs, if create virtq,\ndestroy and create again, then each virtq will get same port affinity.\n\nTo resolve this fw limitation, this patch sets create TIS with specified\naffinity for each PF.\n\nCc: stable@dpdk.org\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++\n drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------\n 3 files changed, 19 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex a8f3e4b1de..2e17ed4fca 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -730,6 +730,9 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t}\n \tpriv->caps = attr.vdpa;\n \tpriv->log_max_rqt_size = attr.log_max_rqt_size;\n+\tpriv->num_lag_ports = attr.num_lag_ports;\n+\tif (attr.num_lag_ports == 0)\n+\t\tpriv->num_lag_ports = 1;\n \tpriv->ctx = ctx;\n \tpriv->pci_dev = pci_dev;\n \tpriv->var = mlx5_glue->dv_alloc_var(ctx, 0);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex fcbc12ab0c..c8c1adfde4 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -146,8 +146,9 @@ struct mlx5_vdpa_priv {\n \tstruct mlx5dv_devx_uar *uar;\n \tstruct rte_intr_handle intr_handle;\n \tstruct mlx5_devx_obj *td;\n-\tstruct mlx5_devx_obj *tis;\n+\tstruct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */\n \tuint16_t nr_virtqs;\n+\tuint8_t num_lag_ports;\n \tuint64_t features; /* Negotiated features. 
*/\n \tuint16_t log_max_rqt_size;\n \tstruct mlx5_vdpa_steer steer;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 17e71cf4f4..4724baca4e 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -103,12 +103,13 @@ void\n mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n {\n \tint i;\n-\n \tfor (i = 0; i < priv->nr_virtqs; i++)\n \t\tmlx5_vdpa_virtq_unset(&priv->virtqs[i]);\n-\tif (priv->tis) {\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->tis));\n-\t\tpriv->tis = NULL;\n+\tfor (i = 0; i < priv->num_lag_ports; i++) {\n+\t\tif (priv->tiss[i]) {\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));\n+\t\t\tpriv->tiss[i] = NULL;\n+\t\t}\n \t}\n \tif (priv->td) {\n \t\tclaim_zero(mlx5_devx_cmd_destroy(priv->td));\n@@ -302,7 +303,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \tattr.hw_used_index = last_used_idx;\n \tattr.q_size = vq.size;\n \tattr.mkey = priv->gpa_mkey_index;\n-\tattr.tis_id = priv->tis->id;\n+\tattr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;\n \tattr.queue_index = index;\n \tattr.pd = priv->pdn;\n \tvirtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);\n@@ -432,10 +433,14 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn -rte_errno;\n \t}\n \ttis_attr.transport_domain = priv->td->id;\n-\tpriv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);\n-\tif (!priv->tis) {\n-\t\tDRV_LOG(ERR, \"Failed to create TIS.\");\n-\t\tgoto error;\n+\tfor (i = 0; i < priv->num_lag_ports; i++) {\n+\t\t/* 0 is auto affinity, non-zero value to propose port. */\n+\t\ttis_attr.lag_tx_port_affinity = i + 1;\n+\t\tpriv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);\n+\t\tif (!priv->tiss[i]) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create TIS %u.\", i);\n+\t\t\tgoto error;\n+\t\t}\n \t}\n \tpriv->nr_virtqs = nr_vring;\n \tfor (i = 0; i < nr_vring; i++)\n",
    "prefixes": [
        "2/2"
    ]
}