get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
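
For example, the record shown below can be fetched with a plain GET request. A minimal sketch using Python's requests library (the library choice is an assumption; any HTTP client works, and no authentication is needed for read access):

import requests

# Fetch the JSON representation of patch 91524, the record captured below.
resp = requests.get("https://patches.dpdk.org/api/patches/91524/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])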

GET /api/patches/91524/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 91524,
    "url": "https://patches.dpdk.org/api/patches/91524/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210415050505.2082663-4-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210415050505.2082663-4-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210415050505.2082663-4-lizh@nvidia.com",
    "date": "2021-04-15T05:05:04",
    "name": "[v5,3/4] net/mlx5: prepare sub-policy for a flow with meter",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2758e35b1a11cd90cf85214325622e732b385737",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210415050505.2082663-4-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 16390,
            "url": "https://patches.dpdk.org/api/series/16390/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=16390",
            "date": "2021-04-15T05:05:01",
            "name": "net/mlx5: support meter policy operations",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/16390/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/91524/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/91524/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F0A39A0A0C;\n\tThu, 15 Apr 2021 07:05:18 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CC8FB161F80;\n\tThu, 15 Apr 2021 07:05:15 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by mails.dpdk.org (Postfix) with ESMTP id CF470161F78\n for <dev@dpdk.org>; Thu, 15 Apr 2021 07:05:13 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n lizh@nvidia.com)\n with SMTP; 15 Apr 2021 08:05:10 +0300",
            "from nvidia.com (c-235-17-1-009.mtl.labs.mlnx [10.235.17.9])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 13F559Bf024397;\n Thu, 15 Apr 2021 08:05:09 +0300"
        ],
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "dekelp@nvidia.com, orika@nvidia.com, viacheslavo@nvidia.com,\n matan@nvidia.com, shahafs@nvidia.com",
        "Cc": "dev@dpdk.org, thomas@monjalon.net, rasland@nvidia.com, roniba@nvidia.com",
        "Date": "Thu, 15 Apr 2021 08:05:04 +0300",
        "Message-Id": "<20210415050505.2082663-4-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20210415050505.2082663-1-lizh@nvidia.com>",
        "References": "<20210401081624.1482490-1-lizh@nvidia.com>\n <20210415050505.2082663-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v5 3/4] net/mlx5: prepare sub-policy for a flow\n with meter",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "When a flow has a RSS action, the driver splits\neach sub flow finally is configured with\na different HW TIR action.\n\nAny RSS action configured in meter policy may cause\na split in the flow configuration.\nTo save performance, any TIR action will be configured\nin different flow table, so policy can be split to\nsub-policies per TIR in the flow creation time.\n\nCreate a function to prepare the policy and\nits sub-policies for a configured flow with meter.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.h    |  10 +++\n drivers/net/mlx5/mlx5_flow_dv.c | 144 ++++++++++++++++++++++++++++++++\n 2 files changed, 154 insertions(+)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 89e43f2de6..cc9b37b9eb 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1095,6 +1095,11 @@ typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,\n typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,\n \t\t\t\tstruct mlx5_flow_meter_info *fm);\n typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);\n+typedef struct mlx5_flow_meter_sub_policy *\n+\t(*mlx5_flow_meter_sub_policy_rss_prepare_t)\n+\t\t(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);\n typedef uint32_t (*mlx5_flow_mtr_alloc_t)\n \t\t\t\t\t    (struct rte_eth_dev *dev);\n typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,\n@@ -1187,6 +1192,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_destroy_policy_rules_t destroy_policy_rules;\n \tmlx5_flow_create_def_policy_t create_def_policy;\n \tmlx5_flow_destroy_def_policy_t destroy_def_policy;\n+\tmlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;\n \tmlx5_flow_counter_alloc_t counter_alloc;\n \tmlx5_flow_counter_free_t counter_free;\n \tmlx5_flow_counter_query_t counter_query;\n@@ -1418,6 +1424,10 @@ int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,\n void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_flow_meter_info *fm);\n void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);\n+struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare\n+\t\t(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);\n int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);\n int mlx5_shared_action_flush(struct rte_eth_dev *dev);\n void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex f789f2454e..ed17bd903f 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -14857,6 +14857,149 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \treturn -1;\n }\n \n+/**\n+ * Find the policy table for prefix table with RSS.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] policy_id\n+ *   Policy index.\n+ * @param[in] rss_desc\n+ *   Pointer to rss_desc\n+ * @return\n+ *   Pointer to table set on success, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_flow_meter_sub_policy *\n+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_meter_sub_policy *sub_policy = NULL;\n+\tuint32_t sub_policy_idx = 0;\n+\tuint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};\n+\tuint32_t i, j;\n+\tstruct mlx5_hrxq *hrxq;\n+\tstruct mlx5_flow_handle dh;\n+\tstruct mlx5_meter_policy_action_container *act_cnt;\n+\tuint32_t domain = MLX5_MTR_DOMAIN_INGRESS;\n+\tuint16_t sub_policy_num;\n+\n+\trte_spinlock_lock(&mtr_policy->sl);\n+\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n+\t\tif (!rss_desc[i])\n+\t\t\tcontinue;\n+\t\thrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);\n+\t\tif (!hrxq_idx[i]) {\n+\t\t\trte_spinlock_unlock(&mtr_policy->sl);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\tsub_policy_num = 
(mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\tfor (i = 0; i < sub_policy_num;\n+\t\ti++) {\n+\t\tfor (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {\n+\t\t\tif (rss_desc[j] &&\n+\t\t\t\thrxq_idx[j] !=\n+\t\t\tmtr_policy->sub_policys[domain][i]->rix_hrxq[j])\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tif (j >= MLX5_MTR_RTE_COLORS) {\n+\t\t\t/*\n+\t\t\t * Found the sub policy table with\n+\t\t\t * the same queue per color\n+\t\t\t */\n+\t\t\trte_spinlock_unlock(&mtr_policy->sl);\n+\t\t\tfor (j = 0; j < MLX5_MTR_RTE_COLORS; j++)\n+\t\t\t\tmlx5_hrxq_release(dev, hrxq_idx[j]);\n+\t\t\treturn mtr_policy->sub_policys[domain][i];\n+\t\t}\n+\t}\n+\t/* Create sub policy. */\n+\tif (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {\n+\t\t/* Reuse the first dummy sub_policy*/\n+\t\tsub_policy = mtr_policy->sub_policys[domain][0];\n+\t\tsub_policy_idx = sub_policy->idx;\n+\t} else {\n+\t\tsub_policy = mlx5_ipool_zmalloc\n+\t\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\t\t&sub_policy_idx);\n+\t\tif (!sub_policy ||\n+\t\t\tsub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM)\n+\t\t\tgoto rss_sub_policy_error;\n+\t\tsub_policy->idx = sub_policy_idx;\n+\t\tsub_policy->main_policy = mtr_policy;\n+\t}\n+\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n+\t\tif (!rss_desc[i])\n+\t\t\tcontinue;\n+\t\tsub_policy->rix_hrxq[i] = hrxq_idx[i];\n+\t\t/*\n+\t\t * Overwrite the last action from\n+\t\t * RSS action to Queue action.\n+\t\t */\n+\t\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n+\t\t\t      hrxq_idx[i]);\n+\t\tif (!hrxq) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create policy hrxq\");\n+\t\t\tgoto rss_sub_policy_error;\n+\t\t}\n+\t\tact_cnt = &mtr_policy->act_cnt[i];\n+\t\tif (act_cnt->rix_mark || act_cnt->modify_hdr) {\n+\t\t\tmemset(&dh, 0, sizeof(struct mlx5_flow_handle));\n+\t\t\tif (act_cnt->rix_mark)\n+\t\t\t\tdh.mark = 1;\n+\t\t\tdh.fate_action = MLX5_FLOW_FATE_QUEUE;\n+\t\t\tdh.rix_hrxq = hrxq_idx[i];\n+\t\t\tflow_drv_rxq_flags_set(dev, &dh);\n+\t\t}\n+\t}\n+\tif (__flow_dv_create_policy_acts_rules(dev, mtr_policy,\n+\t\tsub_policy, domain)) {\n+\t\tDRV_LOG(ERR, \"Failed to create policy \"\n+\t\t\t\"rules per domain.\");\n+\t\tgoto rss_sub_policy_error;\n+\t}\n+\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\ti = (mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\t\tmtr_policy->sub_policys[domain][i] = sub_policy;\n+\t\ti++;\n+\t\tif (i > MLX5_MTR_RSS_MAX_SUB_POLICY)\n+\t\t\tgoto rss_sub_policy_error;\n+\t\tmtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));\n+\t\tmtr_policy->sub_policy_num |=\n+\t\t\t(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);\n+\t}\n+\trte_spinlock_unlock(&mtr_policy->sl);\n+\treturn sub_policy;\n+rss_sub_policy_error:\n+\tif (sub_policy) {\n+\t\t__flow_dv_destroy_sub_policy_rules(dev, sub_policy);\n+\t\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\t\ti = (mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\t\t\tmtr_policy->sub_policys[domain][i] = NULL;\n+\t\t\tmlx5_ipool_free\n+\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\t\t\tsub_policy->idx);\n+\t\t}\n+\t}\n+\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++)\n+\t\tmlx5_hrxq_release(dev, hrxq_idx[i]);\n+\tif 
(sub_policy_idx)\n+\t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\tsub_policy_idx);\n+\trte_spinlock_unlock(&mtr_policy->sl);\n+\treturn NULL;\n+}\n+\n /**\n  * Validate the batch counter support in root table.\n  *\n@@ -15447,6 +15590,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {\n \t.destroy_policy_rules = flow_dv_destroy_policy_rules,\n \t.create_def_policy = flow_dv_create_def_policy,\n \t.destroy_def_policy = flow_dv_destroy_def_policy,\n+\t.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,\n \t.counter_alloc = flow_dv_counter_allocate,\n \t.counter_free = flow_dv_counter_free,\n \t.counter_query = flow_dv_counter_query,\n",
    "prefixes": [
        "v5",
        "3/4"
    ]
}
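
The writable fields of this record (for example "state", "delegate", "archived") can be changed with PUT or PATCH by an authenticated user with maintainer rights on the project. A minimal sketch, assuming a valid Patchwork API token and sufficient permissions; the token value and the new state are placeholders, not taken from this example:

import requests

TOKEN = "REPLACE_WITH_API_TOKEN"  # assumption: a personal Patchwork API token

# PATCH performs a partial update; only the fields supplied here are changed.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/91524/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted"},  # illustrative value only
)
resp.raise_for_status()
print(resp.json()["state"])

The URL in the "mbox" field above returns the raw patch email, suitable for applying with git am.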