get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
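
GET on this endpoint is public; PUT and PATCH require authentication (the API accepts a per-user token) and are typically restricted to the project's maintainers. A minimal read sketch in Python follows, shown as an illustration rather than as part of this API page — the requests library and the printed fields are the only assumptions:

import requests

# Fetch a single patch as JSON; no authentication is needed for GET.
resp = requests.get("https://patches.dpdk.org/api/patches/82542/",
                    headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the sample response below.
print(patch["name"])                # subject with series prefixes
print(patch["state"])               # e.g. "superseded"
print(patch["submitter"]["email"])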

GET /api/patches/82542/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 82542,
    "url": "https://patches.dpdk.org/api/patches/82542/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1603875616-272798-3-git-send-email-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1603875616-272798-3-git-send-email-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1603875616-272798-3-git-send-email-suanmingm@nvidia.com",
    "date": "2020-10-28T08:59:43",
    "name": "[v5,02/34] net/mlx5: use thread specific flow workspace",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2474b5a89b6670f3515211dd4ca48464f9d8a8b2",
    "submitter": {
        "id": 1887,
        "url": "https://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1603875616-272798-3-git-send-email-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 13411,
            "url": "https://patches.dpdk.org/api/series/13411/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13411",
            "date": "2020-10-28T08:59:42",
            "name": "net/mlx5: support multiple-thread flow operations",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/13411/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/82542/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/82542/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8CE89A04DD;\n\tWed, 28 Oct 2020 10:01:10 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 06F96C31F;\n\tWed, 28 Oct 2020 10:00:54 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id C5320BE7F\n for <dev@dpdk.org>; Wed, 28 Oct 2020 10:00:28 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n suanmingm@nvidia.com) with SMTP; 28 Oct 2020 11:00:25 +0200",
            "from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09S90Jm2024495;\n Wed, 28 Oct 2020 11:00:23 +0200"
        ],
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Shahaf Shuler <shahafs@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Cc": "dev@dpdk.org, rasland@nvidia.com, Xueming Li <xuemingl@nvidia.com>",
        "Date": "Wed, 28 Oct 2020 16:59:43 +0800",
        "Message-Id": "<1603875616-272798-3-git-send-email-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1603875616-272798-1-git-send-email-suanmingm@nvidia.com>",
        "References": "<1601984948-313027-1-git-send-email-suanmingm@nvidia.com>\n <1603875616-272798-1-git-send-email-suanmingm@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v5 02/34] net/mlx5: use thread specific flow\n\tworkspace",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Xueming Li <xuemingl@nvidia.com>\n\nAs part of multi-thread flow support, this patch moves flow intermediate\ndata to thread specific, makes them a flow workspace. The workspace is\nallocated per thread, destroyed along with thread life-cycle.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c   |   5 --\n drivers/net/mlx5/mlx5.c            |   2 -\n drivers/net/mlx5/mlx5.h            |   6 --\n drivers/net/mlx5/mlx5_flow.c       | 168 ++++++++++++++++++++++++++-----------\n drivers/net/mlx5/mlx5_flow.h       |  15 +++-\n drivers/net/mlx5/mlx5_flow_dv.c    |  40 +++++----\n drivers/net/mlx5/mlx5_flow_verbs.c |  24 +++---\n 7 files changed, 171 insertions(+), 89 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex c890998..4ba6d8e 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1449,11 +1449,6 @@\n \t\t\terr = ENOTSUP;\n \t\t\tgoto error;\n \t}\n-\t/*\n-\t * Allocate the buffer for flow creating, just once.\n-\t * The allocation must be done before any flow creating.\n-\t */\n-\tmlx5_flow_alloc_intermediate(eth_dev);\n \t/* Query availability of metadata reg_c's. */\n \terr = mlx5_flow_discover_mreg_c(eth_dev);\n \tif (err < 0) {\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex dc5b7a4..bb494c1 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1400,8 +1400,6 @@ struct mlx5_dev_ctx_shared *\n \tmlx5_flow_list_flush(dev, &priv->flows, true);\n \tmlx5_shared_action_flush(dev);\n \tmlx5_flow_meter_flush(dev, NULL);\n-\t/* Free the intermediate buffers for flow creation. */\n-\tmlx5_flow_free_intermediate(dev);\n \t/* Prevent crashes when queues are still in use. */\n \tdev->rx_pkt_burst = removed_rx_burst;\n \tdev->tx_pkt_burst = removed_tx_burst;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 8de5842..5400b95 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -854,10 +854,6 @@ struct mlx5_priv {\n \tstruct mlx5_drop drop_queue; /* Flow drop queues. */\n \tuint32_t flows; /* RTE Flow rules. */\n \tuint32_t ctrl_flows; /* Control flow rules. */\n-\tvoid *inter_flows; /* Intermediate resources for flow creation. */\n-\tvoid *rss_desc; /* Intermediate rss description resources. */\n-\tint flow_idx; /* Intermediate device flow index. */\n-\tint flow_nested_idx; /* Intermediate device flow index, nested. */\n \tstruct mlx5_obj_ops obj_ops; /* HW objects operations. */\n \tLIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */\n \tLIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */\n@@ -1104,8 +1100,6 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,\n void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list);\n int mlx5_flow_start_default(struct rte_eth_dev *dev);\n void mlx5_flow_stop_default(struct rte_eth_dev *dev);\n-void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);\n-void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);\n int mlx5_flow_verify(struct rte_eth_dev *dev);\n int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);\n int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 2cf15f0..e6a4571 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -832,6 +832,13 @@ struct mlx5_flow_tunnel_info {\n \t},\n };\n \n+/* Key of thread specific flow workspace data. */\n+static pthread_key_t key_workspace;\n+\n+/* Thread specific flow workspace data once initialization data. 
*/\n+static pthread_once_t key_workspace_init;\n+\n+\n /**\n  * Translate tag ID to register.\n  *\n@@ -5532,6 +5539,38 @@ struct tunnel_default_miss_ctx {\n }\n \n /**\n+ * Adjust flow RSS workspace if needed.\n+ *\n+ * @param wks\n+ *   Pointer to thread flow work space.\n+ * @param rss_desc\n+ *   Pointer to RSS descriptor.\n+ * @param[in] nrssq_num\n+ *   New RSS queue number.\n+ *\n+ * @return\n+ *   0 on success, -1 otherwise and rte_errno is set.\n+ */\n+static int\n+flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,\n+\t\t\t  struct mlx5_flow_rss_desc *rss_desc,\n+\t\t\t  uint32_t nrssq_num)\n+{\n+\tbool fidx = !!wks->flow_idx;\n+\n+\tif (likely(nrssq_num <= wks->rssq_num[fidx]))\n+\t\treturn 0;\n+\trss_desc->queue = realloc(rss_desc->queue,\n+\t\t\t  sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));\n+\tif (!rss_desc->queue) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn -1;\n+\t}\n+\twks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);\n+\treturn 0;\n+}\n+\n+/**\n  * Create a flow and add it to @p list.\n  *\n  * @param dev\n@@ -5586,8 +5625,7 @@ struct tunnel_default_miss_ctx {\n \t\tuint8_t buffer[2048];\n \t} items_tx;\n \tstruct mlx5_flow_expand_rss *buf = &expand_buffer.buf;\n-\tstruct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)\n-\t\t\t\t\t      priv->rss_desc)[!!priv->flow_idx];\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n \tconst struct rte_flow_action *p_actions_rx;\n \tuint32_t i;\n \tuint32_t idx = 0;\n@@ -5599,11 +5637,16 @@ struct tunnel_default_miss_ctx {\n \tstruct rte_flow_action *translated_actions = NULL;\n \tstruct mlx5_flow_tunnel *tunnel;\n \tstruct tunnel_default_miss_ctx default_miss_ctx = { 0, };\n-\tint ret = flow_shared_actions_translate(original_actions,\n-\t\t\t\t\t\tshared_actions,\n-\t\t\t\t\t\t&shared_actions_n,\n-\t\t\t\t\t\t&translated_actions, error);\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tbool fidx = !!wks->flow_idx;\n+\tint ret;\n \n+\tMLX5_ASSERT(wks);\n+\trss_desc = &wks->rss_desc[fidx];\n+\tret = flow_shared_actions_translate(original_actions,\n+\t\t\t\t\t    shared_actions,\n+\t\t\t\t\t    &shared_actions_n,\n+\t\t\t\t\t    &translated_actions, error);\n \tif (ret < 0) {\n \t\tMLX5_ASSERT(translated_actions == NULL);\n \t\treturn 0;\n@@ -5636,9 +5679,11 @@ struct tunnel_default_miss_ctx {\n \t\tflow->hairpin_flow_id = hairpin_id;\n \tMLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&\n \t\t    flow->drv_type < MLX5_FLOW_TYPE_MAX);\n-\tmemset(rss_desc, 0, sizeof(*rss_desc));\n+\tmemset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));\n \trss = flow_get_rss_action(p_actions_rx);\n \tif (rss) {\n+\t\tif (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))\n+\t\t\treturn 0;\n \t\t/*\n \t\t * The following information is required by\n \t\t * mlx5_flow_hashfields_adjust() in advance.\n@@ -5668,9 +5713,9 @@ struct tunnel_default_miss_ctx {\n \t * need to be translated before another calling.\n \t * No need to use ping-pong buffer to save memory here.\n \t */\n-\tif (priv->flow_idx) {\n-\t\tMLX5_ASSERT(!priv->flow_nested_idx);\n-\t\tpriv->flow_nested_idx = priv->flow_idx;\n+\tif (fidx) {\n+\t\tMLX5_ASSERT(!wks->flow_nested_idx);\n+\t\twks->flow_nested_idx = fidx;\n \t}\n \tfor (i = 0; i < buf->entries; ++i) {\n \t\t/*\n@@ -5749,9 +5794,9 @@ struct tunnel_default_miss_ctx {\n \tflow_rxq_flags_set(dev, flow);\n \trte_free(translated_actions);\n \t/* Nested flow creation index recovery. 
*/\n-\tpriv->flow_idx = priv->flow_nested_idx;\n-\tif (priv->flow_nested_idx)\n-\t\tpriv->flow_nested_idx = 0;\n+\twks->flow_idx = wks->flow_nested_idx;\n+\tif (wks->flow_nested_idx)\n+\t\twks->flow_nested_idx = 0;\n \ttunnel = flow_tunnel_from_rule(dev, attr, items, actions);\n \tif (tunnel) {\n \t\tflow->tunnel = 1;\n@@ -5773,9 +5818,9 @@ struct tunnel_default_miss_ctx {\n \t\tmlx5_flow_id_release(priv->sh->flow_id_pool,\n \t\t\t\t     hairpin_id);\n \trte_errno = ret;\n-\tpriv->flow_idx = priv->flow_nested_idx;\n-\tif (priv->flow_nested_idx)\n-\t\tpriv->flow_nested_idx = 0;\n+\twks->flow_idx = wks->flow_nested_idx;\n+\tif (wks->flow_nested_idx)\n+\t\twks->flow_nested_idx = 0;\n error_before_hairpin_split:\n \trte_free(translated_actions);\n \treturn 0;\n@@ -6081,48 +6126,75 @@ struct rte_flow *\n }\n \n /**\n- * Allocate intermediate resources for flow creation.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n+ * Release key of thread specific flow workspace data.\n  */\n-void\n-mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)\n+static void\n+flow_release_workspace(void *data)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_workspace *wks = data;\n \n-\tif (!priv->inter_flows) {\n-\t\tpriv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,\n-\t\t\t\t    MLX5_NUM_MAX_DEV_FLOWS *\n-\t\t\t\t    sizeof(struct mlx5_flow) +\n-\t\t\t\t    (sizeof(struct mlx5_flow_rss_desc) +\n-\t\t\t\t    sizeof(uint16_t) * UINT16_MAX) * 2, 0,\n-\t\t\t\t    SOCKET_ID_ANY);\n-\t\tif (!priv->inter_flows) {\n-\t\t\tDRV_LOG(ERR, \"can't allocate intermediate memory.\");\n-\t\t\treturn;\n-\t\t}\n-\t}\n-\tpriv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)\n-\t\t\t [MLX5_NUM_MAX_DEV_FLOWS];\n-\t/* Reset the index. */\n-\tpriv->flow_idx = 0;\n-\tpriv->flow_nested_idx = 0;\n+\tif (!wks)\n+\t\treturn;\n+\tfree(wks->rss_desc[0].queue);\n+\tfree(wks->rss_desc[1].queue);\n+\tfree(wks);\n }\n \n /**\n- * Free intermediate resources for flows.\n+ * Initialize key of thread specific flow workspace data.\n+ */\n+static void\n+flow_alloc_workspace(void)\n+{\n+\tif (pthread_key_create(&key_workspace, flow_release_workspace))\n+\t\tDRV_LOG(ERR, \"Can't create flow workspace data thread key.\");\n+}\n+\n+/**\n+ * Get thread specific flow workspace.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n+ * @return pointer to thread specific flowworkspace data, NULL on error.\n  */\n-void\n-mlx5_flow_free_intermediate(struct rte_eth_dev *dev)\n+struct mlx5_flow_workspace*\n+mlx5_flow_get_thread_workspace(void)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_workspace *data;\n \n-\tmlx5_free(priv->inter_flows);\n-\tpriv->inter_flows = NULL;\n+\tif (pthread_once(&key_workspace_init, flow_alloc_workspace)) {\n+\t\tDRV_LOG(ERR, \"Failed to init flow workspace data thread key.\");\n+\t\treturn NULL;\n+\t}\n+\tdata = pthread_getspecific(key_workspace);\n+\tif (!data) {\n+\t\tdata = calloc(1, sizeof(*data));\n+\t\tif (!data) {\n+\t\t\tDRV_LOG(ERR, \"Failed to allocate flow workspace \"\n+\t\t\t\t\"memory.\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tdata->rss_desc[0].queue = calloc(1,\n+\t\t\t\tsizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);\n+\t\tif (!data->rss_desc[0].queue)\n+\t\t\tgoto err;\n+\t\tdata->rss_desc[1].queue = calloc(1,\n+\t\t\t\tsizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);\n+\t\tif (!data->rss_desc[1].queue)\n+\t\t\tgoto err;\n+\t\tdata->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;\n+\t\tdata->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;\n+\t\tif 
(pthread_setspecific(key_workspace, data)) {\n+\t\t\tDRV_LOG(ERR, \"Failed to set flow workspace to thread.\");\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\treturn data;\n+err:\n+\tif (data->rss_desc[0].queue)\n+\t\tfree(data->rss_desc[0].queue);\n+\tif (data->rss_desc[1].queue)\n+\t\tfree(data->rss_desc[1].queue);\n+\tfree(data);\n+\treturn NULL;\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 06db440..668fcc3 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -75,6 +75,9 @@ enum mlx5_feature_name {\n \tMLX5_MTR_SFX,\n };\n \n+/* Default queue number. */\n+#define MLX5_RSSQ_DEFAULT_NUM 16\n+\n #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)\n@@ -603,7 +606,7 @@ struct mlx5_flow_rss_desc {\n \tuint32_t queue_num; /**< Number of entries in @p queue. */\n \tuint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */\n-\tuint16_t queue[]; /**< Destination queues to redirect traffic to. */\n+\tuint16_t *queue; /**< Destination queues. */\n };\n \n /* PMD flow priority for tunnel */\n@@ -1102,6 +1105,15 @@ struct rte_flow_shared_action {\n \t};\n };\n \n+/* Thread specific flow workspace intermediate data. */\n+struct mlx5_flow_workspace {\n+\tstruct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];\n+\tstruct mlx5_flow_rss_desc rss_desc[2];\n+\tuint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */\n+\tint flow_idx; /* Intermediate device flow index. */\n+\tint flow_nested_idx; /* Intermediate device flow index, nested. */\n+};\n+\n typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,\n \t\t\t\t    const struct rte_flow_attr *attr,\n \t\t\t\t    const struct rte_flow_item items[],\n@@ -1204,6 +1216,7 @@ struct mlx5_flow_driver_ops {\n \n /* mlx5_flow.c */\n \n+struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);\n struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id);\n void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool);\n uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 4a35010..f4f8e15 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -6253,9 +6253,11 @@ struct field_modify_info modify_tcp[] = {\n \tstruct mlx5_flow *dev_flow;\n \tstruct mlx5_flow_handle *dev_handle;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \n+\tMLX5_ASSERT(wks);\n \t/* In case of corrupting the memory. */\n-\tif (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {\n+\tif (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {\n \t\trte_flow_error_set(error, ENOSPC,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t   \"not free temporary device flow\");\n@@ -6269,8 +6271,8 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t\t   \"not enough memory to create flow handle\");\n \t\treturn NULL;\n \t}\n-\t/* No multi-thread supporting. 
*/\n-\tdev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];\n+\tMLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));\n+\tdev_flow = &wks->flows[wks->flow_idx++];\n \tdev_flow->handle = dev_handle;\n \tdev_flow->handle_idx = handle_idx;\n \t/*\n@@ -8942,11 +8944,12 @@ struct field_modify_info modify_tcp[] = {\n \tconst struct rte_flow_action_queue *queue;\n \tstruct mlx5_flow_sub_actions_list *sample_act;\n \tstruct mlx5_flow_sub_actions_idx *sample_idx;\n-\tstruct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)\n-\t\t\t\t\t      priv->rss_desc)\n-\t\t\t\t\t      [!!priv->flow_nested_idx];\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n \tuint64_t action_flags = 0;\n \n+\tMLX5_ASSERT(wks);\n+\trss_desc = &wks->rss_desc[!!wks->flow_nested_idx];\n \tsample_act = &res->sample_act;\n \tsample_idx = &res->sample_idx;\n \tsample_action = (const struct rte_flow_action_sample *)action->conf;\n@@ -9148,18 +9151,18 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t     uint64_t action_flags,\n \t\t\t     struct rte_flow_error *error)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \t/* update normal path action resource into last index of array */\n \tuint32_t dest_index = MLX5_MAX_DEST_NUM - 1;\n \tstruct mlx5_flow_sub_actions_list *sample_act =\n \t\t\t\t\t&mdest_res->sample_act[dest_index];\n-\tstruct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)\n-\t\t\t\t\t      priv->rss_desc)\n-\t\t\t\t\t      [!!priv->flow_nested_idx];\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n \tuint32_t normal_idx = 0;\n \tstruct mlx5_hrxq *hrxq;\n \tuint32_t hrxq_idx;\n \n+\tMLX5_ASSERT(wks);\n+\trss_desc = &wks->rss_desc[!!wks->flow_nested_idx];\n \tif (num_of_dest > 1) {\n \t\tif (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {\n \t\t\t/* Handle QP action for mirroring */\n@@ -9249,9 +9252,8 @@ struct field_modify_info modify_tcp[] = {\n \tstruct mlx5_dev_config *dev_conf = &priv->config;\n \tstruct rte_flow *flow = dev_flow->flow;\n \tstruct mlx5_flow_handle *handle = dev_flow->handle;\n-\tstruct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)\n-\t\t\t\t\t      priv->rss_desc)\n-\t\t\t\t\t      [!!priv->flow_nested_idx];\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n \tuint64_t item_flags = 0;\n \tuint64_t last_item = 0;\n \tuint64_t action_flags = 0;\n@@ -9297,6 +9299,8 @@ struct field_modify_info modify_tcp[] = {\n \t\t.fdb_def_rule = !!priv->fdb_def_rule,\n \t};\n \n+\tMLX5_ASSERT(wks);\n+\trss_desc = &wks->rss_desc[!!wks->flow_nested_idx];\n \tmemset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));\n \tmemset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));\n \tmhdr_res->ft_type = attr->egress ? 
MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n@@ -10280,6 +10284,7 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t   struct mlx5_hrxq **hrxq)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \tuint32_t hrxq_idx;\n \n \tif (flow->shared_rss) {\n@@ -10295,8 +10300,7 @@ struct field_modify_info modify_tcp[] = {\n \t\t}\n \t} else {\n \t\tstruct mlx5_flow_rss_desc *rss_desc =\n-\t\t\t\t&((struct mlx5_flow_rss_desc *)priv->rss_desc)\n-\t\t\t\t[!!priv->flow_nested_idx];\n+\t\t\t\t&wks->rss_desc[!!wks->flow_nested_idx];\n \n \t\tMLX5_ASSERT(rss_desc->queue_num);\n \t\thrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,\n@@ -10347,9 +10351,11 @@ struct field_modify_info modify_tcp[] = {\n \tint n;\n \tint err;\n \tint idx;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \n-\tfor (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {\n-\t\tdev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];\n+\tMLX5_ASSERT(wks);\n+\tfor (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {\n+\t\tdev_flow = &wks->flows[idx];\n \t\tdv = &dev_flow->dv;\n \t\tdh = dev_flow->handle;\n \t\tdv_h = &dh->dvh;\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex d04c37f..0ec9acd 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -1626,7 +1626,9 @@\n \tstruct mlx5_flow *dev_flow;\n \tstruct mlx5_flow_handle *dev_handle;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \n+\tMLX5_ASSERT(wks);\n \tsize += flow_verbs_get_actions_size(actions);\n \tsize += flow_verbs_get_items_size(items);\n \tif (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {\n@@ -1636,7 +1638,7 @@\n \t\treturn NULL;\n \t}\n \t/* In case of corrupting the memory. */\n-\tif (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {\n+\tif (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {\n \t\trte_flow_error_set(error, ENOSPC,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t   \"not free temporary device flow\");\n@@ -1650,8 +1652,8 @@\n \t\t\t\t   \"not enough memory to create flow handle\");\n \t\treturn NULL;\n \t}\n-\t/* No multi-thread supporting. */\n-\tdev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];\n+\tMLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));\n+\tdev_flow = &wks->flows[wks->flow_idx++];\n \tdev_flow->handle = dev_handle;\n \tdev_flow->handle_idx = handle_idx;\n \t/* Memcpy is used, only size needs to be cleared to 0. 
*/\n@@ -1695,11 +1697,12 @@\n \tuint64_t priority = attr->priority;\n \tuint32_t subpriority = 0;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)\n-\t\t\t\t\t      priv->rss_desc)\n-\t\t\t\t\t      [!!priv->flow_nested_idx];\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n \tchar errstr[32];\n \n+\tMLX5_ASSERT(wks);\n+\trss_desc = &wks->rss_desc[!!wks->flow_nested_idx];\n \tif (priority == MLX5_FLOW_PRIO_RSVD)\n \t\tpriority = priv->config.flow_prio - 1;\n \tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n@@ -1956,9 +1959,11 @@\n \tuint32_t dev_handles;\n \tint err;\n \tint idx;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \n-\tfor (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {\n-\t\tdev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];\n+\tMLX5_ASSERT(wks);\n+\tfor (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {\n+\t\tdev_flow = &wks->flows[idx];\n \t\thandle = dev_flow->handle;\n \t\tif (handle->fate_action == MLX5_FLOW_FATE_DROP) {\n \t\t\thrxq = mlx5_drop_action_create(dev);\n@@ -1972,8 +1977,7 @@\n \t\t} else {\n \t\t\tuint32_t hrxq_idx;\n \t\t\tstruct mlx5_flow_rss_desc *rss_desc =\n-\t\t\t\t&((struct mlx5_flow_rss_desc *)priv->rss_desc)\n-\t\t\t\t[!!priv->flow_nested_idx];\n+\t\t\t\t&wks->rss_desc[!!wks->flow_nested_idx];\n \n \t\t\tMLX5_ASSERT(rss_desc->queue_num);\n \t\t\thrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,\n",
    "prefixes": [
        "v5",
        "02/34"
    ]
}
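
The write counterpart is sketched below: PATCH performs a partial update, so a maintainer can change just the fields of interest, such as "state", "delegate" or "archived". The token and the new state are placeholders, not values taken from this page:

import requests

# Partially update the patch; requires an API token with maintainer rights.
# REPLACE_WITH_API_TOKEN is a placeholder, not a real credential.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/82542/",
    headers={"Authorization": "Token REPLACE_WITH_API_TOKEN"},
    json={"state": "accepted"},  # hypothetical new state
)
resp.raise_for_status()
print(resp.json()["state"])

PUT targets the same URL but replaces the full set of writable fields rather than a subset.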