From patchwork Fri Oct 23 07:14:33 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 81871
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Fri, 23 Oct 2020 15:14:33 +0800
Message-Id: <1603437295-119083-4-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
 <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 03/25] net/mlx5: reuse flow Id as hairpin Id

From: Xueming Li

Hairpin flow matching requires a unique flow ID for matching. This patch
reuses the flow ID as the hairpin flow ID, which removes the code that
generated a separate hairpin ID and also saves flow memory by dropping the
hairpin ID field.

Signed-off-by: Xueming Li
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5.c      | 11 -----------
 drivers/net/mlx5/mlx5.h      |  1 -
 drivers/net/mlx5/mlx5_flow.c | 32 ++++++++++----------------------
 drivers/net/mlx5/mlx5_flow.h |  6 +-----
 4 files changed, 11 insertions(+), 39 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index faf947f..ce3de82 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -979,13 +979,6 @@ struct mlx5_dev_ctx_shared *
 		MLX5_ASSERT(sh->devx_rx_uar);
 		MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
 	}
-	sh->flow_id_pool = mlx5_flow_id_pool_alloc
-					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
-	if (!sh->flow_id_pool) {
-		DRV_LOG(ERR, "can't create flow id pool");
-		err = ENOMEM;
-		goto error;
-	}
 #ifndef RTE_ARCH_64
 	/* Initialize UAR access locks for 32bit implementations. */
 	rte_spinlock_init(&sh->uar_lock_cq);
@@ -1047,8 +1040,6 @@ struct mlx5_dev_ctx_shared *
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	mlx5_free(sh);
 	MLX5_ASSERT(err > 0);
 	rte_errno = err;
@@ -1119,8 +1110,6 @@ struct mlx5_dev_ctx_shared *
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
 	return;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bfb0c28..f6d38d4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -666,7 +666,6 @@ struct mlx5_dev_ctx_shared {
 	void *devx_comp; /* DEVX async comp obj. */
 	struct mlx5_devx_obj *tis; /* TIS object. */
 	struct mlx5_devx_obj *td; /* Transport domain. */
-	struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
 	void *tx_uar; /* Tx/packet pacing shared UAR. */
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2f2b97f..bb6fd74 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3784,9 +3784,8 @@ struct mlx5_flow_tunnel_info {
 			struct rte_flow_action actions_rx[],
 			struct rte_flow_action actions_tx[],
 			struct rte_flow_item pattern_tx[],
-			uint32_t *flow_id)
+			uint32_t flow_id)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_raw_encap *raw_encap;
 	const struct rte_flow_action_raw_decap *raw_decap;
 	struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -3796,7 +3795,6 @@ struct mlx5_flow_tunnel_info {
 	char *addr;
 	int encap = 0;
 
-	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
@@ -3865,7 +3863,7 @@ struct mlx5_flow_tunnel_info {
 	set_tag = (void *)actions_rx;
 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
 	MLX5_ASSERT(set_tag->id > REG_NON);
-	set_tag->data = *flow_id;
+	set_tag->data = flow_id;
 	tag_action->conf = set_tag;
 	/* Create Tx item list. */
 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
@@ -3874,7 +3872,7 @@ struct mlx5_flow_tunnel_info {
 	item->type = (enum rte_flow_item_type)
 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
 	tag_item = (void *)addr;
-	tag_item->data = *flow_id;
+	tag_item->data = flow_id;
 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
 	MLX5_ASSERT(set_tag->id > REG_NON);
 	item->spec = tag_item;
@@ -5100,7 +5098,6 @@ struct mlx5_flow_tunnel_info {
 	uint32_t i;
 	uint32_t idx = 0;
 	int hairpin_flow;
-	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
 	struct rte_flow_attr attr_factor = {0};
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
@@ -5117,24 +5114,22 @@ struct mlx5_flow_tunnel_info {
 				external, hairpin_flow, error);
 	if (ret < 0)
 		return 0;
+	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+	if (!flow) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
 	if (hairpin_flow > 0) {
 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
 			rte_errno = EINVAL;
-			return 0;
+			goto error;
 		}
 		flow_hairpin_split(dev, actions, actions_rx.actions,
 				   actions_hairpin_tx.actions, items_tx.items,
-				   &hairpin_id);
+				   idx);
 		p_actions_rx = actions_rx.actions;
 	}
-	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
-	if (!flow) {
-		rte_errno = ENOMEM;
-		goto error_before_flow;
-	}
 	flow->drv_type = flow_get_drv_type(dev, &attr_factor);
-	if (hairpin_id != 0)
-		flow->hairpin_flow_id = hairpin_id;
 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
@@ -5247,11 +5242,7 @@ struct mlx5_flow_tunnel_info {
 	flow_drv_destroy(dev, flow);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 	rte_errno = ret; /* Restore rte_errno. */
-error_before_flow:
 	ret = rte_errno;
-	if (hairpin_id)
-		mlx5_flow_id_release(priv->sh->flow_id_pool,
-				     hairpin_id);
 	rte_errno = ret;
 	wks->flow_idx = wks->flow_nested_idx;
 	if (wks->flow_nested_idx)
@@ -5392,9 +5383,6 @@ struct rte_flow *
 	 */
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
-	if (flow->hairpin_flow_id)
-		mlx5_flow_id_release(priv->sh->flow_id_pool,
-				     flow->hairpin_flow_id);
 	flow_drv_destroy(dev, flow);
 	if (list)
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 9bc7608..c0816fb 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -914,8 +914,6 @@ struct mlx5_fdir_flow {
 	uint32_t rix_flow; /* Index to flow. */
 };
 
-#define HAIRPIN_FLOW_ID_BITS 28
-
 /* Flow structure. */
 struct rte_flow {
 	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
@@ -923,13 +921,11 @@ struct rte_flow {
 	/**< Device flow handles that are part of the flow. */
 	uint32_t drv_type:2; /**< Driver type. */
 	uint32_t fdir:1; /**< Identifier of associated FDIR if any. */
-	uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS;
-	/**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+	uint32_t meter:16; /**< Holds flow meter id. */
 	uint32_t rix_mreg_copy;
 	/**< Index to metadata register copy table resource. */
 	uint32_t counter; /**< Holds flow counter. */
-	uint16_t meter; /**< Holds flow meter id. */
 } __rte_packed;
 
 /* Thread specific flow workspace intermediate data. */
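
For illustration only, not part of the patch: a minimal, self-contained sketch
of the idea, using hypothetical names rather than the mlx5 driver's real
structures. The flow object's pool index is reused directly as the hairpin tag
value, so no separate hairpin ID allocator and no per-flow hairpin ID field
are needed.

/*
 * Illustrative sketch only, not part of the patch: reuse an object's pool
 * index as its hairpin tag instead of allocating a second ID from a
 * dedicated ID pool. All names below are hypothetical, not mlx5 APIs.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FLOW_POOL_SIZE 16

struct demo_flow {
	uint8_t in_use;
	uint32_t tag; /* value matched by the hairpin Rx/Tx rules */
};

static struct demo_flow flow_pool[FLOW_POOL_SIZE];

/* Allocate a flow; its 1-based pool index doubles as the hairpin tag. */
static uint32_t
demo_flow_alloc(void)
{
	uint32_t idx;

	for (idx = 0; idx < FLOW_POOL_SIZE; idx++) {
		if (!flow_pool[idx].in_use) {
			flow_pool[idx].in_use = 1;
			/* No separate ID pool: the index is the tag. */
			flow_pool[idx].tag = idx + 1;
			return idx + 1;
		}
	}
	return 0; /* pool exhausted */
}

/* Releasing the flow implicitly releases its hairpin tag as well. */
static void
demo_flow_free(uint32_t id)
{
	if (id == 0 || id > FLOW_POOL_SIZE)
		return;
	memset(&flow_pool[id - 1], 0, sizeof(flow_pool[id - 1]));
}

int
main(void)
{
	uint32_t id = demo_flow_alloc();

	if (id == 0)
		return 1;
	printf("flow index %u doubles as hairpin tag %u\n",
	       id, flow_pool[id - 1].tag);
	demo_flow_free(id);
	return 0;
}

In the driver itself the index comes from mlx5_ipool_zmalloc() and is carried
into the Rx SET_TAG action and the Tx TAG item, as the hunks above show.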