From patchwork Tue Oct 6 11:48:46 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 79758
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: viacheslavo@nvidia.com, matan@nvidia.com
Cc: rasland@nvidia.com, dev@dpdk.org, Xueming Li
Date: Tue, 6 Oct 2020 19:48:46 +0800
Message-Id: <1601984948-313027-4-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH 03/25] net/mlx5: reuse flow Id as hairpin Id

From: Xueming Li

Hairpin flow matching requires a unique flow ID for matching. This
patch reuses the flow index as the hairpin flow ID, which removes the
code that generated a separate hairpin ID and saves flow memory by
dropping the hairpin ID field from the flow structure.

Signed-off-by: Xueming Li
---
 drivers/net/mlx5/mlx5.c      | 11 -----------
 drivers/net/mlx5/mlx5.h      |  1 -
 drivers/net/mlx5/mlx5_flow.c | 32 ++++++++++----------------------
 drivers/net/mlx5/mlx5_flow.h |  5 +----
 4 files changed, 11 insertions(+), 38 deletions(-)
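The technique is easier to see outside the driver. Below is a minimal,
self-contained sketch of the idea, not the driver code itself: an
indexed pool already hands back a unique per-object index, so that
index can double as the hairpin correlation value that the Rx half
writes via SET_TAG and the Tx half matches via the TAG item, making a
second ID allocator redundant. All names here (toy_ipool_zmalloc(),
struct toy_flow, TOY_POOL_SIZE) are hypothetical stand-ins for
mlx5_ipool_zmalloc() and struct rte_flow.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_flow {
	uint32_t rx_tag_data; /* Value the Rx half writes into the tag register. */
	uint32_t tx_tag_data; /* Value the Tx half matches against. */
};

#define TOY_POOL_SIZE 64

static struct toy_flow *toy_pool[TOY_POOL_SIZE];

/* Allocate a zeroed object, returning its unique 1-based pool index. */
static struct toy_flow *
toy_ipool_zmalloc(uint32_t *idx)
{
	for (uint32_t i = 0; i < TOY_POOL_SIZE; i++) {
		if (toy_pool[i] == NULL) {
			toy_pool[i] = calloc(1, sizeof(struct toy_flow));
			if (toy_pool[i] == NULL)
				return NULL;
			*idx = i + 1; /* 0 is reserved as "no flow". */
			return toy_pool[i];
		}
	}
	return NULL;
}

int
main(void)
{
	uint32_t idx = 0;
	struct toy_flow *flow = toy_ipool_zmalloc(&idx);

	if (flow == NULL)
		return 1;
	/*
	 * Before the patch, a dedicated ID pool produced a separate
	 * hairpin ID here. After it, the flow's own pool index is the
	 * correlation value shared by both halves of the split flow.
	 */
	flow->rx_tag_data = idx; /* Rx half: SET_TAG action data. */
	flow->tx_tag_data = idx; /* Tx half: TAG item data. */
	printf("hairpin halves correlated by flow index %u\n", idx);
	free(flow);
	toy_pool[idx - 1] = NULL;
	return 0;
}

This also explains the reordering in the mlx5_flow.c hunk below: the
flow object must be allocated before flow_hairpin_split() so its index
exists when the tag data is filled in, and since no separately
allocated ID remains to be released on failure, the error_before_flow
label can go away.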
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 16719e6..6c5c04d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -952,13 +952,6 @@ struct mlx5_dev_ctx_shared *
 		MLX5_ASSERT(sh->devx_rx_uar);
 		MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
 	}
-	sh->flow_id_pool = mlx5_flow_id_pool_alloc
-					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
-	if (!sh->flow_id_pool) {
-		DRV_LOG(ERR, "can't create flow id pool");
-		err = ENOMEM;
-		goto error;
-	}
 #ifndef RTE_ARCH_64
 	/* Initialize UAR access locks for 32bit implementations. */
 	rte_spinlock_init(&sh->uar_lock_cq);
@@ -1020,8 +1013,6 @@ struct mlx5_dev_ctx_shared *
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	mlx5_free(sh);
 	MLX5_ASSERT(err > 0);
 	rte_errno = err;
@@ -1092,8 +1083,6 @@ struct mlx5_dev_ctx_shared *
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
-	if (sh->flow_id_pool)
-		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
 	return;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0080ac8..a3ec994 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -652,7 +652,6 @@ struct mlx5_dev_ctx_shared {
 	void *devx_comp; /* DEVX async comp obj. */
 	struct mlx5_devx_obj *tis; /* TIS object. */
 	struct mlx5_devx_obj *td; /* Transport domain. */
-	struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
 	void *tx_uar; /* Tx/packet pacing shared UAR. */
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index eeee546..f0a6a57 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3426,9 +3426,8 @@ struct mlx5_flow_tunnel_info {
 		   struct rte_flow_action actions_rx[],
 		   struct rte_flow_action actions_tx[],
 		   struct rte_flow_item pattern_tx[],
-		   uint32_t *flow_id)
+		   uint32_t flow_id)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_raw_encap *raw_encap;
 	const struct rte_flow_action_raw_decap *raw_decap;
 	struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -3438,7 +3437,6 @@ struct mlx5_flow_tunnel_info {
 	char *addr;
 	int encap = 0;
 
-	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
@@ -3507,7 +3505,7 @@ struct mlx5_flow_tunnel_info {
 	set_tag = (void *)actions_rx;
 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
 	MLX5_ASSERT(set_tag->id > REG_NON);
-	set_tag->data = *flow_id;
+	set_tag->data = flow_id;
 	tag_action->conf = set_tag;
 	/* Create Tx item list. */
 	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
@@ -3516,7 +3514,7 @@ struct mlx5_flow_tunnel_info {
 	item->type = (enum rte_flow_item_type)
 		     MLX5_RTE_FLOW_ITEM_TYPE_TAG;
 	tag_item = (void *)addr;
-	tag_item->data = *flow_id;
+	tag_item->data = flow_id;
 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
 	MLX5_ASSERT(set_tag->id > REG_NON);
 	item->spec = tag_item;
@@ -4360,7 +4358,6 @@ struct mlx5_flow_tunnel_info {
 	uint32_t i;
 	uint32_t idx = 0;
 	int hairpin_flow;
-	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 	int ret;
@@ -4372,24 +4369,22 @@ struct mlx5_flow_tunnel_info {
 			    external, hairpin_flow, error);
 	if (ret < 0)
 		return 0;
+	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+	if (!flow) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
 	if (hairpin_flow > 0) {
 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
 			rte_errno = EINVAL;
-			return 0;
+			goto error;
 		}
 		flow_hairpin_split(dev, actions, actions_rx.actions,
 				   actions_hairpin_tx.actions, items_tx.items,
-				   &hairpin_id);
+				   idx);
 		p_actions_rx = actions_rx.actions;
 	}
-	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
-	if (!flow) {
-		rte_errno = ENOMEM;
-		goto error_before_flow;
-	}
 	flow->drv_type = flow_get_drv_type(dev, attr);
-	if (hairpin_id != 0)
-		flow->hairpin_flow_id = hairpin_id;
 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
@@ -4517,11 +4512,7 @@ struct mlx5_flow_tunnel_info {
 	flow_drv_destroy(dev, flow);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 	rte_errno = ret; /* Restore rte_errno. */
-error_before_flow:
 	ret = rte_errno;
-	if (hairpin_id)
-		mlx5_flow_id_release(priv->sh->flow_id_pool,
-				     hairpin_id);
 	rte_errno = ret;
 	wks->flow_idx = wks->flow_nested_idx;
 	if (wks->flow_nested_idx)
@@ -4662,9 +4653,6 @@ struct rte_flow *
 	 */
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
-	if (flow->hairpin_flow_id)
-		mlx5_flow_id_release(priv->sh->flow_id_pool,
-				     flow->hairpin_flow_id);
 	flow_drv_destroy(dev, flow);
 	if (list)
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2685481..4a89524 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -841,8 +841,6 @@ struct mlx5_fdir_flow {
 	uint32_t rix_flow; /* Index to flow. */
 };
 
-#define HAIRPIN_FLOW_ID_BITS 28
-
 /* Flow structure. */
 struct rte_flow {
 	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
@@ -850,13 +848,12 @@ struct rte_flow {
 	/**< Device flow handles that are part of the flow. */
 	uint32_t drv_type:2; /**< Driver type. */
 	uint32_t fdir:1; /**< Identifier of associated FDIR if any. */
-	uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS;
-	/**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+	uint32_t meter:16; /**< Holds flow meter id. */
 	uint32_t rix_mreg_copy;
 	/**< Index to metadata register copy table resource. */
 	uint32_t counter; /**< Holds flow counter. */
-	uint16_t meter; /**< Holds flow meter id. */
 } __rte_packed;
 
 /* Thread specific flow workspace intermediate data. */
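A side note on the mlx5_flow.h hunk: the memory saving comes from
deleting the 28-bit hairpin_flow_id and folding the 16-bit meter id
into the bitfield of the packed struct. The sketch below is a
simplified stand-in containing only the fields this patch touches, not
the full struct rte_flow; exact sizes depend on the compiler's
packed-bitfield allocation, so treat the printed numbers as
illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Layout before the patch: full 32-bit bitfield plus a uint16_t. */
struct flow_before {
	uint32_t drv_type:2;
	uint32_t fdir:1;
	uint32_t hairpin_flow_id:28;
	uint32_t copy_applied:1;
	uint32_t rix_mreg_copy;
	uint32_t counter;
	uint16_t meter;
} __attribute__((__packed__));

/* Layout after the patch: 20 bits of bitfield, no separate meter. */
struct flow_after {
	uint32_t drv_type:2;
	uint32_t fdir:1;
	uint32_t copy_applied:1;
	uint32_t meter:16;
	uint32_t rix_mreg_copy;
	uint32_t counter;
} __attribute__((__packed__));

int
main(void)
{
	printf("before: %zu bytes, after: %zu bytes per flow\n",
	       sizeof(struct flow_before), sizeof(struct flow_after));
	return 0;
}

Since the driver allocates one such object per rte_flow rule, the
per-flow saving scales with the number of inserted rules.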