From patchwork Tue Oct 27 12:27:23 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82315
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org
Date: Tue, 27 Oct 2020 20:27:23 +0800
Message-Id: <1603801650-442376-29-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
	<1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 28/34] net/mlx5: fix sample register error flow

Currently, a sample flow needs to prepare and register its sub-actions
before the sample action itself is created. When an identical sample
action already exists, the sub-actions registered by the later flow must
be released, or they are leaked, because the existing sample action only
releases its own sub-actions when it is itself released.

When an identical sample action is found, call the sub-action release
function for the later flow to release the redundantly prepared
sub-actions.

Fixes: 0756228b2704 ("net/mlx5: update translate function for sample action")

Signed-off-by: Suanming Mou
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_flow_dv.c | 117 ++++++++++++++++++++--------------------
 1 file changed, 60 insertions(+), 57 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 0527223..2d283ab 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8598,6 +8598,42 @@ struct mlx5_hlist_entry *
 }
 
 /**
+ * Release sample sub action resource.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] act_res
+ *   Pointer to sample sub action resource.
+ */
+static void
+flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
+				struct mlx5_flow_sub_actions_idx *act_res)
+{
+	if (act_res->rix_hrxq) {
+		mlx5_hrxq_release(dev, act_res->rix_hrxq);
+		act_res->rix_hrxq = 0;
+	}
+	if (act_res->rix_encap_decap) {
+		flow_dv_encap_decap_resource_release(dev,
+						     act_res->rix_encap_decap);
+		act_res->rix_encap_decap = 0;
+	}
+	if (act_res->rix_port_id_action) {
+		flow_dv_port_id_action_resource_release(dev,
+					act_res->rix_port_id_action);
+		act_res->rix_port_id_action = 0;
+	}
+	if (act_res->rix_tag) {
+		flow_dv_tag_release(dev, act_res->rix_tag);
+		act_res->rix_tag = 0;
+	}
+	if (act_res->cnt) {
+		flow_dv_counter_release(dev, act_res->cnt);
+		act_res->cnt = 0;
+	}
+}
+
+/**
  * Find existing sample resource or create and register a new one.
  *
  * @param[in, out] dev
@@ -8650,6 +8686,12 @@ struct mlx5_hlist_entry *
 					   __ATOMIC_RELAXED);
 			dev_flow->handle->dvh.rix_sample = idx;
 			dev_flow->dv.sample_res = cache_resource;
+			/*
+			 * Existing sample action should release the prepared
+			 * sub-actions reference counter.
+			 */
+			flow_dv_sample_sub_actions_release(dev,
+						&resource->sample_idx);
 			return 0;
 		}
 	}
@@ -8718,25 +8760,13 @@ struct mlx5_hlist_entry *
 		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
 	return 0;
 error:
-	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
-		if (cache_resource->default_miss)
-			claim_zero(mlx5_glue->destroy_flow_action
+	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
+	    cache_resource->default_miss)
+		claim_zero(mlx5_glue->destroy_flow_action
 				(cache_resource->default_miss));
-	} else {
-		if (cache_resource->sample_idx.rix_hrxq &&
-		    !mlx5_hrxq_release(dev,
-				cache_resource->sample_idx.rix_hrxq))
-			cache_resource->sample_idx.rix_hrxq = 0;
-		if (cache_resource->sample_idx.rix_tag &&
-		    !flow_dv_tag_release(dev,
-				cache_resource->sample_idx.rix_tag))
-			cache_resource->sample_idx.rix_tag = 0;
-		if (cache_resource->sample_idx.cnt) {
-			flow_dv_counter_release(dev,
-				cache_resource->sample_idx.cnt);
-			cache_resource->sample_idx.cnt = 0;
-		}
-	}
+	else
+		flow_dv_sample_sub_actions_release(dev,
+					&cache_resource->sample_idx);
 	if (cache_resource->normal_path_tbl)
 		flow_dv_tbl_resource_release(MLX5_SH(dev),
 				cache_resource->normal_path_tbl);
@@ -8794,6 +8824,13 @@ struct mlx5_hlist_entry *
 					   __ATOMIC_RELAXED);
 			dev_flow->handle->dvh.rix_dest_array = idx;
 			dev_flow->dv.dest_array_res = cache_resource;
+			/*
+			 * Existing sample action should release the prepared
+			 * sub-actions reference counter.
+			 */
+			for (idx = 0; idx < resource->num_of_dest; idx++)
+				flow_dv_sample_sub_actions_release(dev,
+						&resource->sample_idx[idx]);
 			return 0;
 		}
 	}
@@ -10689,21 +10726,8 @@ struct mlx5_hlist_entry *
 		if (cache_resource->normal_path_tbl)
 			flow_dv_tbl_resource_release(MLX5_SH(dev),
 				cache_resource->normal_path_tbl);
-	}
-	if (cache_resource->sample_idx.rix_hrxq &&
-	    !mlx5_hrxq_release(dev,
-			cache_resource->sample_idx.rix_hrxq))
-		cache_resource->sample_idx.rix_hrxq = 0;
-	if (cache_resource->sample_idx.rix_tag &&
-	    !flow_dv_tag_release(dev,
-			cache_resource->sample_idx.rix_tag))
-		cache_resource->sample_idx.rix_tag = 0;
-	if (cache_resource->sample_idx.cnt) {
-		flow_dv_counter_release(dev,
-			cache_resource->sample_idx.cnt);
-		cache_resource->sample_idx.cnt = 0;
-	}
-	if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
+		flow_dv_sample_sub_actions_release(dev,
+					&cache_resource->sample_idx);
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
 			     &priv->sh->sample_action_list, idx,
 			     cache_resource, next);
@@ -10732,7 +10756,6 @@ struct mlx5_hlist_entry *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
-	struct mlx5_flow_sub_actions_idx *mdest_act_res;
 	uint32_t idx = handle->dvh.rix_dest_array;
 	uint32_t i = 0;
 
@@ -10749,29 +10772,9 @@ struct mlx5_hlist_entry *
 		if (cache_resource->action)
 			claim_zero(mlx5_glue->destroy_flow_action
 					(cache_resource->action));
-		for (; i < cache_resource->num_of_dest; i++) {
-			mdest_act_res = &cache_resource->sample_idx[i];
-			if (mdest_act_res->rix_hrxq) {
-				mlx5_hrxq_release(dev,
-					mdest_act_res->rix_hrxq);
-				mdest_act_res->rix_hrxq = 0;
-			}
-			if (mdest_act_res->rix_encap_decap) {
-				flow_dv_encap_decap_resource_release(dev,
-					mdest_act_res->rix_encap_decap);
-				mdest_act_res->rix_encap_decap = 0;
-			}
-			if (mdest_act_res->rix_port_id_action) {
-				flow_dv_port_id_action_resource_release(dev,
-					mdest_act_res->rix_port_id_action);
-				mdest_act_res->rix_port_id_action = 0;
-			}
-			if (mdest_act_res->rix_tag) {
-				flow_dv_tag_release(dev,
-					mdest_act_res->rix_tag);
-				mdest_act_res->rix_tag = 0;
-			}
-		}
+		for (; i < cache_resource->num_of_dest; i++)
+			flow_dv_sample_sub_actions_release(dev,
+					&cache_resource->sample_idx[i]);
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
 			     &priv->sh->dest_array_list, idx,
 			     cache_resource, next);
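
The core of the fix is easier to see outside the driver. Below is a minimal,
self-contained sketch in plain C (hypothetical names such as sub_actions_idx,
sample_cache_entry and sample_register; not the mlx5 structures or APIs) of
the find-or-register pattern the commit message describes: when an equivalent
cached action is found, the caller's freshly prepared sub-resources are
released on the spot instead of being leaked; when a new entry is created,
the cache takes ownership of them.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the prepared sub-action indices. */
struct sub_actions_idx {
	unsigned int rix_hrxq;
	unsigned int rix_tag;
	unsigned int cnt;
};

/* One cached "sample action" owning its sub-actions, with a refcount. */
struct sample_cache_entry {
	int used;
	int key;
	int refcnt;
	struct sub_actions_idx sample_idx;
};

static struct sample_cache_entry cache[4];

/* Release whatever sub-resources are still referenced and clear the
 * indices; this plays the role of the new release helper in the patch. */
static void
sub_actions_release(struct sub_actions_idx *act_res)
{
	if (act_res->rix_hrxq) {
		printf("release hrxq %u\n", act_res->rix_hrxq);
		act_res->rix_hrxq = 0;
	}
	if (act_res->rix_tag) {
		printf("release tag %u\n", act_res->rix_tag);
		act_res->rix_tag = 0;
	}
	if (act_res->cnt) {
		printf("release counter %u\n", act_res->cnt);
		act_res->cnt = 0;
	}
}

/* Find an existing entry for `key` or register a new one.  On the reuse
 * path the caller's prepared sub-actions are redundant and are released
 * immediately; this is exactly the leak the patch closes. */
static struct sample_cache_entry *
sample_register(int key, struct sub_actions_idx *prepared)
{
	struct sample_cache_entry *free_slot = NULL;

	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++) {
		if (cache[i].used && cache[i].key == key) {
			cache[i].refcnt++;
			sub_actions_release(prepared); /* drop duplicates */
			return &cache[i];
		}
		if (!cache[i].used && free_slot == NULL)
			free_slot = &cache[i];
	}
	if (free_slot == NULL)
		return NULL;
	free_slot->used = 1;
	free_slot->key = key;
	free_slot->refcnt = 1;
	free_slot->sample_idx = *prepared;	/* cache takes ownership */
	memset(prepared, 0, sizeof(*prepared));
	return free_slot;
}

int main(void)
{
	struct sub_actions_idx first = { .rix_hrxq = 1, .rix_tag = 2, .cnt = 3 };
	struct sub_actions_idx second = { .rix_hrxq = 4, .rix_tag = 5, .cnt = 6 };

	sample_register(7, &first);	/* creates the cached action */
	sample_register(7, &second);	/* reuses it; second's sub-actions freed */
	return 0;
}

The same discipline is what flow_dv_sample_sub_actions_release() centralizes
in the patch: the reuse paths, the error path and the final release paths all
funnel through one helper, so no path can forget a sub-resource.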