From patchwork Tue Oct 6 11:48:51 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 79762
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: viacheslavo@nvidia.com, matan@nvidia.com
Cc: rasland@nvidia.com, dev@dpdk.org, Xueming Li
Date: Tue, 6 Oct 2020 19:48:51 +0800
Message-Id: <1601984948-313027-9-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH 08/25] net/mlx5: make flow table cache thread safe

From: Xueming Li

To support multi-thread flow insertion/removal, this patch uses the
thread-safe hash list API for the flow table cache hash list.

Signed-off-by: Xueming Li
---
 drivers/net/mlx5/mlx5.c         | 102 ++++------------------------
 drivers/net/mlx5/mlx5.h         |   2 +-
 drivers/net/mlx5/mlx5_flow.h    |  17 +++++
 drivers/net/mlx5/mlx5_flow_dv.c | 147 ++++++++++++++++++++--------------------
 4 files changed, 105 insertions(+), 163 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ddf236a..61e5e69 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -976,7 +976,7 @@ struct mlx5_dev_ctx_shared *
 }
 
 /**
- * Destroy table hash list and all the root entries per domain.
+ * Destroy table hash list.
  *
  * @param[in] priv
  *   Pointer to the private device data structure.
@@ -985,46 +985,9 @@ struct mlx5_dev_ctx_shared *
 mlx5_free_table_hash_list(struct mlx5_priv *priv)
 {
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_tbl_data_entry *tbl_data;
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = 0,
-			.reserved = 0,
-			.domain = 0,
-			.direction = 0,
-		}
-	};
-	struct mlx5_hlist_entry *pos;
 
 	if (!sh->flow_tbls)
 		return;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
-	table_key.direction = 1;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
-	table_key.direction = 0;
-	table_key.domain = 1;
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		MLX5_ASSERT(tbl_data);
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_free(tbl_data);
-	}
 	mlx5_hlist_destroy(sh->flow_tbls);
 }
 
@@ -1039,80 +1002,45 @@ struct mlx5_dev_ctx_shared *
  *   Zero on success, positive error code otherwise.
  */
 int
-mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 {
+	int err = 0;
+	/* Tables are only used in DV and DR modes. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	char s[MLX5_HLIST_NAMESIZE];
-	int err = 0;
 
 	MLX5_ASSERT(sh);
 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
 	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
-					  0, false, NULL, NULL, NULL);
+					  0, false, flow_dv_tbl_create_cb, NULL,
+					  flow_dv_tbl_remove_cb);
 	if (!sh->flow_tbls) {
 		DRV_LOG(ERR, "flow tables with hash creation failed.");
 		err = ENOMEM;
 		return err;
 	}
+	sh->flow_tbls->ctx = sh;
 #ifndef HAVE_MLX5DV_DR
+	struct rte_flow_error error;
+	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
+
 	/*
 	 * In case we have not DR support, the zero tables should be created
 	 * because DV expect to see them even if they cannot be created by
 	 * RDMA-CORE.
 	 */
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = 0,
-			.reserved = 0,
-			.domain = 0,
-			.direction = 0,
-		}
-	};
-	struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
-							  sizeof(*tbl_data), 0,
-							  SOCKET_ID_ANY);
-
-	if (!tbl_data) {
-		err = ENOMEM;
-		goto error;
-	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
-	table_key.direction = 1;
-	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
-			       SOCKET_ID_ANY);
-	if (!tbl_data) {
+	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 1, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 1, &error) ||
+	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 1, &error)) {
 		err = ENOMEM;
 		goto error;
 	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
-	table_key.direction = 0;
-	table_key.domain = 1;
-	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
-			       SOCKET_ID_ANY);
-	if (!tbl_data) {
-		err = ENOMEM;
-		goto error;
-	}
-	tbl_data->entry.key = table_key.v64;
-	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
-	if (err)
-		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	return err;
 error:
 	mlx5_free_table_hash_list(priv);
 #endif /* HAVE_MLX5DV_DR */
+#endif
 	return err;
 }
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 464d2cf..f11d783 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -490,7 +490,7 @@ struct mlx5_dev_shared_port {
 	struct {
 		/* Table ID should be at the lowest address. */
 		uint32_t table_id; /**< ID of the table. */
-		uint16_t reserved; /**< must be zero for comparison. */
+		uint16_t dummy; /**< Dummy table for DV API. */
 		uint8_t domain; /**< 1 - FDB, 0 - NIC TX/RX. */
 		uint8_t direction; /**< 1 - egress, 0 - ingress.
 				     */
 	};
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 85f2528..f661d1e 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -368,6 +368,13 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_MAX,
 };
 
+/* Hash list callback context */
+struct mlx5_flow_cb_ctx {
+	struct rte_eth_dev *dev;
+	struct rte_flow_error *error;
+	void *data;
+};
+
 /* Matcher PRM representation */
 struct mlx5_flow_dv_match_params {
 	size_t size;
@@ -1074,4 +1081,14 @@ int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
 				    const struct rte_flow_attr *attr);
 int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
 			  struct rte_mtr_error *error);
+
+/* hash list callbacks: */
+struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
+					       uint64_t key, void *entry_ctx);
+void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
+			   struct mlx5_hlist_entry *entry);
+struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
+		uint32_t table_id, uint8_t egress, uint8_t transfer,
+		uint8_t dummy, struct rte_flow_error *error);
+
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index fafe188..fa19873 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7597,6 +7597,48 @@ struct field_modify_info modify_tcp[] = {
 }
 
+struct mlx5_hlist_entry *
+flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+	struct rte_flow_error *error = ctx;
+	union mlx5_flow_tbl_key key = { .v64 = key64 };
+	struct mlx5_flow_tbl_resource *tbl;
+	void *domain;
+	uint32_t idx = 0;
+	int ret;
+
+	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+	if (!tbl_data) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate flow table data entry");
+		return NULL;
+	}
+	tbl_data->idx = idx;
+	tbl = &tbl_data->tbl;
+	if (key.dummy)
+		return &tbl_data->entry;
+	if (key.domain)
+		domain = sh->fdb_domain;
+	else if (key.direction)
+		domain = sh->tx_domain;
+	else
+		domain = sh->rx_domain;
+	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
+	if (ret) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "cannot create flow table object");
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+		return NULL;
+	}
+	rte_atomic32_init(&tbl_data->jump.refcnt);
+	return &tbl_data->entry;
+}
+
 /**
  * Get a flow table.
  *
@@ -7608,86 +7650,51 @@ struct field_modify_info modify_tcp[] = {
  *   Direction of the table.
  * @param[in] transfer
  *   E-Switch or NIC flow.
+ * @param[in] dummy
+ *   Dummy entry for dv API.
  * @param[out] error
  *   pointer to error structure.
  *
  * @return
  *   Returns tables resource based on the index, NULL in case of failed.
  */
-static struct mlx5_flow_tbl_resource *
+struct mlx5_flow_tbl_resource *
 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 			 uint32_t table_id, uint8_t egress,
-			 uint8_t transfer,
+			 uint8_t transfer, uint8_t dummy,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_flow_tbl_resource *tbl;
 	union mlx5_flow_tbl_key table_key = {
 		{
 			.table_id = table_id,
-			.reserved = 0,
+			.dummy = dummy,
 			.domain = !!transfer,
 			.direction = !!egress,
 		}
 	};
-	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
-							 table_key.v64, NULL);
+	struct mlx5_hlist_entry *entry;
 	struct mlx5_flow_tbl_data_entry *tbl_data;
-	uint32_t idx = 0;
-	int ret;
-	void *domain;
 
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		tbl = &tbl_data->tbl;
-		rte_atomic32_inc(&tbl->refcnt);
-		return tbl;
-	}
-	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
-	if (!tbl_data) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				   NULL,
-				   "cannot allocate flow table data entry");
+	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, error);
+	if (!entry)
 		return NULL;
-	}
-	tbl_data->idx = idx;
-	tbl = &tbl_data->tbl;
-	pos = &tbl_data->entry;
-	if (transfer)
-		domain = sh->fdb_domain;
-	else if (egress)
-		domain = sh->tx_domain;
-	else
-		domain = sh->rx_domain;
-	ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
-	if (ret) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				   NULL, "cannot create flow table object");
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
-		return NULL;
-	}
-	/*
-	 * No multi-threads now, but still better to initialize the reference
-	 * count before insert it into the hash list.
-	 */
-	rte_atomic32_init(&tbl->refcnt);
-	/* Jump action reference count is initialized here. */
-	rte_atomic32_init(&tbl_data->jump.refcnt);
-	pos->key = table_key.v64;
-	ret = !mlx5_hlist_insert(sh->flow_tbls, pos);
-	if (ret < 0) {
-		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "cannot insert flow table data entry");
-		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
-	}
-	rte_atomic32_inc(&tbl->refcnt);
-	return tbl;
+	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+	return &tbl_data->tbl;
+}
+
+void
+flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
+		      struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct mlx5_flow_tbl_data_entry *tbl_data =
+		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+
+	MLX5_ASSERT(entry && sh);
+	if (tbl_data->tbl.obj)
+		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
 }
 
 /**
@@ -7712,18 +7719,7 @@ struct field_modify_info modify_tcp[] = {
 
 	if (!tbl)
 		return 0;
-	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
-		struct mlx5_hlist_entry *pos = &tbl_data->entry;
-
-		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
-		tbl->obj = NULL;
-		/* remove the entry from the hash list and free memory.
-		 */
-		mlx5_hlist_remove(sh->flow_tbls, pos);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
-				tbl_data->idx);
-		return 0;
-	}
-	return 1;
+	return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
 }
 
 /**
@@ -7762,7 +7758,7 @@ struct field_modify_info modify_tcp[] = {
 	int ret;
 
 	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-				       key->domain, error);
+				       key->domain, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -8492,7 +8488,8 @@ struct field_modify_info modify_tcp[] = {
 			return ret;
 		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer, error);
+					       attr->transfer, 0,
+					       error);
 		if (!tbl)
 			return rte_flow_error_set
 				(error, errno,
@@ -9685,7 +9682,7 @@ struct field_modify_info modify_tcp[] = {
 		dtb = &mtb->ingress;
 	/* Create the meter table with METER level. */
 	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
-					    egress, transfer, &error);
+					    egress, transfer, 0, &error);
 	if (!dtb->tbl) {
 		DRV_LOG(ERR, "Failed to create meter policer table.");
 		return -1;
@@ -9693,7 +9690,7 @@ struct field_modify_info modify_tcp[] = {
 	/* Create the meter suffix table with SUFFIX level. */
 	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
 						MLX5_FLOW_TABLE_LEVEL_SUFFIX,
-						egress, transfer, &error);
+						egress, transfer, 0, &error);
 	if (!dtb->sfx_tbl) {
 		DRV_LOG(ERR, "Failed to create meter suffix table.");
 		return -1;
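
The flow introduced by this patch can be read as: flow_dv_tbl_resource_get()
builds the 64-bit table key and calls mlx5_hlist_register(), which either
returns an existing entry with a reference taken or invokes
flow_dv_tbl_create_cb() under the list's protection; flow_dv_tbl_resource_release()
reduces to a single mlx5_hlist_unregister() call, and flow_dv_tbl_remove_cb()
destroys the table object once the last reference is dropped. As a reading aid
only, the sketch below models that register/unregister-with-callbacks pattern
in self-contained C (one bucket, one pthread mutex). All names in it are
hypothetical stand-ins; it is not the mlx5_hlist implementation.

/* Illustrative model only (hypothetical names): a registered hash list
 * with create/remove callbacks, reduced to one bucket and one mutex. */
#include <pthread.h>
#include <stdint.h>

struct toy_entry {
	struct toy_entry *next;
	uint64_t key;
	uint32_t ref_cnt;
};

struct toy_hlist;
typedef struct toy_entry *(*toy_create_cb)(struct toy_hlist *h,
					    uint64_t key, void *cb_ctx);
typedef void (*toy_remove_cb)(struct toy_hlist *h, struct toy_entry *e);

struct toy_hlist {
	pthread_mutex_t lock;
	struct toy_entry *head;
	toy_create_cb create;	/* called with the lock held on first use of a key */
	toy_remove_cb remove;	/* called with the lock held when the last ref drops */
	void *ctx;		/* list-wide context, e.g. the shared device context */
};

/* Look up @key and take a reference, creating the entry on first use. */
struct toy_entry *
toy_hlist_register(struct toy_hlist *h, uint64_t key, void *cb_ctx)
{
	struct toy_entry *e;

	pthread_mutex_lock(&h->lock);
	for (e = h->head; e != NULL; e = e->next)
		if (e->key == key)
			break;
	if (e == NULL) {
		e = h->create(h, key, cb_ctx);
		if (e != NULL) {
			e->key = key;
			e->ref_cnt = 0;
			e->next = h->head;
			h->head = e;
		}
	}
	if (e != NULL)
		e->ref_cnt++;
	pthread_mutex_unlock(&h->lock);
	return e;
}

/* Drop one reference; returns 0 when the entry was removed and freed. */
int
toy_hlist_unregister(struct toy_hlist *h, struct toy_entry *e)
{
	struct toy_entry **p;

	pthread_mutex_lock(&h->lock);
	if (--e->ref_cnt != 0) {
		pthread_mutex_unlock(&h->lock);
		return 1;
	}
	for (p = &h->head; *p != NULL; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			break;
		}
	}
	h->remove(h, e);	/* resource teardown + free, as in the remove callback */
	pthread_mutex_unlock(&h->lock);
	return 0;
}

The point of the pattern is that lookup, reference counting and creation or
destruction of the cached resource all happen under one lock inside the list
helper, so callers such as flow_dv_tbl_resource_get() no longer carry their
own lookup-then-insert logic that would race under multi-threaded flow
insertion and removal.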