From patchwork Fri Oct 23 07:14:51 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 81889
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Fri, 23 Oct 2020 15:14:51 +0800
Message-Id: <1603437295-119083-22-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
 <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 21/25] net/mlx5: introduce thread safe linked
 list cache

From: Xueming Li

New API of linked list for cache:
- Optimized for caches with a small number of entries.
- Optimized for read-mostly lists.
- Thread safe.
- Since the number of entries is limited, entries are allocated by the API.
- For dynamic entry size, pass 0 as the entry size, then the creation
  callback allocates the entry.
- Since the number of entries is limited, there is no need to use the
  indexed pool to allocate memory. The API removes the entry and frees it
  with mlx5_free.
- The search API is not supposed to be used from multiple threads.
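
As a usage sketch (illustrative only, not part of the patch: the tag_entry,
tag_cache and tag_*_cb names and the 32-bit id key are made up for this
example), a user of the API embeds struct mlx5_cache_entry at the start of
its own entry type and provides the match/create/remove callbacks:

struct tag_entry {
	struct mlx5_cache_entry entry; /* Kept first so casts are valid. */
	uint32_t id; /* User data identifying the resource. */
};

static int
tag_match_cb(struct mlx5_cache_list *list __rte_unused,
	     struct mlx5_cache_entry *entry, void *ctx)
{
	struct tag_entry *tag = (struct tag_entry *)entry;
	uint32_t *id = ctx;

	return tag->id != *id; /* 0 means "matches". */
}

static struct mlx5_cache_entry *
tag_create_cb(struct mlx5_cache_list *list __rte_unused,
	      struct mlx5_cache_entry *entry __rte_unused, void *ctx)
{
	uint32_t *id = ctx;
	struct tag_entry *tag;

	tag = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tag), 0, SOCKET_ID_ANY);
	if (!tag)
		return NULL;
	tag->id = *id; /* Create the HW resource here if needed. */
	return &tag->entry;
}

static void
tag_remove_cb(struct mlx5_cache_list *list __rte_unused,
	      struct mlx5_cache_entry *entry)
{
	mlx5_free(entry); /* Release the HW resource here if needed. */
}

static struct mlx5_cache_list tag_cache;

static int
tag_cache_example(void)
{
	uint32_t id = 42;
	struct tag_entry *tag;

	/* Entry size 0: the creation callback allocates the entry. */
	if (mlx5_cache_list_init(&tag_cache, "tags", 0, NULL,
				 tag_create_cb, tag_match_cb, tag_remove_cb))
		return -1;
	/*
	 * The first call creates the entry; later calls with the same id
	 * reuse it and only increase the reference count.
	 */
	tag = (struct tag_entry *)mlx5_cache_register(&tag_cache, &id);
	if (!tag)
		return -1;
	/* ... use the entry ... */
	mlx5_cache_unregister(&tag_cache, &tag->entry);
	return 0;
}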
Signed-off-by: Xueming Li
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_utils.c | 160 ++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_utils.h | 183 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 343 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 1867fde..13590dd 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -217,6 +217,166 @@ struct mlx5_hlist_entry*
 	mlx5_free(h);
 }
 
+/********************* Cache list ************************/
+
+static struct mlx5_cache_entry *
+mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
+			     struct mlx5_cache_entry *entry __rte_unused,
+			     void *ctx __rte_unused)
+{
+	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
+}
+
+static void
+mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
+			     struct mlx5_cache_entry *entry)
+{
+	mlx5_free(entry);
+}
+
+int
+mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+		     uint32_t entry_size, void *ctx,
+		     mlx5_cache_create_cb cb_create,
+		     mlx5_cache_match_cb cb_match,
+		     mlx5_cache_remove_cb cb_remove)
+{
+	MLX5_ASSERT(list);
+	if (!cb_match || (!cb_create ^ !cb_remove))
+		return -1;
+	if (name)
+		snprintf(list->name, sizeof(list->name), "%s", name);
+	list->entry_sz = entry_size;
+	list->ctx = ctx;
+	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+	list->cb_match = cb_match;
+	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+	rte_rwlock_init(&list->lock);
+	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+	LIST_INIT(&list->head);
+	return 0;
+}
+
+static struct mlx5_cache_entry *
+__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+	struct mlx5_cache_entry *entry;
+
+	LIST_FOREACH(entry, &list->head, next) {
+		if (list->cb_match(list, entry, ctx))
+			continue;
+		if (reuse) {
+			__atomic_add_fetch(&entry->ref_cnt, 1,
+					   __ATOMIC_RELAXED);
+			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+				list->name, (void *)entry, entry->ref_cnt);
+		}
+		break;
+	}
+	return entry;
+}
+
+static struct mlx5_cache_entry *
+cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+	struct mlx5_cache_entry *entry;
+
+	rte_rwlock_read_lock(&list->lock);
+	entry = __cache_lookup(list, ctx, reuse);
+	rte_rwlock_read_unlock(&list->lock);
+	return entry;
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+{
+	return cache_lookup(list, ctx, false);
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+{
+	struct mlx5_cache_entry *entry;
+	uint32_t prev_gen_cnt = 0;
+
+	MLX5_ASSERT(list);
+	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+	/* Lookup with read lock, reuse if found. */
+	entry = cache_lookup(list, ctx, true);
+	if (entry)
+		return entry;
+	/* Not found, append with write lock - block read from other threads. */
+	rte_rwlock_write_lock(&list->lock);
+	/* If list changed by other threads before lock, search again. */
+	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+		/* Lookup and reuse w/o read lock. */
+		entry = __cache_lookup(list, ctx, true);
+		if (entry)
+			goto done;
+	}
+	entry = list->cb_create(list, entry, ctx);
+	if (!entry) {
+		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
+			list->name, (void *)entry);
+		goto done;
+	}
+	entry->ref_cnt = 1;
+	LIST_INSERT_HEAD(&list->head, entry, next);
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
+	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+		list->name, (void *)entry, entry->ref_cnt);
+done:
+	rte_rwlock_write_unlock(&list->lock);
+	return entry;
+}
+
+int
+mlx5_cache_unregister(struct mlx5_cache_list *list,
+		      struct mlx5_cache_entry *entry)
+{
+	rte_rwlock_write_lock(&list->lock);
+	MLX5_ASSERT(entry && entry->next.le_prev);
+	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+		list->name, (void *)entry, entry->ref_cnt);
+	if (--entry->ref_cnt) {
+		rte_rwlock_write_unlock(&list->lock);
+		return 1;
+	}
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+	LIST_REMOVE(entry, next);
+	list->cb_remove(list, entry);
+	rte_rwlock_write_unlock(&list->lock);
+	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+		list->name, (void *)entry);
+	return 0;
+}
+
+void
+mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+{
+	struct mlx5_cache_entry *entry;
+
+	MLX5_ASSERT(list);
+	/* No LIST_FOREACH_SAFE, using while instead. */
+	while (!LIST_EMPTY(&list->head)) {
+		entry = LIST_FIRST(&list->head);
+		LIST_REMOVE(entry, next);
+		list->cb_remove(list, entry);
+		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+			list->name, (void *)entry);
+	}
+	memset(list, 0, sizeof(*list));
+}
+
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+{
+	MLX5_ASSERT(list);
+	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
+}
+
 /********************* Indexed pool **********************/
 
 static inline void
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 6968d94..be6e5f6 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -446,6 +446,189 @@ struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
  */
 void mlx5_hlist_destroy(struct mlx5_hlist *h);
 
+/************************ cache list *****************************/
+
+/** Maximum size of string for naming. */
+#define MLX5_NAME_SIZE 32
+
+struct mlx5_cache_list;
+
+/**
+ * Structure of the entry in the cache list. The user should define its own
+ * struct that contains this structure in order to store the data.
+ */
+struct mlx5_cache_entry {
+	LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
+	uint32_t ref_cnt; /* Reference count. */
+};
+
+/**
+ * Type of callback function for entry removal.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The entry in the list.
+ */
+typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
+				     struct mlx5_cache_entry *entry);
+
+/**
+ * Type of function for user defined matching.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The entry in the list.
+ * @param ctx
+ *   The pointer to the new entry context.
+ *
+ * @return
+ *   0 if matching, non-zero otherwise.
+ */
+typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
+				   struct mlx5_cache_entry *entry, void *ctx);
+
+/**
+ * Type of function for user defined cache list entry creation.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The newly allocated entry, NULL if the list entry size is unspecified;
+ *   in that case the new entry has to be allocated in the callback and
+ *   returned.
+ * @param ctx
+ *   The pointer to the new entry context.
+ *
+ * @return
+ *   Pointer of entry on success, NULL otherwise.
+ */
+typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
+				 (struct mlx5_cache_list *list,
+				  struct mlx5_cache_entry *entry,
+				  void *ctx);
+
+/**
+ * Linked cache list structure.
+ *
+ * An entry in the cache list is reused if it already exists: its reference
+ * count is increased and the existing entry is returned.
+ *
+ * When an entry is destroyed from the list, its reference count is
+ * decreased and it is only destroyed when no references remain.
+ *
+ * The linked list cache is designed for a limited number of entries,
+ * read mostly, with few modifications.
+ *
+ * For caches with a huge number of entries, please consider the hash
+ * list cache instead.
+ */
+struct mlx5_cache_list {
+	char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
+	uint32_t entry_sz; /**< Entry size, 0: use create callback. */
+	rte_rwlock_t lock; /* Read/write lock. */
+	uint32_t gen_cnt; /* List modification updates the generation count. */
+	uint32_t count; /* Number of entries in the list. */
+	void *ctx; /* User context passed to the callbacks. */
+	mlx5_cache_create_cb cb_create; /**< Entry create callback. */
+	mlx5_cache_match_cb cb_match; /**< Entry match callback. */
+	mlx5_cache_remove_cb cb_remove; /**< Entry remove callback. */
+	LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
+};
+
+/**
+ * Initialize a cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param name
+ *   Name of the cache list.
+ * @param entry_size
+ *   Entry size to allocate, 0 to allocate by the creation callback.
+ * @param ctx
+ *   Pointer to the list context data.
+ * @param cb_create
+ *   Callback function for entry creation.
+ * @param cb_match
+ *   Callback function for entry matching.
+ * @param cb_remove
+ *   Callback function for entry removal.
+ * @return
+ *   0 on success, otherwise failure.
+ */
+int mlx5_cache_list_init(struct mlx5_cache_list *list,
+			 const char *name, uint32_t entry_size, void *ctx,
+			 mlx5_cache_create_cb cb_create,
+			 mlx5_cache_match_cb cb_match,
+			 mlx5_cache_remove_cb cb_remove);
+
+/**
+ * Search an entry matching the key.
+ *
+ * The returned result might be destroyed by another thread, so this
+ * function must only be used in the main thread.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param ctx
+ *   Common context parameter used by the entry callback function.
+ *
+ * @return
+ *   Pointer of the cache entry if found, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
+					   void *ctx);
+
+/**
+ * Reuse or create an entry in the cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param ctx
+ *   Common context parameter used by the callback function.
+ *
+ * @return
+ *   Registered entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
+					     void *ctx);
+
+/**
+ * Remove an entry from the cache list.
+ *
+ * The user should guarantee the validity of the entry.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param entry
+ *   Entry to be removed from the cache list.
+ * @return
+ *   0 on entry removed, 1 on entry still referenced.
+ */
+int mlx5_cache_unregister(struct mlx5_cache_list *list,
+			  struct mlx5_cache_entry *entry);
+
+/**
+ * Destroy the cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ */
+void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
+
+/**
+ * Get the entry number from the cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @return
+ *   Cache list entry number.
+ */
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
+
+/********************************* indexed pool *************************/
+
 /**
  * This function allocates non-initialized memory entry from pool.
  * In NUMA systems, the memory entry allocated resides on the same
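
For completeness, a hedged sketch of the release and teardown side of the
API, reusing the hypothetical tag_cache/tag_entry names from the example in
the commit message (illustrative only, not part of the patch):

static int
tag_release(struct tag_entry *tag)
{
	/*
	 * mlx5_cache_unregister() returns 0 when the last reference was
	 * dropped and the remove callback already freed the entry, or 1
	 * when other users still hold references to it.
	 */
	return mlx5_cache_unregister(&tag_cache, &tag->entry);
}

static void
tag_cache_close(void)
{
	/* Diagnostic: entries still cached at teardown time. */
	DRV_LOG(DEBUG, "%u tag entries remain cached.",
		mlx5_cache_list_get_entry_num(&tag_cache));
	/* Destroy calls the remove callback on every remaining entry. */
	mlx5_cache_list_destroy(&tag_cache);
}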