@@ -445,6 +445,11 @@ Limitations
- 256 ports maximum.
- 4M connections maximum.
+- Multiple-thread flow insertion:
+
+ - In order to achieve the best insertion rate, the application should manage the flows per rte lcore.
+ - It is better to configure ``reclaim_mem_mode`` as 0 to accelerate flow object allocation and release with cache.
+
Statistics
----------
@@ -55,6 +55,12 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Mellanox mlx5 driver.**
+
+ Updated the Mellanox mlx5 driver with new features and improvements, including:
+
+ * Optimized multiple-thread flow insertion rate.
+
Removed Items
-------------
@@ -14,6 +14,8 @@
#include <rte_kvargs.h>
#include <rte_devargs.h>
#include <rte_bitops.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
#include <rte_os_shim.h>
#include "mlx5_prm.h"
@@ -11,39 +11,324 @@
#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"
-/********************* Hash List **********************/
+/********************* mlx5 list ************************/
+
+static int
+mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,
+ bool lcores_share, mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free)
+{
+ int i;
+
+ if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
+ !cb_clone_free) {
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
+ if (name)
+ snprintf(list->name, sizeof(list->name), "%s", name);
+ list->ctx = ctx;
+ list->lcores_share = lcores_share;
+ list->cb_create = cb_create;
+ list->cb_match = cb_match;
+ list->cb_remove = cb_remove;
+ list->cb_clone = cb_clone;
+ list->cb_clone_free = cb_clone_free;
+ rte_rwlock_init(&list->lock);
+ DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
+ for (i = 0; i <= RTE_MAX_LCORE; i++)
+ LIST_INIT(&list->cache[i].h);
+ return 0;
+}
+
+struct mlx5_list *
+mlx5_list_create(const char *name, void *ctx, bool lcores_share,
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free)
+{
+ struct mlx5_list *list;
+
+ list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
+ if (!list)
+ return NULL;
+ if (mlx5_list_init(list, name, ctx, lcores_share,
+ cb_create, cb_match, cb_remove, cb_clone,
+ cb_clone_free) != 0) {
+ mlx5_free(list);
+ return NULL;
+ }
+ return list;
+}
+
+static struct mlx5_list_entry *
+__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
+{
+	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
+	uint32_t ret = 0; /* Init: read at check below when !reuse on global cache. */
+
+	while (entry != NULL) {
+		if (list->cb_match(list->ctx, entry, ctx) == 0) {
+			if (reuse) {
+				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
+							 __ATOMIC_RELAXED) - 1;
+				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
+					list->name, (void *)entry,
+					entry->ref_cnt);
+			} else if (lcore_index < RTE_MAX_LCORE) {
+				ret = __atomic_load_n(&entry->ref_cnt,
+						      __ATOMIC_RELAXED);
+			}
+			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
+				return entry;
+			if (reuse && ret == 0)
+				__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED); /* Invalid entry. */
+		}
+		entry = LIST_NEXT(entry, next);
+	}
+	return NULL;
+}
+
+struct mlx5_list_entry *
+mlx5_list_lookup(struct mlx5_list *list, void *ctx)
+{
+ struct mlx5_list_entry *entry = NULL;
+ int i;
+
+ rte_rwlock_read_lock(&list->lock);
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ entry = __list_lookup(list, i, ctx, false);
+ if (entry)
+ break;
+ }
+ rte_rwlock_read_unlock(&list->lock);
+ return entry;
+}
+
+static struct mlx5_list_entry *
+mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
+ struct mlx5_list_entry *gentry, void *ctx)
+{
+ struct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx);
+
+ if (unlikely(!lentry))
+ return NULL;
+ lentry->ref_cnt = 1u;
+ lentry->gentry = gentry;
+ lentry->lcore_idx = (uint32_t)lcore_index;
+ LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
+ return lentry;
+}
+
+static void
+__list_cache_clean(struct mlx5_list *list, int lcore_index)
+{
+ struct mlx5_list_cache *c = &list->cache[lcore_index];
+ struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
+ uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
+ __ATOMIC_RELAXED);
+
+ while (inv_cnt != 0 && entry != NULL) {
+ struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
+
+ if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ LIST_REMOVE(entry, next);
+ if (list->lcores_share)
+ list->cb_clone_free(list->ctx, entry);
+ else
+ list->cb_remove(list->ctx, entry);
+ inv_cnt--;
+ }
+ entry = nentry;
+ }
+}
+
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
+{
+ struct mlx5_list_entry *entry, *local_entry;
+ volatile uint32_t prev_gen_cnt = 0;
+ int lcore_index = rte_lcore_index(rte_lcore_id());
+
+ MLX5_ASSERT(list);
+ MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
+ if (unlikely(lcore_index == -1)) {
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+ /* 0. Free entries that was invalidated by other lcores. */
+ __list_cache_clean(list, lcore_index);
+ /* 1. Lookup in local cache. */
+ local_entry = __list_lookup(list, lcore_index, ctx, true);
+ if (local_entry)
+ return local_entry;
+ if (list->lcores_share) {
+ /* 2. Lookup with read lock on global list, reuse if found. */
+ rte_rwlock_read_lock(&list->lock);
+ entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
+ if (likely(entry)) {
+ rte_rwlock_read_unlock(&list->lock);
+ return mlx5_list_cache_insert(list, lcore_index, entry,
+ ctx);
+ }
+ prev_gen_cnt = list->gen_cnt;
+ rte_rwlock_read_unlock(&list->lock);
+ }
+ /* 3. Prepare new entry for global list and for cache. */
+ entry = list->cb_create(list->ctx, ctx);
+ if (unlikely(!entry))
+ return NULL;
+ entry->ref_cnt = 1u;
+ if (!list->lcores_share) {
+ entry->lcore_idx = (uint32_t)lcore_index;
+ LIST_INSERT_HEAD(&list->cache[lcore_index].h, entry, next);
+ __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
+ list->name, lcore_index, (void *)entry, entry->ref_cnt);
+ return entry;
+ }
+ local_entry = list->cb_clone(list->ctx, entry, ctx);
+ if (unlikely(!local_entry)) {
+ list->cb_remove(list->ctx, entry);
+ return NULL;
+ }
+ local_entry->ref_cnt = 1u;
+ local_entry->gentry = entry;
+ local_entry->lcore_idx = (uint32_t)lcore_index;
+ rte_rwlock_write_lock(&list->lock);
+ /* 4. Make sure the same entry was not created before the write lock. */
+ if (unlikely(prev_gen_cnt != list->gen_cnt)) {
+ struct mlx5_list_entry *oentry = __list_lookup(list,
+ RTE_MAX_LCORE,
+ ctx, true);
+
+ if (unlikely(oentry)) {
+ /* 4.5. Found real race!!, reuse the old entry. */
+ rte_rwlock_write_unlock(&list->lock);
+ list->cb_remove(list->ctx, entry);
+ list->cb_clone_free(list->ctx, local_entry);
+ return mlx5_list_cache_insert(list, lcore_index, oentry,
+ ctx);
+ }
+ }
+ /* 5. Update lists. */
+ LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
+ list->gen_cnt++;
+ rte_rwlock_write_unlock(&list->lock);
+ LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
+ __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
+ (void *)entry, entry->ref_cnt);
+ return local_entry;
+}
-static struct mlx5_hlist_entry *
-mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
- void *ctx __rte_unused)
+int
+mlx5_list_unregister(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
{
- return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
+ struct mlx5_list_entry *gentry = entry->gentry;
+ int lcore_idx;
+
+ if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+ return 1;
+ lcore_idx = rte_lcore_index(rte_lcore_id());
+ MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
+ if (entry->lcore_idx == (uint32_t)lcore_idx) {
+ LIST_REMOVE(entry, next);
+ if (list->lcores_share)
+ list->cb_clone_free(list->ctx, entry);
+ else
+ list->cb_remove(list->ctx, entry);
+ } else if (likely(lcore_idx != -1)) {
+ __atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
+ __ATOMIC_RELAXED);
+ } else {
+ return 0;
+ }
+ if (!list->lcores_share) {
+ __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+ list->name, (void *)entry);
+ return 0;
+ }
+ if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+ return 1;
+ rte_rwlock_write_lock(&list->lock);
+ if (likely(gentry->ref_cnt == 0)) {
+ LIST_REMOVE(gentry, next);
+ rte_rwlock_write_unlock(&list->lock);
+ list->cb_remove(list->ctx, gentry);
+ __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+ list->name, (void *)gentry);
+ return 0;
+ }
+ rte_rwlock_write_unlock(&list->lock);
+ return 1;
}
static void
-mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
- struct mlx5_hlist_entry *entry)
+mlx5_list_uninit(struct mlx5_list *list)
+{
+ struct mlx5_list_entry *entry;
+ int i;
+
+ MLX5_ASSERT(list);
+ for (i = 0; i <= RTE_MAX_LCORE; i++) {
+ while (!LIST_EMPTY(&list->cache[i].h)) {
+ entry = LIST_FIRST(&list->cache[i].h);
+ LIST_REMOVE(entry, next);
+ if (i == RTE_MAX_LCORE) {
+ list->cb_remove(list->ctx, entry);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p "
+ "destroyed.", list->name,
+ (void *)entry);
+ } else {
+ list->cb_clone_free(list->ctx, entry);
+ }
+ }
+ }
+}
+
+void
+mlx5_list_destroy(struct mlx5_list *list)
+{
+ mlx5_list_uninit(list);
+ mlx5_free(list);
+}
+
+uint32_t
+mlx5_list_get_entry_num(struct mlx5_list *list)
{
- mlx5_free(entry);
+ MLX5_ASSERT(list);
+ return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}
+/********************* Hash List **********************/
+
struct mlx5_hlist *
-mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
- uint32_t flags, mlx5_hlist_create_cb cb_create,
- mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
+mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
+ bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free)
{
struct mlx5_hlist *h;
uint32_t act_size;
uint32_t alloc_size;
uint32_t i;
- if (!size || !cb_match || (!cb_create ^ !cb_remove))
- return NULL;
/* Align to the next power of 2, 32bits integer is enough now. */
if (!rte_is_power_of_2(size)) {
act_size = rte_align32pow2(size);
- DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not power of 2, "
- "will be aligned to 0x%" PRIX32 ".", size, act_size);
+ DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
+ "be aligned to 0x%" PRIX32 ".", size, act_size);
} else {
act_size = size;
}
@@ -57,61 +342,24 @@ mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
name ? name : "None");
return NULL;
}
- if (name)
- snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
- h->table_sz = act_size;
h->mask = act_size - 1;
- h->entry_sz = entry_size;
- h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
- h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
- h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
- h->cb_match = cb_match;
- h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
- for (i = 0; i < act_size; i++)
- rte_rwlock_init(&h->buckets[i].lock);
- DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
- h->name, act_size);
- return h;
-}
-
-static struct mlx5_hlist_entry *
-__hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
- void *ctx, bool reuse)
-{
- struct mlx5_hlist_head *first;
- struct mlx5_hlist_entry *node;
-
- MLX5_ASSERT(h);
- first = &h->buckets[idx].head;
- LIST_FOREACH(node, first, next) {
- if (!h->cb_match(h, node, key, ctx)) {
- if (reuse) {
- __atomic_add_fetch(&node->ref_cnt, 1,
- __ATOMIC_RELAXED);
- DRV_LOG(DEBUG, "Hash list %s entry %p "
- "reuse: %u.",
- h->name, (void *)node, node->ref_cnt);
- }
- break;
+ h->lcores_share = lcores_share;
+ h->direct_key = direct_key;
+ for (i = 0; i < act_size; i++) {
+ if (mlx5_list_init(&h->buckets[i].l, name, ctx, lcores_share,
+ cb_create, cb_match, cb_remove, cb_clone,
+ cb_clone_free) != 0) {
+ mlx5_free(h);
+ return NULL;
}
}
- return node;
+ DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
+ name, act_size);
+ return h;
}
-static struct mlx5_hlist_entry *
-hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
- void *ctx, bool reuse)
-{
- struct mlx5_hlist_entry *node;
-
- MLX5_ASSERT(h);
- rte_rwlock_read_lock(&h->buckets[idx].lock);
- node = __hlist_lookup(h, key, idx, ctx, reuse);
- rte_rwlock_read_unlock(&h->buckets[idx].lock);
- return node;
-}
-struct mlx5_hlist_entry *
+struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
uint32_t idx;
@@ -120,102 +368,44 @@ mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
idx = (uint32_t)(key & h->mask);
else
idx = rte_hash_crc_8byte(key, 0) & h->mask;
- return hlist_lookup(h, key, idx, ctx, false);
+ return mlx5_list_lookup(&h->buckets[idx].l, ctx);
}
-struct mlx5_hlist_entry*
+struct mlx5_list_entry*
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
uint32_t idx;
- struct mlx5_hlist_head *first;
- struct mlx5_hlist_bucket *b;
- struct mlx5_hlist_entry *entry;
- uint32_t prev_gen_cnt = 0;
+ struct mlx5_list_entry *entry;
if (h->direct_key)
idx = (uint32_t)(key & h->mask);
else
idx = rte_hash_crc_8byte(key, 0) & h->mask;
- MLX5_ASSERT(h);
- b = &h->buckets[idx];
- /* Use write lock directly for write-most list. */
- if (!h->write_most) {
- prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE);
- entry = hlist_lookup(h, key, idx, ctx, true);
- if (entry)
- return entry;
+ entry = mlx5_list_register(&h->buckets[idx].l, ctx);
+ if (likely(entry)) {
+ if (h->lcores_share)
+ entry->gentry->bucket_idx = idx;
+ else
+ entry->bucket_idx = idx;
}
- rte_rwlock_write_lock(&b->lock);
- /* Check if the list changed by other threads. */
- if (h->write_most ||
- prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) {
- entry = __hlist_lookup(h, key, idx, ctx, true);
- if (entry)
- goto done;
- }
- first = &b->head;
- entry = h->cb_create(h, key, ctx);
- if (!entry) {
- rte_errno = ENOMEM;
- DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
- goto done;
- }
- entry->idx = idx;
- entry->ref_cnt = 1;
- LIST_INSERT_HEAD(first, entry, next);
- __atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
- DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
- h->name, (void *)entry, entry->ref_cnt);
-done:
- rte_rwlock_write_unlock(&b->lock);
return entry;
}
int
-mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
+mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
- uint32_t idx = entry->idx;
+ uint32_t idx = h->lcores_share ? entry->gentry->bucket_idx :
+ entry->bucket_idx;
- rte_rwlock_write_lock(&h->buckets[idx].lock);
- MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
- DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
- h->name, (void *)entry, entry->ref_cnt);
- if (--entry->ref_cnt) {
- rte_rwlock_write_unlock(&h->buckets[idx].lock);
- return 1;
- }
- LIST_REMOVE(entry, next);
- /* Set to NULL to get rid of removing action for more than once. */
- entry->next.le_prev = NULL;
- h->cb_remove(h, entry);
- rte_rwlock_write_unlock(&h->buckets[idx].lock);
- DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
- h->name, (void *)entry);
- return 0;
+ return mlx5_list_unregister(&h->buckets[idx].l, entry);
}
void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
- uint32_t idx;
- struct mlx5_hlist_entry *entry;
+ uint32_t i;
- MLX5_ASSERT(h);
- for (idx = 0; idx < h->table_sz; ++idx) {
- /* No LIST_FOREACH_SAFE, using while instead. */
- while (!LIST_EMPTY(&h->buckets[idx].head)) {
- entry = LIST_FIRST(&h->buckets[idx].head);
- LIST_REMOVE(entry, next);
- /*
- * The owner of whole element which contains data entry
- * is the user, so it's the user's duty to do the clean
- * up and the free work because someone may not put the
- * hlist entry at the beginning(suggested to locate at
- * the beginning). Or else the default free function
- * will be used.
- */
- h->cb_remove(h, entry);
- }
- }
+ for (i = 0; i <= h->mask; i++)
+ mlx5_list_uninit(&h->buckets[i].l);
mlx5_free(h);
}
@@ -7,106 +7,221 @@
#include "mlx5_common.h"
-#define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */
-#define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */
+/************************ mlx5 list *****************************/
-/** Maximum size of string for naming the hlist table. */
-#define MLX5_HLIST_NAMESIZE 32
+/** Maximum size of string for naming. */
+#define MLX5_NAME_SIZE 32
-struct mlx5_hlist;
+struct mlx5_list;
/**
- * Structure of the entry in the hash list, user should define its own struct
- * that contains this in order to store the data. The 'key' is 64-bits right
- * now and its user's responsibility to guarantee there is no collision.
+ * Structure of the entry in the mlx5 list, user should define its own struct
+ * that contains this in order to store the data.
*/
-struct mlx5_hlist_entry {
- LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */
- uint32_t idx; /* Bucket index the entry belongs to. */
- uint32_t ref_cnt; /* Reference count. */
-};
+struct mlx5_list_entry {
+ LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
+ uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+ uint32_t lcore_idx;
+ union {
+ struct mlx5_list_entry *gentry;
+ uint32_t bucket_idx;
+ };
+} __rte_packed;
-/** Structure for hash head. */
-LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);
+struct mlx5_list_cache {
+ LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
+ uint32_t inv_cnt; /* Invalid entries counter. */
+} __rte_cache_aligned;
/**
* Type of callback function for entry removal.
*
- * @param list
- * The hash list.
+ * @param tool_ctx
+ * The tool instance user context.
* @param entry
* The entry in the list.
*/
-typedef void (*mlx5_hlist_remove_cb)(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
+typedef void (*mlx5_list_remove_cb)(void *tool_ctx,
+ struct mlx5_list_entry *entry);
/**
* Type of function for user defined matching.
*
- * @param list
- * The hash list.
+ * @param tool_ctx
+ * The tool instance context.
* @param entry
* The entry in the list.
- * @param key
- * The new entry key.
* @param ctx
* The pointer to new entry context.
*
* @return
* 0 if matching, non-zero number otherwise.
*/
-typedef int (*mlx5_hlist_match_cb)(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *ctx);
+typedef int (*mlx5_list_match_cb)(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *ctx);
+
+typedef struct mlx5_list_entry *(*mlx5_list_clone_cb)(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *ctx);
+
+typedef void (*mlx5_list_clone_free_cb)(void *tool_ctx,
+ struct mlx5_list_entry *entry);
/**
- * Type of function for user defined hash list entry creation.
+ * Type of function for user defined mlx5 list entry creation.
*
- * @param list
- * The hash list.
- * @param key
- * The key of the new entry.
+ * @param tool_ctx
+ * The mlx5 tool instance context.
* @param ctx
* The pointer to new entry context.
*
* @return
- * Pointer to allocated entry on success, NULL otherwise.
+ * Pointer of entry on success, NULL otherwise.
+ */
+typedef struct mlx5_list_entry *(*mlx5_list_create_cb)(void *tool_ctx,
+ void *ctx);
+
+/**
+ * Linked mlx5 list structure.
+ *
+ * Entry in mlx5 list could be reused if entry already exists,
+ * reference count will increase and the existing entry returns.
+ *
+ * When destroy an entry from list, decrease reference count and only
+ * destroy when no further reference.
+ *
+ * The linked list is designed for a limited number of entries
+ * that are mostly read and rarely modified.
+ *
+ * For huge amount of entries, please consider hash list.
+ *
+ */
+struct mlx5_list {
+ char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
+ void *ctx; /* user objects target to callback. */
+ bool lcores_share; /* Whether to share objects between the lcores. */
+ mlx5_list_create_cb cb_create; /**< entry create callback. */
+ mlx5_list_match_cb cb_match; /**< entry match callback. */
+ mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
+ mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
+ mlx5_list_clone_free_cb cb_clone_free;
+ struct mlx5_list_cache cache[RTE_MAX_LCORE + 1];
+ /* Lcore cache, last index is the global cache. */
+ volatile uint32_t gen_cnt; /* List modification may update it. */
+ volatile uint32_t count; /* number of entries in list. */
+ rte_rwlock_t lock; /* read/write lock. */
+};
+
+/**
+ * Create a mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param name
+ * Name of the mlx5 list.
+ * @param ctx
+ * Pointer to the list context data.
+ * @param lcores_share
+ * Whether to share objects between the lcores.
+ * @param cb_create
+ * Callback function for entry create.
+ * @param cb_match
+ * Callback function for entry match.
+ * @param cb_remove
+ * Callback function for entry remove.
+ * @return
+ * List pointer on success, otherwise NULL.
+ */
+struct mlx5_list *mlx5_list_create(const char *name, void *ctx,
+ bool lcores_share,
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free);
+
+/**
+ * Search an entry matching the key.
+ *
+ * The result returned might be destroyed by another thread; use
+ * this function only in the main thread.
+ *
+ * @param list
+ * Pointer to the mlx5 list.
+ * @param ctx
+ * Common context parameter used by entry callback function.
+ *
+ * @return
+ * Pointer of the list entry if found, NULL otherwise.
+ */
+struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
+ void *ctx);
+
+/**
+ * Reuse or create an entry to the mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param ctx
+ * Common context parameter used by callback function.
+ *
+ * @return
+ * registered entry on success, NULL otherwise
+ */
+struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
+ void *ctx);
+
+/**
+ * Remove an entry from the mlx5 list.
+ *
+ * User should guarantee the validity of the entry.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param entry
+ * Entry to be removed from the mlx5 list table.
+ * @return
+ * 0 on entry removed, 1 on entry still referenced.
*/
-typedef struct mlx5_hlist_entry *(*mlx5_hlist_create_cb)
- (struct mlx5_hlist *list,
- uint64_t key, void *ctx);
+int mlx5_list_unregister(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
-/* Hash list bucket head. */
+/**
+ * Destroy the mlx5 list.
+ *
+ * @param list
+ * Pointer to the mlx5 list.
+ */
+void mlx5_list_destroy(struct mlx5_list *list);
+
+/**
+ * Get entry number from the mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @return
+ * mlx5 list entry number.
+ */
+uint32_t
+mlx5_list_get_entry_num(struct mlx5_list *list);
+
+/********************* Hash List **********************/
+
+/* Hash list bucket. */
struct mlx5_hlist_bucket {
- struct mlx5_hlist_head head; /* List head. */
- rte_rwlock_t lock; /* Bucket lock. */
- uint32_t gen_cnt; /* List modification will update generation count. */
+ struct mlx5_list l;
} __rte_cache_aligned;
/**
* Hash list table structure
*
- * Entry in hash list could be reused if entry already exists, reference
- * count will increase and the existing entry returns.
- *
- * When destroy an entry from list, decrease reference count and only
- * destroy when no further reference.
+ * The hash list bucket using the mlx5_list object for managing.
*/
struct mlx5_hlist {
- char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
- /**< number of heads, need to be power of 2. */
- uint32_t table_sz;
- uint32_t entry_sz; /**< Size of entry, used to allocate entry. */
- /**< mask to get the index of the list heads. */
- uint32_t mask;
- bool direct_key; /* Use the new entry key directly as hash index. */
- bool write_most; /* List mostly used for append new or destroy. */
- void *ctx;
- mlx5_hlist_create_cb cb_create; /**< entry create callback. */
- mlx5_hlist_match_cb cb_match; /**< entry match callback. */
- mlx5_hlist_remove_cb cb_remove; /**< entry remove callback. */
+ uint32_t mask; /* A mask for the bucket index range. */
+ uint8_t flags;
+ bool direct_key; /* Whether to use the key directly as hash index. */
+ bool lcores_share; /* Whether to share objects between the lcores. */
struct mlx5_hlist_bucket buckets[] __rte_cache_aligned;
- /**< list bucket arrays. */
};
/**
@@ -123,23 +238,33 @@ struct mlx5_hlist {
* Heads array size of the hash list.
* @param entry_size
* Entry size to allocate if cb_create not specified.
- * @param flags
- * The hash list attribute flags.
+ * @param direct_key
+ * Whether to use the key directly as hash index.
+ * @param lcores_share
+ * Whether to share objects between the lcores.
+ * @param ctx
+ * The hlist instance context.
* @param cb_create
* Callback function for entry create.
* @param cb_match
* Callback function for entry match.
- * @param cb_destroy
- * Callback function for entry destroy.
+ * @param cb_remove
+ * Callback function for entry remove.
+ * @param cb_clone
+ * Callback function for entry clone.
+ * @param cb_clone_free
+ * Callback function for entry clone free.
* @return
* Pointer of the hash list table created, NULL on failure.
*/
__rte_internal
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size,
- uint32_t entry_size, uint32_t flags,
- mlx5_hlist_create_cb cb_create,
- mlx5_hlist_match_cb cb_match,
- mlx5_hlist_remove_cb cb_destroy);
+ bool direct_key, bool lcores_share,
+ void *ctx, mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove,
+ mlx5_list_clone_cb cb_clone,
+ mlx5_list_clone_free_cb cb_clone_free);
/**
* Search an entry matching the key.
@@ -158,7 +283,7 @@ struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size,
* Pointer of the hlist entry if found, NULL otherwise.
*/
__rte_internal
-struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
+struct mlx5_list_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
void *ctx);
/**
@@ -177,7 +302,7 @@ struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
* registered entry on success, NULL otherwise
*/
__rte_internal
-struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
+struct mlx5_list_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
void *ctx);
/**
@@ -192,7 +317,7 @@ struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
* 0 on entry removed, 1 on entry still referenced.
*/
__rte_internal
-int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);
+int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry);
/**
* Destroy the hash list table, all the entries already inserted into the lists
@@ -261,7 +261,7 @@ static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
- char s[MLX5_HLIST_NAMESIZE] __rte_unused;
+ char s[MLX5_NAME_SIZE] __rte_unused;
int err;
MLX5_ASSERT(sh && sh->refcnt);
@@ -274,7 +274,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* Init port id action list. */
snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
- sh->port_id_action_list = mlx5_list_create(s, sh,
+ sh->port_id_action_list = mlx5_list_create(s, sh, true,
flow_dv_port_id_create_cb,
flow_dv_port_id_match_cb,
flow_dv_port_id_remove_cb,
@@ -284,7 +284,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
goto error;
/* Init push vlan action list. */
snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
- sh->push_vlan_action_list = mlx5_list_create(s, sh,
+ sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
flow_dv_push_vlan_create_cb,
flow_dv_push_vlan_match_cb,
flow_dv_push_vlan_remove_cb,
@@ -294,7 +294,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
goto error;
/* Init sample action list. */
snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
- sh->sample_action_list = mlx5_list_create(s, sh,
+ sh->sample_action_list = mlx5_list_create(s, sh, true,
flow_dv_sample_create_cb,
flow_dv_sample_match_cb,
flow_dv_sample_remove_cb,
@@ -304,7 +304,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
goto error;
/* Init dest array action list. */
snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
- sh->dest_array_list = mlx5_list_create(s, sh,
+ sh->dest_array_list = mlx5_list_create(s, sh, true,
flow_dv_dest_array_create_cb,
flow_dv_dest_array_match_cb,
flow_dv_dest_array_remove_cb,
@@ -314,44 +314,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
goto error;
/* Create tags hash list table. */
snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
- sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
- MLX5_HLIST_WRITE_MOST,
- flow_dv_tag_create_cb,
+ sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, false,
+ false, sh, flow_dv_tag_create_cb,
flow_dv_tag_match_cb,
- flow_dv_tag_remove_cb);
+ flow_dv_tag_remove_cb,
+ flow_dv_tag_clone_cb,
+ flow_dv_tag_clone_free_cb);
if (!sh->tag_table) {
DRV_LOG(ERR, "tags with hash creation failed.");
err = ENOMEM;
goto error;
}
- sh->tag_table->ctx = sh;
snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
- 0, MLX5_HLIST_WRITE_MOST |
- MLX5_HLIST_DIRECT_KEY,
+ true, false, sh,
flow_dv_modify_create_cb,
flow_dv_modify_match_cb,
- flow_dv_modify_remove_cb);
+ flow_dv_modify_remove_cb,
+ flow_dv_modify_clone_cb,
+ flow_dv_modify_clone_free_cb);
if (!sh->modify_cmds) {
DRV_LOG(ERR, "hdr modify hash creation failed");
err = ENOMEM;
goto error;
}
- sh->modify_cmds->ctx = sh;
snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
sh->encaps_decaps = mlx5_hlist_create(s,
MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
- 0, MLX5_HLIST_DIRECT_KEY |
- MLX5_HLIST_WRITE_MOST,
+ true, true, sh,
flow_dv_encap_decap_create_cb,
flow_dv_encap_decap_match_cb,
- flow_dv_encap_decap_remove_cb);
+ flow_dv_encap_decap_remove_cb,
+ flow_dv_encap_decap_clone_cb,
+ flow_dv_encap_decap_clone_free_cb);
if (!sh->encaps_decaps) {
DRV_LOG(ERR, "encap decap hash creation failed");
err = ENOMEM;
goto error;
}
- sh->encaps_decaps->ctx = sh;
#endif
#ifdef HAVE_MLX5DV_DR
void *domain;
@@ -1748,7 +1748,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
- priv->hrxqs = mlx5_list_create("hrxq", eth_dev, mlx5_hrxq_create_cb,
+ priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+ mlx5_hrxq_create_cb,
mlx5_hrxq_match_cb,
mlx5_hrxq_remove_cb,
mlx5_hrxq_clone_cb,
@@ -1780,15 +1781,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->sh->dv_regc0_mask) {
priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
MLX5_FLOW_MREG_HTABLE_SZ,
- 0, 0,
+ false, true, eth_dev,
flow_dv_mreg_create_cb,
flow_dv_mreg_match_cb,
- flow_dv_mreg_remove_cb);
+ flow_dv_mreg_remove_cb,
+ flow_dv_mreg_clone_cb,
+ flow_dv_mreg_clone_free_cb);
if (!priv->mreg_cp_tbl) {
err = ENOMEM;
goto error;
}
- priv->mreg_cp_tbl->ctx = eth_dev;
}
rte_spinlock_init(&priv->shared_act_sl);
mlx5_flow_counter_mode_config(eth_dev);
@@ -1358,20 +1358,22 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_dev_ctx_shared *sh = priv->sh;
- char s[MLX5_HLIST_NAMESIZE];
+ char s[MLX5_NAME_SIZE];
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
- 0, 0, flow_dv_tbl_create_cb,
+ false, true, sh,
+ flow_dv_tbl_create_cb,
flow_dv_tbl_match_cb,
- flow_dv_tbl_remove_cb);
+ flow_dv_tbl_remove_cb,
+ flow_dv_tbl_clone_cb,
+ flow_dv_tbl_clone_free_cb);
if (!sh->flow_tbls) {
DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
- sh->flow_tbls->ctx = sh;
#ifndef HAVE_MLX5DV_DR
struct rte_flow_error error;
struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
@@ -84,6 +84,7 @@ struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
struct rte_flow_error *error;
void *data;
+ void *data2;
};
/* Device attributes used in mlx5 PMD */
@@ -3983,28 +3983,27 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx);
int
-flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx __rte_unused)
+flow_dv_mreg_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_mreg_copy_resource *mcp_res =
- container_of(entry, typeof(*mcp_res), hlist_ent);
+ container_of(entry, typeof(*mcp_res), hlist_ent);
- return mcp_res->mark_id != key;
+ return mcp_res->mark_id != *(uint32_t *)(ctx->data);
}
-struct mlx5_hlist_entry *
-flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct rte_flow_error *error = ctx->error;
uint32_t idx = 0;
int ret;
- uint32_t mark_id = key;
+ uint32_t mark_id = *(uint32_t *)(ctx->data);
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
.ingress = 1,
@@ -4110,6 +4109,36 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
return &mcp_res->hlist_ent;
}
+struct mlx5_list_entry *
+flow_dv_mreg_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx __rte_unused)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ uint32_t idx = 0;
+
+ mcp_res = mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
+ if (!mcp_res) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ memcpy(mcp_res, oentry, sizeof(*mcp_res));
+ mcp_res->idx = idx;
+ return &mcp_res->hlist_ent;
+}
+
+void
+flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+}
+
/**
* Add a flow of copying flow metadata registers in RX_CP_TBL.
*
@@ -4140,10 +4169,11 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
+ .data = &mark_id,
};
/* Check if already registered. */
@@ -4156,11 +4186,11 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
}
void
-flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
+flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
struct mlx5_flow_mreg_copy_resource *mcp_res =
- container_of(entry, typeof(*mcp_res), hlist_ent);
- struct rte_eth_dev *dev = list->ctx;
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
MLX5_ASSERT(mcp_res->rix_flow);
@@ -4206,14 +4236,17 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID, NULL);
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
if (!entry)
return;
mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
@@ -4239,6 +4272,8 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_flow_cb_ctx ctx;
+ uint32_t mark_id;
/* Check whether extensive metadata feature is engaged. */
if (!priv->config.dv_flow_en ||
@@ -4250,9 +4285,11 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
* Add default mreg copy flow may be called multiple time, but
* only be called once in stop. Avoid register it twice.
*/
- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
+ mark_id = MLX5_DEFAULT_COPY_ID;
+ ctx.data = &mark_id;
+ if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
return 0;
- mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
+ mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
if (!mcp_res)
return -rte_errno;
return 0;
@@ -8350,7 +8387,7 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
union tunnel_offload_mark mbits = { .val = mark };
union mlx5_flow_tbl_key table_key = {
{
@@ -8362,16 +8399,20 @@ tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
.is_egress = 0,
}
};
- he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &table_key.v64,
+ };
+
+ he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, &ctx);
return he ?
container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}
static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+mlx5_flow_tunnel_grp2tbl_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
@@ -8380,26 +8421,26 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
}
static int
-mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx __rte_unused)
+mlx5_flow_tunnel_grp2tbl_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
union tunnel_tbl_key tbl = {
- .val = key,
+ .val = *(uint64_t *)(ctx->data),
};
struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
- void *ctx __rte_unused)
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key tbl = {
- .val = key,
+ .val = *(uint64_t *)(ctx->data),
};
tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
@@ -8428,13 +8469,36 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
return NULL;
}
+static struct mlx5_list_entry *
+mlx5_flow_tunnel_grp2tbl_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx __rte_unused)
+{
+ struct tunnel_tbl_entry *tte = mlx5_malloc(MLX5_MEM_SYS, sizeof(*tte),
+ 0, SOCKET_ID_ANY);
+
+ if (!tte)
+ return NULL;
+ memcpy(tte, oentry, sizeof(*tte));
+ return &tte->hash;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+ mlx5_free(tte);
+}
+
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
struct rte_flow_error *error)
{
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
struct tunnel_tbl_entry *tte;
union tunnel_tbl_key key = {
.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
@@ -8442,9 +8506,12 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
};
struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
struct mlx5_hlist *group_hash;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = &key.val,
+ };
group_hash = tunnel ? tunnel->groups : thub->groups;
- he = mlx5_hlist_register(group_hash, key.val, NULL);
+ he = mlx5_hlist_register(group_hash, key.val, &ctx);
if (!he)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -8558,15 +8625,17 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
return NULL;
}
- tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
+ tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, false, true,
+ priv->sh,
mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!tunnel->groups) {
mlx5_ipool_free(ipool, id);
return NULL;
}
- tunnel->groups->ctx = priv->sh;
/* initiate new PMD tunnel */
memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
tunnel->tunnel_id = id;
@@ -8666,15 +8735,17 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
LIST_INIT(&thub->tunnels);
rte_spinlock_init(&thub->sl);
thub->groups = mlx5_hlist_create("flow groups",
- rte_align32pow2(MLX5_MAX_TABLES), 0,
- 0, mlx5_flow_tunnel_grp2tbl_create_cb,
+ rte_align32pow2(MLX5_MAX_TABLES),
+ false, true, sh,
+ mlx5_flow_tunnel_grp2tbl_create_cb,
mlx5_flow_tunnel_grp2tbl_match_cb,
- mlx5_flow_tunnel_grp2tbl_remove_cb);
+ mlx5_flow_tunnel_grp2tbl_remove_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_cb,
+ mlx5_flow_tunnel_grp2tbl_clone_free_cb);
if (!thub->groups) {
err = -rte_errno;
goto err;
}
- thub->groups->ctx = sh;
sh->tunnel_hub = thub;
return 0;
@@ -480,7 +480,7 @@ struct mlx5_flow_dv_matcher {
/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/* Pointer to next element. */
uint32_t refcnt; /**< Reference counter. */
void *action;
@@ -495,7 +495,7 @@ struct mlx5_flow_dv_encap_decap_resource {
/* Tag resource structure. */
struct mlx5_flow_dv_tag_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/**< hash list entry for tag resource, tag value as the key. */
void *action;
/**< Tag action object. */
@@ -519,7 +519,7 @@ struct mlx5_flow_dv_tag_resource {
/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
void *action; /**< Modify header action object. */
/* Key area for hash list matching: */
uint8_t ft_type; /**< Flow table type, Rx or Tx. */
@@ -569,7 +569,7 @@ struct mlx5_flow_mreg_copy_resource {
* - Key is 32/64-bit MARK action ID.
* - MUST be the first entry.
*/
- struct mlx5_hlist_entry hlist_ent;
+ struct mlx5_list_entry hlist_ent;
LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
/* List entry for device flows. */
uint32_t idx;
@@ -586,7 +586,7 @@ struct mlx5_flow_tbl_tunnel_prm {
/* Table data structure of the hash organization. */
struct mlx5_flow_tbl_data_entry {
- struct mlx5_hlist_entry entry;
+ struct mlx5_list_entry entry;
/**< hash list entry, 64-bits key inside. */
struct mlx5_flow_tbl_resource tbl;
/**< flow table resource. */
@@ -926,7 +926,7 @@ struct mlx5_flow_tunnel_hub {
/* convert jump group to flow table ID in tunnel rules */
struct tunnel_tbl_entry {
- struct mlx5_hlist_entry hash;
+ struct mlx5_list_entry hash;
uint32_t flow_table;
uint32_t tunnel_id;
uint32_t group;
@@ -1573,110 +1573,105 @@ int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
-/* Hash list callbacks for flow tables: */
-struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *entry_ctx);
-int flow_dv_tbl_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
+int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
+void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *entry_ctx);
+void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
uint32_t table_level, uint8_t egress, uint8_t transfer,
bool external, const struct mlx5_flow_tunnel *tunnel,
uint32_t group_id, uint8_t dummy,
uint32_t table_id, struct rte_flow_error *error);
-struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *cb_ctx);
-int flow_dv_tag_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
+int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_tag_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_modify_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx);
-struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *ctx);
-void flow_dv_modify_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *ctx);
-int flow_dv_mreg_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry, uint64_t key,
+void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *cb_ctx);
+void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
+int flow_dv_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_modify_create_cb(void *tool_ctx, void *ctx);
+void flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_modify_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *oentry,
+ void *ctx);
+void flow_dv_modify_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
+int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_encap_decap_match_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry,
- uint64_t key, void *cb_ctx);
-struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
- uint64_t key, void *cb_ctx);
-void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry);
-
-int flow_dv_matcher_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *ctx);
-struct mlx5_list_entry *flow_dv_matcher_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *ctx);
-void flow_dv_matcher_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
+void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry,
+ void *ctx);
+void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
-int flow_dv_port_id_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-struct mlx5_list_entry *flow_dv_port_id_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *cb_ctx);
-void flow_dv_port_id_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-struct mlx5_list_entry *flow_dv_port_id_clone_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
+int flow_dv_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_port_id_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused);
-int flow_dv_push_vlan_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-struct mlx5_list_entry *flow_dv_push_vlan_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *cb_ctx);
-void flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-struct mlx5_list_entry *flow_dv_push_vlan_clone_cb
- (struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-void flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-
-int flow_dv_sample_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-struct mlx5_list_entry *flow_dv_sample_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *cb_ctx);
-void flow_dv_sample_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-struct mlx5_list_entry *flow_dv_sample_clone_cb
- (struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-void flow_dv_sample_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-
-int flow_dv_dest_array_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-struct mlx5_list_entry *flow_dv_dest_array_create_cb(struct mlx5_list *list,
+struct mlx5_list_entry *flow_dv_encap_decap_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_encap_decap_remove_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
struct mlx5_list_entry *entry,
void *cb_ctx);
-void flow_dv_dest_array_remove_cb(struct mlx5_list *list,
+void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *ctx);
+struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
+void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
+int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx);
+void flow_dv_port_id_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused);
+
+int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_sample_clone_free_cb(void *tool_ctx,
struct mlx5_list_entry *entry);
-struct mlx5_list_entry *flow_dv_dest_array_clone_cb
- (struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *cb_ctx);
-void flow_dv_dest_array_clone_free_cb(struct mlx5_list *list,
+
+int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx);
+struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
+ void *cb_ctx);
+void flow_dv_dest_array_remove_cb(void *tool_ctx,
struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry);
+
struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
uint32_t age_idx);
int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
@@ -3580,25 +3580,9 @@ flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
return 0;
}
-/**
- * Match encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param key
- * Key of the new entry.
- * @param ctx_cb
- * Pointer to new encap_decap resource.
- *
- * @return
- * 0 on matching, none-zero otherwise.
- */
int
-flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key __rte_unused, void *cb_ctx)
+flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
@@ -3617,25 +3601,10 @@ flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
return -1;
}
-/**
- * Allocate encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param ctx_cb
- * Pointer to new encap_decap resource.
- *
- * @return
- * 0 on matching, none-zero otherwise.
- */
-struct mlx5_hlist_entry *
-flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
- uint64_t key __rte_unused,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *domain;
struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
@@ -3673,6 +3642,38 @@ flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
return &resource->entry;
}
+struct mlx5_list_entry *
+flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ uint32_t idx;
+
+ cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &idx);
+ if (!cache_resource) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ memcpy(cache_resource, oentry, sizeof(*cache_resource));
+ cache_resource->idx = idx;
+ return &cache_resource->entry;
+}
+
+void
+flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_dv_encap_decap_resource *res =
+ container_of(entry, typeof(*res), entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
+}
+
/**
* Find existing encap/decap resource or create and register a new one.
*
@@ -3697,7 +3698,7 @@ flow_dv_encap_decap_resource_register
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
union {
struct {
uint32_t ft_type:8;
@@ -3774,23 +3775,21 @@ flow_dv_jump_tbl_resource_register
}
int
-flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
struct mlx5_flow_dv_port_id_action_resource *res =
- container_of(entry, typeof(*res), entry);
+ container_of(entry, typeof(*res), entry);
return ref->port_id != res->port_id;
}
struct mlx5_list_entry *
-flow_dv_port_id_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
struct mlx5_flow_dv_port_id_action_resource *resource;
@@ -3821,11 +3820,11 @@ flow_dv_port_id_create_cb(struct mlx5_list *list,
}
struct mlx5_list_entry *
-flow_dv_port_id_clone_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_port_id_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *resource;
uint32_t idx;
@@ -3843,12 +3842,11 @@ flow_dv_port_id_clone_cb(struct mlx5_list *list,
}
void
-flow_dv_port_id_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_port_id_action_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
}
@@ -3893,23 +3891,21 @@ flow_dv_port_id_action_resource_register
}
int
-flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry, void *cb_ctx)
+flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
struct mlx5_flow_dv_push_vlan_action_resource *res =
- container_of(entry, typeof(*res), entry);
+ container_of(entry, typeof(*res), entry);
return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
}
struct mlx5_list_entry *
-flow_dv_push_vlan_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
struct mlx5_flow_dv_push_vlan_action_resource *resource;
@@ -3946,11 +3942,11 @@ flow_dv_push_vlan_create_cb(struct mlx5_list *list,
}
struct mlx5_list_entry *
-flow_dv_push_vlan_clone_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_push_vlan_clone_cb(void *tool_ctx,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *resource;
uint32_t idx;
@@ -3968,12 +3964,11 @@ flow_dv_push_vlan_clone_cb(struct mlx5_list *list,
}
void
-flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
}
@@ -5294,30 +5289,14 @@ flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
return ret;
}
-/**
- * Match modify-header resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- * @param key
- * Key of the new entry.
- * @param ctx
- * Pointer to new modify-header resource.
- *
- * @return
- * 0 on matching, non-zero otherwise.
- */
int
-flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry,
- uint64_t key __rte_unused, void *cb_ctx)
+flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
struct mlx5_flow_dv_modify_hdr_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
key_len += ref->actions_num * sizeof(ref->actions[0]);
@@ -5325,11 +5304,10 @@ flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
-struct mlx5_hlist_entry *
-flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
- void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *ns;
struct mlx5_flow_dv_modify_hdr_resource *entry;
@@ -5368,6 +5346,33 @@ flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
return &entry->entry;
}
+struct mlx5_list_entry *
+flow_dv_modify_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *oentry, void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_modify_hdr_resource *entry;
+ struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
+ uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
+
+ entry = mlx5_malloc(0, sizeof(*entry) + data_len, 0, SOCKET_ID_ANY);
+ if (!entry) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ return NULL;
+ }
+ memcpy(entry, oentry, sizeof(*entry) + data_len);
+ return &entry->entry;
+}
+
+void
+flow_dv_modify_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ mlx5_free(entry);
+}
+
/**
* Validate the sample action.
*
@@ -5639,7 +5644,7 @@ flow_dv_modify_hdr_resource_register
uint32_t key_len = sizeof(*resource) -
offsetof(typeof(*resource), ft_type) +
resource->actions_num * sizeof(resource->actions[0]);
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.error = error,
.data = resource,
@@ -9915,7 +9920,7 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
}
static struct mlx5_list_entry *
-flow_dv_matcher_clone_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -9938,22 +9943,22 @@ flow_dv_matcher_clone_cb(struct mlx5_list *list __rte_unused,
}
static void
-flow_dv_matcher_clone_free_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
mlx5_free(entry);
}
-struct mlx5_hlist_entry *
-flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
+struct mlx5_list_entry *
+flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
struct mlx5_flow_tbl_data_entry *tbl_data;
- struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
+ struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
struct rte_flow_error *error = ctx->error;
- union mlx5_flow_tbl_key key = { .v64 = key64 };
+ union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
struct mlx5_flow_tbl_resource *tbl;
void *domain;
uint32_t idx = 0;
@@ -10010,7 +10015,7 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
key.level, key.id);
- tbl_data->matchers = mlx5_list_create(matcher_name, sh,
+ tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
flow_dv_matcher_create_cb,
flow_dv_matcher_match_cb,
flow_dv_matcher_remove_cb,
@@ -10030,13 +10035,13 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
}
int
-flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry, uint64_t key64,
- void *cb_ctx __rte_unused)
+flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+ void *cb_ctx)
{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
- union mlx5_flow_tbl_key key = { .v64 = key64 };
+ union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
return tbl_data->level != key.level ||
tbl_data->id != key.id ||
@@ -10045,6 +10050,39 @@ flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
tbl_data->is_egress != !!key.is_egress;
}
+struct mlx5_list_entry *
+flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ struct rte_flow_error *error = ctx->error;
+ uint32_t idx = 0;
+
+ tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+ if (!tbl_data) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow table data entry");
+ return NULL;
+ }
+ memcpy(tbl_data, oentry, sizeof(*tbl_data));
+ tbl_data->idx = idx;
+ return &tbl_data->entry;
+}
+
+void
+flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
+}
+
/**
* Get a flow table.
*
@@ -10095,9 +10133,10 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
- .data = &tt_prm,
+ .data = &table_key.v64,
+ .data2 = &tt_prm,
};
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_tbl_data_entry *tbl_data;
entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
@@ -10116,12 +10155,11 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
}
void
-flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_tbl_data_entry *tbl_data =
- container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
+ container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
MLX5_ASSERT(entry && sh);
if (tbl_data->jump.action)
@@ -10129,7 +10167,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
if (tbl_data->tbl.obj)
mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
if (tbl_data->tunnel_offload && tbl_data->external) {
- struct mlx5_hlist_entry *he;
+ struct mlx5_list_entry *he;
struct mlx5_hlist *tunnel_grp_hash;
struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
union tunnel_tbl_key tunnel_key = {
@@ -10138,11 +10176,14 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
.group = tbl_data->group_id
};
uint32_t table_level = tbl_data->level;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = (void *)&tunnel_key.val,
+ };
tunnel_grp_hash = tbl_data->tunnel ?
tbl_data->tunnel->groups :
thub->groups;
- he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
+ he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
if (he)
mlx5_hlist_unregister(tunnel_grp_hash, he);
DRV_LOG(DEBUG,
@@ -10181,7 +10222,7 @@ flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
}
int
-flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -10196,11 +10237,9 @@ flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_matcher_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_matcher *ref = ctx->data;
struct mlx5_flow_dv_matcher *resource;
@@ -10297,29 +10336,29 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
return 0;
}
-struct mlx5_hlist_entry *
-flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
+struct mlx5_list_entry *
+flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct rte_flow_error *error = ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_tag_resource *entry;
uint32_t idx = 0;
int ret;
entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
if (!entry) {
- rte_flow_error_set(error, ENOMEM,
+ rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
return NULL;
}
entry->idx = idx;
- entry->tag_id = key;
- ret = mlx5_flow_os_create_flow_action_tag(key,
+ entry->tag_id = *(uint32_t *)(ctx->data);
+ ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
&entry->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
- rte_flow_error_set(error, ENOMEM,
+ rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
return NULL;
@@ -10328,14 +10367,45 @@ flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
}
int
-flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry, uint64_t key,
- void *cb_ctx __rte_unused)
+flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_tag_resource *tag =
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+
+ return *(uint32_t *)(ctx->data) != tag->tag_id;
+}
+
+struct mlx5_list_entry *
+flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+ void *cb_ctx)
{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_dv_tag_resource *entry;
+ uint32_t idx = 0;
+
+ entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
+ if (!entry) {
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate tag resource memory");
+ return NULL;
+ }
+ memcpy(entry, oentry, sizeof(*entry));
+ entry->idx = idx;
+ return &entry->entry;
+}
+
+void
+flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_tag_resource *tag =
- container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
- return key != tag->tag_id;
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
/**
@@ -10362,9 +10432,13 @@ flow_dv_tag_resource_register
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_tag_resource *resource;
- struct mlx5_hlist_entry *entry;
+ struct mlx5_list_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .error = error,
+ .data = &tag_be24,
+ };
- entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
+ entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, &ctx);
if (entry) {
resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
entry);
@@ -10376,12 +10450,11 @@ flow_dv_tag_resource_register
}
void
-flow_dv_tag_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_tag_resource *tag =
- container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
+ container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
MLX5_ASSERT(tag && sh && tag->action);
claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
@@ -10696,7 +10769,7 @@ flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
}
int
-flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -10725,9 +10798,7 @@ flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
@@ -10814,7 +10885,7 @@ flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_sample_clone_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
@@ -10840,16 +10911,15 @@ flow_dv_sample_clone_cb(struct mlx5_list *list __rte_unused,
}
void
-flow_dv_sample_clone_free_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_sample_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
struct rte_eth_dev *dev = resource->dev;
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
- resource->idx);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
}
/**
@@ -10892,14 +10962,14 @@ flow_dv_sample_resource_register(struct rte_eth_dev *dev,
}
int
-flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
+flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
struct rte_eth_dev *dev = ctx->dev;
struct mlx5_flow_dv_dest_array_resource *resource =
- container_of(entry, typeof(*resource), entry);
+ container_of(entry, typeof(*resource), entry);
uint32_t idx = 0;
if (ctx_resource->num_of_dest == resource->num_of_dest &&
@@ -10921,9 +10991,7 @@ flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
@@ -11028,9 +11096,9 @@ flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_dest_array_clone_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry __rte_unused,
+ void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
@@ -11056,8 +11124,8 @@ flow_dv_dest_array_clone_cb(struct mlx5_list *list __rte_unused,
}
void
-flow_dv_dest_array_clone_free_cb(struct mlx5_list *list __rte_unused,
- struct mlx5_list_entry *entry)
+flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_dest_array_resource *resource =
container_of(entry, typeof(*resource), entry);
@@ -13531,7 +13599,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
}
void
-flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_matcher *resource = container_of(entry,
@@ -13568,19 +13636,10 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
return ret;
}
-/**
- * Release encap_decap resource.
- *
- * @param list
- * Pointer to the hash list.
- * @param entry
- * Pointer to exist resource entry object.
- */
void
-flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
- struct mlx5_hlist_entry *entry)
+flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_encap_decap_resource *res =
container_of(entry, typeof(*res), entry);
@@ -13640,8 +13699,8 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
- struct mlx5_hlist_entry *entry)
+flow_dv_modify_remove_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_modify_hdr_resource *res =
container_of(entry, typeof(*res), entry);
@@ -13673,10 +13732,9 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_port_id_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_port_id_action_resource *resource =
container_of(entry, typeof(*resource), entry);
@@ -13730,10 +13788,9 @@ flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
}
void
-flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct mlx5_dev_ctx_shared *sh = list->ctx;
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *resource =
container_of(entry, typeof(*resource), entry);
@@ -13802,7 +13859,7 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
@@ -13850,7 +13907,7 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
+flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_dest_array_resource *resource =
@@ -222,17 +222,14 @@ int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
bool standalone);
-struct mlx5_list_entry *mlx5_hrxq_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused, void *cb_ctx);
-int mlx5_hrxq_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
+struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
+int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
-void mlx5_hrxq_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-struct mlx5_list_entry *mlx5_hrxq_clone_cb(struct mlx5_list *list,
+void mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
struct mlx5_list_entry *entry,
void *cb_ctx __rte_unused);
-void mlx5_hrxq_clone_free_cb(struct mlx5_list *list,
+void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc);
@@ -2093,25 +2093,10 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
return ret;
}
-/**
- * Match an Rx Hash queue.
- *
- * @param list
- * mlx5 list pointer.
- * @param entry
- * Hash queue entry pointer.
- * @param cb_ctx
- * Context of the callback function.
- *
- * @return
- * 0 if match, none zero if not match.
- */
int
-mlx5_hrxq_match_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *cb_ctx)
+mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -2251,10 +2236,9 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
* Hash queue entry pointer.
*/
void
-mlx5_hrxq_remove_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
__mlx5_hrxq_remove(dev, hrxq);
@@ -2305,25 +2289,10 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
return NULL;
}
-/**
- * Create an Rx Hash queue.
- *
- * @param list
- * mlx5 list pointer.
- * @param entry
- * Hash queue entry pointer.
- * @param cb_ctx
- * Context of the callback function.
- *
- * @return
- * queue entry on success, NULL otherwise.
- */
struct mlx5_list_entry *
-mlx5_hrxq_create_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry __rte_unused,
- void *cb_ctx)
+mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq;
@@ -2333,11 +2302,10 @@ mlx5_hrxq_create_cb(struct mlx5_list *list,
}
struct mlx5_list_entry *
-mlx5_hrxq_clone_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry,
+mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx __rte_unused)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx = 0;
@@ -2351,10 +2319,9 @@ mlx5_hrxq_clone_cb(struct mlx5_list *list,
}
void
-mlx5_hrxq_clone_free_cb(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
+mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -8,257 +8,6 @@
#include "mlx5_utils.h"
-
-/********************* mlx5 list ************************/
-
-struct mlx5_list *
-mlx5_list_create(const char *name, void *ctx,
- mlx5_list_create_cb cb_create,
- mlx5_list_match_cb cb_match,
- mlx5_list_remove_cb cb_remove,
- mlx5_list_clone_cb cb_clone,
- mlx5_list_clone_free_cb cb_clone_free)
-{
- struct mlx5_list *list;
- int i;
-
- if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
- !cb_clone_free) {
- rte_errno = EINVAL;
- return NULL;
- }
- list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
- if (!list)
- return NULL;
- if (name)
- snprintf(list->name, sizeof(list->name), "%s", name);
- list->ctx = ctx;
- list->cb_create = cb_create;
- list->cb_match = cb_match;
- list->cb_remove = cb_remove;
- list->cb_clone = cb_clone;
- list->cb_clone_free = cb_clone_free;
- rte_rwlock_init(&list->lock);
- DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
- for (i = 0; i <= RTE_MAX_LCORE; i++)
- LIST_INIT(&list->cache[i].h);
- return list;
-}
-
-static struct mlx5_list_entry *
-__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
-{
- struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
- uint32_t ret;
-
- while (entry != NULL) {
- if (list->cb_match(list, entry, ctx) == 0) {
- if (reuse) {
- ret = __atomic_add_fetch(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED) - 1;
- DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
- list->name, (void *)entry,
- entry->ref_cnt);
- } else if (lcore_index < RTE_MAX_LCORE) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
- }
- if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
- return entry;
- if (reuse && ret == 0)
- entry->ref_cnt--; /* Invalid entry. */
- }
- entry = LIST_NEXT(entry, next);
- }
- return NULL;
-}
-
-struct mlx5_list_entry *
-mlx5_list_lookup(struct mlx5_list *list, void *ctx)
-{
- struct mlx5_list_entry *entry = NULL;
- int i;
-
- rte_rwlock_read_lock(&list->lock);
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- entry = __list_lookup(list, i, ctx, false);
- if (entry)
- break;
- }
- rte_rwlock_read_unlock(&list->lock);
- return entry;
-}
-
-static struct mlx5_list_entry *
-mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
- struct mlx5_list_entry *gentry, void *ctx)
-{
- struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);
-
- if (unlikely(!lentry))
- return NULL;
- lentry->ref_cnt = 1u;
- lentry->gentry = gentry;
- lentry->lcore_idx = (uint32_t)lcore_index;
- LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
- return lentry;
-}
-
-static void
-__list_cache_clean(struct mlx5_list *list, int lcore_index)
-{
- struct mlx5_list_cache *c = &list->cache[lcore_index];
- struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
-
- while (inv_cnt != 0 && entry != NULL) {
- struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
-
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
- LIST_REMOVE(entry, next);
- list->cb_clone_free(list, entry);
- inv_cnt--;
- }
- entry = nentry;
- }
-}
-
-struct mlx5_list_entry *
-mlx5_list_register(struct mlx5_list *list, void *ctx)
-{
- struct mlx5_list_entry *entry, *local_entry;
- volatile uint32_t prev_gen_cnt = 0;
- int lcore_index = rte_lcore_index(rte_lcore_id());
-
- MLX5_ASSERT(list);
- MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
- if (unlikely(lcore_index == -1)) {
- rte_errno = ENOTSUP;
- return NULL;
- }
- /* 0. Free entries that was invalidated by other lcores. */
- __list_cache_clean(list, lcore_index);
- /* 1. Lookup in local cache. */
- local_entry = __list_lookup(list, lcore_index, ctx, true);
- if (local_entry)
- return local_entry;
- /* 2. Lookup with read lock on global list, reuse if found. */
- rte_rwlock_read_lock(&list->lock);
- entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
- if (likely(entry)) {
- rte_rwlock_read_unlock(&list->lock);
- return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
- }
- prev_gen_cnt = list->gen_cnt;
- rte_rwlock_read_unlock(&list->lock);
- /* 3. Prepare new entry for global list and for cache. */
- entry = list->cb_create(list, entry, ctx);
- if (unlikely(!entry))
- return NULL;
- local_entry = list->cb_clone(list, entry, ctx);
- if (unlikely(!local_entry)) {
- list->cb_remove(list, entry);
- return NULL;
- }
- entry->ref_cnt = 1u;
- local_entry->ref_cnt = 1u;
- local_entry->gentry = entry;
- local_entry->lcore_idx = (uint32_t)lcore_index;
- rte_rwlock_write_lock(&list->lock);
- /* 4. Make sure the same entry was not created before the write lock. */
- if (unlikely(prev_gen_cnt != list->gen_cnt)) {
- struct mlx5_list_entry *oentry = __list_lookup(list,
- RTE_MAX_LCORE,
- ctx, true);
-
- if (unlikely(oentry)) {
- /* 4.5. Found real race!!, reuse the old entry. */
- rte_rwlock_write_unlock(&list->lock);
- list->cb_remove(list, entry);
- list->cb_clone_free(list, local_entry);
- return mlx5_list_cache_insert(list, lcore_index, oentry,
- ctx);
- }
- }
- /* 5. Update lists. */
- LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
- list->gen_cnt++;
- rte_rwlock_write_unlock(&list->lock);
- LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
- __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
- DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
- (void *)entry, entry->ref_cnt);
- return local_entry;
-}
-
-int
-mlx5_list_unregister(struct mlx5_list *list,
- struct mlx5_list_entry *entry)
-{
- struct mlx5_list_entry *gentry = entry->gentry;
- int lcore_idx;
-
- if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
- return 1;
- lcore_idx = rte_lcore_index(rte_lcore_id());
- MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
- if (entry->lcore_idx == (uint32_t)lcore_idx) {
- LIST_REMOVE(entry, next);
- list->cb_clone_free(list, entry);
- } else if (likely(lcore_idx != -1)) {
- __atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
- __ATOMIC_RELAXED);
- } else {
- return 0;
- }
- if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
- return 1;
- rte_rwlock_write_lock(&list->lock);
- if (likely(gentry->ref_cnt == 0)) {
- LIST_REMOVE(gentry, next);
- rte_rwlock_write_unlock(&list->lock);
- list->cb_remove(list, gentry);
- __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
- DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
- list->name, (void *)gentry);
- return 0;
- }
- rte_rwlock_write_unlock(&list->lock);
- return 1;
-}
-
-void
-mlx5_list_destroy(struct mlx5_list *list)
-{
- struct mlx5_list_entry *entry;
- int i;
-
- MLX5_ASSERT(list);
- for (i = 0; i <= RTE_MAX_LCORE; i++) {
- while (!LIST_EMPTY(&list->cache[i].h)) {
- entry = LIST_FIRST(&list->cache[i].h);
- LIST_REMOVE(entry, next);
- if (i == RTE_MAX_LCORE) {
- list->cb_remove(list, entry);
- DRV_LOG(DEBUG, "mlx5 list %s entry %p "
- "destroyed.", list->name,
- (void *)entry);
- } else {
- list->cb_clone_free(list, entry);
- }
- }
- }
- mlx5_free(list);
-}
-
-uint32_t
-mlx5_list_get_entry_num(struct mlx5_list *list)
-{
- MLX5_ASSERT(list);
- return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
-}
-
/********************* Indexed pool **********************/
static inline void
@@ -296,203 +296,6 @@ log2above(unsigned int v)
return l + r;
}
-/************************ mlx5 list *****************************/
-
-/** Maximum size of string for naming. */
-#define MLX5_NAME_SIZE 32
-
-struct mlx5_list;
-
-/**
- * Structure of the entry in the mlx5 list, user should define its own struct
- * that contains this in order to store the data.
- */
-struct mlx5_list_entry {
- LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- uint32_t ref_cnt; /* 0 means, entry is invalid. */
- uint32_t lcore_idx;
- struct mlx5_list_entry *gentry;
-};
-
-struct mlx5_list_cache {
- LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
-} __rte_cache_aligned;
-
-/**
- * Type of callback function for entry removal.
- *
- * @param list
- * The mlx5 list.
- * @param entry
- * The entry in the list.
- */
-typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-
-/**
- * Type of function for user defined matching.
- *
- * @param list
- * The mlx5 list.
- * @param entry
- * The entry in the list.
- * @param ctx
- * The pointer to new entry context.
- *
- * @return
- * 0 if matching, non-zero number otherwise.
- */
-typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *ctx);
-
-typedef struct mlx5_list_entry *(*mlx5_list_clone_cb)
- (struct mlx5_list *list,
- struct mlx5_list_entry *entry, void *ctx);
-
-typedef void (*mlx5_list_clone_free_cb)(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-
-/**
- * Type of function for user defined mlx5 list entry creation.
- *
- * @param list
- * The mlx5 list.
- * @param entry
- * The new allocated entry, NULL if list entry size unspecified,
- * New entry has to be allocated in callback and return.
- * @param ctx
- * The pointer to new entry context.
- *
- * @return
- * Pointer of entry on success, NULL otherwise.
- */
-typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
- (struct mlx5_list *list,
- struct mlx5_list_entry *entry,
- void *ctx);
-
-/**
- * Linked mlx5 list structure.
- *
- * Entry in mlx5 list could be reused if entry already exists,
- * reference count will increase and the existing entry returns.
- *
- * When destroy an entry from list, decrease reference count and only
- * destroy when no further reference.
- *
- * Linked list is designed for limited number of entries,
- * read mostly, less modification.
- *
- * For huge amount of entries, please consider hash list.
- *
- */
-struct mlx5_list {
- char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
- volatile uint32_t gen_cnt;
- /* List modification will update generation count. */
- volatile uint32_t count; /* number of entries in list. */
- void *ctx; /* user objects target to callback. */
- rte_rwlock_t lock; /* read/write lock. */
- mlx5_list_create_cb cb_create; /**< entry create callback. */
- mlx5_list_match_cb cb_match; /**< entry match callback. */
- mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
- mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
- mlx5_list_clone_free_cb cb_clone_free;
- struct mlx5_list_cache cache[RTE_MAX_LCORE + 1];
- /* Lcore cache, last index is the global cache. */
-};
-
-/**
- * Create a mlx5 list.
- *
- * @param list
- * Pointer to the hast list table.
- * @param name
- * Name of the mlx5 list.
- * @param ctx
- * Pointer to the list context data.
- * @param cb_create
- * Callback function for entry create.
- * @param cb_match
- * Callback function for entry match.
- * @param cb_remove
- * Callback function for entry remove.
- * @return
- * List pointer on success, otherwise NULL.
- */
-struct mlx5_list *mlx5_list_create(const char *name, void *ctx,
- mlx5_list_create_cb cb_create,
- mlx5_list_match_cb cb_match,
- mlx5_list_remove_cb cb_remove,
- mlx5_list_clone_cb cb_clone,
- mlx5_list_clone_free_cb cb_clone_free);
-
-/**
- * Search an entry matching the key.
- *
- * Result returned might be destroyed by other thread, must use
- * this function only in main thread.
- *
- * @param list
- * Pointer to the mlx5 list.
- * @param ctx
- * Common context parameter used by entry callback function.
- *
- * @return
- * Pointer of the list entry if found, NULL otherwise.
- */
-struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
- void *ctx);
-
-/**
- * Reuse or create an entry to the mlx5 list.
- *
- * @param list
- * Pointer to the hast list table.
- * @param ctx
- * Common context parameter used by callback function.
- *
- * @return
- * registered entry on success, NULL otherwise
- */
-struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
- void *ctx);
-
-/**
- * Remove an entry from the mlx5 list.
- *
- * User should guarantee the validity of the entry.
- *
- * @param list
- * Pointer to the hast list.
- * @param entry
- * Entry to be removed from the mlx5 list table.
- * @return
- * 0 on entry removed, 1 on entry still referenced.
- */
-int mlx5_list_unregister(struct mlx5_list *list,
- struct mlx5_list_entry *entry);
-
-/**
- * Destroy the mlx5 list.
- *
- * @param list
- * Pointer to the mlx5 list.
- */
-void mlx5_list_destroy(struct mlx5_list *list);
-
-/**
- * Get entry number from the mlx5 list.
- *
- * @param list
- * Pointer to the hast list.
- * @return
- * mlx5 list entry number.
- */
-uint32_t
-mlx5_list_get_entry_num(struct mlx5_list *list);
-
/********************************* indexed pool *************************/
/**
@@ -608,7 +608,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
- priv->hrxqs = mlx5_list_create("hrxq", eth_dev,
+ priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
mlx5_hrxq_clone_free_cb);