@@ -272,27 +272,27 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
goto error;
/* The resources below are only valid with DV support. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /* Init port id action cache list. */
- snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
- mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
+ /* Init port id action mlx5 list. */
+ snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
+ mlx5_list_create(&sh->port_id_action_list, s, 0, sh,
flow_dv_port_id_create_cb,
flow_dv_port_id_match_cb,
flow_dv_port_id_remove_cb);
- /* Init push vlan action cache list. */
- snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
- mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
+ /* Init push vlan action mlx5 list. */
+ snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
+ mlx5_list_create(&sh->push_vlan_action_list, s, 0, sh,
flow_dv_push_vlan_create_cb,
flow_dv_push_vlan_match_cb,
flow_dv_push_vlan_remove_cb);
- /* Init sample action cache list. */
- snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
- mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
+ /* Init sample action mlx5 list. */
+ snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
+ mlx5_list_create(&sh->sample_action_list, s, 0, sh,
flow_dv_sample_create_cb,
flow_dv_sample_match_cb,
flow_dv_sample_remove_cb);
- /* Init dest array action cache list. */
- snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
- mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
+ /* Init dest array action mlx5 list. */
+ snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
+ mlx5_list_create(&sh->dest_array_list, s, 0, sh,
flow_dv_dest_array_create_cb,
flow_dv_dest_array_match_cb,
flow_dv_dest_array_remove_cb);
@@ -509,8 +509,8 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
mlx5_release_tunnel_hub(sh, priv->dev_port);
sh->tunnel_hub = NULL;
}
- mlx5_cache_list_destroy(&sh->port_id_action_list);
- mlx5_cache_list_destroy(&sh->push_vlan_action_list);
+ mlx5_list_destroy(&sh->port_id_action_list);
+ mlx5_list_destroy(&sh->push_vlan_action_list);
mlx5_free_table_hash_list(priv);
}
@@ -1713,7 +1713,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
- mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+ mlx5_list_create(&priv->hrxqs, "hrxq", 0, eth_dev,
mlx5_hrxq_create_cb,
mlx5_hrxq_match_cb,
mlx5_hrxq_remove_cb);
@@ -1774,7 +1774,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_l3t_destroy(priv->mtr_profile_tbl);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
- mlx5_cache_list_destroy(&priv->hrxqs);
+ mlx5_list_destroy(&priv->hrxqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
@@ -1612,7 +1612,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
- mlx5_cache_list_destroy(&priv->hrxqs);
+ mlx5_list_destroy(&priv->hrxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
@@ -79,7 +79,7 @@ enum mlx5_flow_type {
MLX5_FLOW_TYPE_MAXI,
};
-/* Hash and cache list callback context. */
+/* Hlist and list callback context. */
struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
struct rte_flow_error *error;
@@ -1137,10 +1137,10 @@ struct mlx5_dev_ctx_shared {
struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
struct mlx5_hlist *modify_cmds;
struct mlx5_hlist *tag_table;
- struct mlx5_cache_list port_id_action_list; /* Port ID action cache. */
- struct mlx5_cache_list push_vlan_action_list; /* Push VLAN actions. */
- struct mlx5_cache_list sample_action_list; /* List of sample actions. */
- struct mlx5_cache_list dest_array_list;
+ struct mlx5_list port_id_action_list; /* Port ID action list. */
+ struct mlx5_list push_vlan_action_list; /* Push VLAN actions. */
+ struct mlx5_list sample_action_list; /* List of sample actions. */
+ struct mlx5_list dest_array_list;
/* List of destination array actions. */
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
void *default_miss_action; /* Default miss action. */
@@ -1244,7 +1244,7 @@ struct mlx5_ind_table_obj {
/* Hash Rx queue. */
__extension__
struct mlx5_hrxq {
- struct mlx5_cache_entry entry; /* Cache entry. */
+ struct mlx5_list_entry entry; /* List entry. */
uint32_t standalone:1; /* This object used in shared action. */
struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
RTE_STD_C11
@@ -1382,7 +1382,7 @@ struct mlx5_priv {
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
- struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
+ struct mlx5_list hrxqs; /* Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
/* Indirection tables. */
@@ -1392,7 +1392,6 @@ struct mlx5_priv {
/**< Verbs modify header action object. */
uint8_t ft_type; /**< Flow table type, Rx or Tx. */
uint8_t max_lro_msg_size;
- /* Tags resources cache. */
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
@@ -468,7 +468,7 @@ struct mlx5_flow_dv_match_params {
/* Matcher structure. */
struct mlx5_flow_dv_matcher {
- struct mlx5_cache_entry entry; /**< Pointer to the next element. */
+ struct mlx5_list_entry entry; /**< Pointer to the next element. */
struct mlx5_flow_tbl_resource *tbl;
/**< Pointer to the table(group) the matcher associated with. */
void *matcher_object; /**< Pointer to DV matcher */
@@ -548,7 +548,7 @@ struct mlx5_flow_dv_jump_tbl_resource {
/* Port ID resource structure. */
struct mlx5_flow_dv_port_id_action_resource {
- struct mlx5_cache_entry entry;
+ struct mlx5_list_entry entry;
void *action; /**< Action object. */
uint32_t port_id; /**< Port ID value. */
uint32_t idx; /**< Indexed pool memory index. */
@@ -556,7 +556,7 @@ struct mlx5_flow_dv_port_id_action_resource {
/* Push VLAN action resource structure */
struct mlx5_flow_dv_push_vlan_action_resource {
- struct mlx5_cache_entry entry; /* Cache entry. */
+	struct mlx5_list_entry entry; /* List entry. */
void *action; /**< Action object. */
uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
rte_be32_t vlan_tag; /**< VLAN tag value. */
@@ -591,7 +591,7 @@ struct mlx5_flow_tbl_data_entry {
/**< hash list entry, 64-bits key inside. */
struct mlx5_flow_tbl_resource tbl;
/**< flow table resource. */
- struct mlx5_cache_list matchers;
+ struct mlx5_list matchers;
/**< matchers' header associated with the flow table. */
struct mlx5_flow_dv_jump_tbl_resource jump;
/**< jump resource, at most one for each table created. */
@@ -632,7 +632,7 @@ struct mlx5_flow_sub_actions_idx {
/* Sample action resource structure. */
struct mlx5_flow_dv_sample_resource {
- struct mlx5_cache_entry entry; /**< Cache entry. */
+	struct mlx5_list_entry entry; /**< List entry. */
union {
void *verbs_action; /**< Verbs sample action object. */
void **sub_actions; /**< Sample sub-action array. */
@@ -654,7 +654,7 @@ struct mlx5_flow_dv_sample_resource {
/* Destination array action resource structure. */
struct mlx5_flow_dv_dest_array_resource {
- struct mlx5_cache_entry entry; /**< Cache entry. */
+	struct mlx5_list_entry entry; /**< List entry. */
uint32_t idx; /** Destination array action object index. */
uint8_t ft_type; /** Flow Table Type */
uint8_t num_of_dest; /**< Number of destination actions. */
@@ -1631,43 +1631,45 @@ struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
-int flow_dv_matcher_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *ctx);
-struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *ctx);
-void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_port_id_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_push_vlan_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_sample_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_sample_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
-
-int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_dest_array_create_cb
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
+int flow_dv_matcher_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *ctx);
+struct mlx5_list_entry *flow_dv_matcher_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
+ void *ctx);
+void flow_dv_matcher_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_port_id_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_port_id_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+void flow_dv_port_id_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_push_vlan_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_push_vlan_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+void flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_sample_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_sample_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+void flow_dv_sample_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
+
+int flow_dv_dest_array_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_dest_array_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx);
+void flow_dv_dest_array_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
uint32_t age_idx);
int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
@@ -3607,18 +3607,17 @@ flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
uint64_t key __rte_unused, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
- struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-
- cache_resource = container_of(entry,
- struct mlx5_flow_dv_encap_decap_resource,
- entry);
- if (resource->reformat_type == cache_resource->reformat_type &&
- resource->ft_type == cache_resource->ft_type &&
- resource->flags == cache_resource->flags &&
- resource->size == cache_resource->size &&
+ struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
+ struct mlx5_flow_dv_encap_decap_resource *resource;
+
+ resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
+ entry);
+ if (resource->reformat_type == ctx_resource->reformat_type &&
+ resource->ft_type == ctx_resource->ft_type &&
+ resource->flags == ctx_resource->flags &&
+ resource->size == ctx_resource->size &&
!memcmp((const void *)resource->buf,
- (const void *)cache_resource->buf,
+ (const void *)ctx_resource->buf,
resource->size))
return 0;
return -1;
@@ -3645,31 +3644,30 @@ flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5dv_dr_domain *domain;
- struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
+ struct mlx5_flow_dv_encap_decap_resource *resource;
uint32_t idx;
int ret;
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
domain = sh->rx_domain;
else
domain = sh->tx_domain;
/* Register new encap/decap resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &idx);
- if (!cache_resource) {
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
+ if (!resource) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
return NULL;
}
- *cache_resource = *resource;
- cache_resource->idx = idx;
- ret = mlx5_flow_os_create_flow_action_packet_reformat
- (sh->ctx, domain, cache_resource,
- &cache_resource->action);
+ *resource = *ctx_resource;
+ resource->idx = idx;
+ ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
+ resource,
+ &resource->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
rte_flow_error_set(ctx->error, ENOMEM,
@@ -3678,7 +3676,7 @@ flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
return NULL;
}
- return &cache_resource->entry;
+ return &resource->entry;
}
/**
@@ -3782,8 +3780,8 @@ flow_dv_jump_tbl_resource_register
}
int
-flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
@@ -3793,30 +3791,30 @@ flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
return ref->port_id != res->port_id;
}
-struct mlx5_cache_entry *
-flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_port_id_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
- struct mlx5_flow_dv_port_id_action_resource *cache;
+ struct mlx5_flow_dv_port_id_action_resource *resource;
uint32_t idx;
int ret;
/* Register new port id action resource. */
- cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
- if (!cache) {
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
+ if (!resource) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate port_id action cache memory");
+ "cannot allocate port_id action memory");
return NULL;
}
- *cache = *ref;
+ *resource = *ref;
ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
ref->port_id,
- &cache->action);
+ &resource->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
rte_flow_error_set(ctx->error, ENOMEM,
@@ -3824,8 +3822,8 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
"cannot create action");
return NULL;
}
- cache->idx = idx;
- return &cache->entry;
+ resource->idx = idx;
+ return &resource->entry;
}
/**
@@ -3833,8 +3831,8 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to port ID action resource.
+ * @param[in, out] ref
+ * Pointer to port ID action resource reference.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
@@ -3846,30 +3844,30 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
static int
flow_dv_port_id_action_resource_register
(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_port_id_action_resource *resource,
+ struct mlx5_flow_dv_port_id_action_resource *ref,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_cache_entry *entry;
- struct mlx5_flow_dv_port_id_action_resource *cache;
+ struct mlx5_list_entry *entry;
+ struct mlx5_flow_dv_port_id_action_resource *resource;
struct mlx5_flow_cb_ctx ctx = {
.error = error,
- .data = resource,
+ .data = ref,
};
- entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
+ entry = mlx5_list_register(&priv->sh->port_id_action_list, &ctx);
if (!entry)
return -rte_errno;
- cache = container_of(entry, typeof(*cache), entry);
- dev_flow->dv.port_id_action = cache;
- dev_flow->handle->rix_port_id_action = cache->idx;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->dv.port_id_action = resource;
+ dev_flow->handle->rix_port_id_action = resource->idx;
return 0;
}
int
-flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
@@ -3879,28 +3877,28 @@ flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
}
-struct mlx5_cache_entry *
-flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_push_vlan_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
- struct mlx5_flow_dv_push_vlan_action_resource *cache;
+ struct mlx5_flow_dv_push_vlan_action_resource *resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx;
int ret;
/* Register new port id action resource. */
- cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
- if (!cache) {
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
+ if (!resource) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate push_vlan action cache memory");
+ "cannot allocate push_vlan action memory");
return NULL;
}
- *cache = *ref;
+ *resource = *ref;
if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -3908,7 +3906,7 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
else
domain = sh->tx_domain;
ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
- &cache->action);
+ &resource->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
rte_flow_error_set(ctx->error, ENOMEM,
@@ -3916,8 +3914,8 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
"cannot create push vlan action");
return NULL;
}
- cache->idx = idx;
- return &cache->entry;
+ resource->idx = idx;
+ return &resource->entry;
}
/**
@@ -3925,8 +3923,8 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
*
* @param [in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to port ID action resource.
+ * @param[in, out] ref
+ * Pointer to port ID action resource reference.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
@@ -3938,25 +3936,25 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
static int
flow_dv_push_vlan_action_resource_register
(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_push_vlan_action_resource *resource,
+ struct mlx5_flow_dv_push_vlan_action_resource *ref,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_push_vlan_action_resource *cache;
- struct mlx5_cache_entry *entry;
+ struct mlx5_flow_dv_push_vlan_action_resource *resource;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.error = error,
- .data = resource,
+ .data = ref,
};
- entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
+ entry = mlx5_list_register(&priv->sh->push_vlan_action_list, &ctx);
if (!entry)
return -rte_errno;
- cache = container_of(entry, typeof(*cache), entry);
+ resource = container_of(entry, typeof(*resource), entry);
- dev_flow->handle->dvh.rix_push_vlan = cache->idx;
- dev_flow->dv.push_vlan_res = cache;
+ dev_flow->handle->dvh.rix_push_vlan = resource->idx;
+ dev_flow->dv.push_vlan_res = resource;
return 0;
}
@@ -9957,13 +9955,13 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
return NULL;
}
}
- MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
+ MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
key.level, key.id);
- mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
- flow_dv_matcher_create_cb,
- flow_dv_matcher_match_cb,
- flow_dv_matcher_remove_cb);
+ mlx5_list_create(&tbl_data->matchers, matcher_name, 0, sh,
+ flow_dv_matcher_create_cb,
+ flow_dv_matcher_match_cb,
+ flow_dv_matcher_remove_cb);
return &tbl_data->entry;
}
@@ -10091,7 +10089,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
tbl_data->tunnel->tunnel_id : 0,
tbl_data->group_id);
}
- mlx5_cache_list_destroy(&tbl_data->matchers);
+ mlx5_list_destroy(&tbl_data->matchers);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
@@ -10119,8 +10117,8 @@ flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
}
int
-flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_matcher *ref = ctx->data;
@@ -10133,15 +10131,15 @@ flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
(const void *)ref->mask.buf, ref->mask.size);
}
-struct mlx5_cache_entry *
-flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_matcher_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_dv_matcher *ref = ctx->data;
- struct mlx5_flow_dv_matcher *cache;
+ struct mlx5_flow_dv_matcher *resource;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
.match_mask = (void *)&ref->mask,
@@ -10150,29 +10148,30 @@ flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
typeof(*tbl), tbl);
int ret;
- cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
- if (!cache) {
+ resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
+ SOCKET_ID_ANY);
+ if (!resource) {
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot create matcher");
return NULL;
}
- *cache = *ref;
+ *resource = *ref;
dv_attr.match_criteria_enable =
- flow_dv_matcher_enable(cache->mask.buf);
+ flow_dv_matcher_enable(resource->mask.buf);
dv_attr.priority = ref->priority;
if (tbl->is_egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
- &cache->matcher_object);
+ &resource->matcher_object);
if (ret) {
- mlx5_free(cache);
+ mlx5_free(resource);
rte_flow_error_set(ctx->error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot create matcher");
return NULL;
}
- return &cache->entry;
+ return &resource->entry;
}
/**
@@ -10201,8 +10200,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
uint32_t group_id,
struct rte_flow_error *error)
{
- struct mlx5_cache_entry *entry;
- struct mlx5_flow_dv_matcher *cache;
+ struct mlx5_list_entry *entry;
+ struct mlx5_flow_dv_matcher *resource;
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_flow_tbl_data_entry *tbl_data;
struct mlx5_flow_cb_ctx ctx = {
@@ -10222,15 +10221,15 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
return -rte_errno; /* No need to refill the error info */
tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
ref->tbl = tbl;
- entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(&tbl_data->matchers, &ctx);
if (!entry) {
flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate ref memory");
}
- cache = container_of(entry, typeof(*cache), entry);
- dev_flow->handle->dvh.matcher = cache;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->handle->dvh.matcher = resource;
return 0;
}
@@ -10298,15 +10297,15 @@ flow_dv_tag_resource_register
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_tag_resource *cache_resource;
+ struct mlx5_flow_dv_tag_resource *resource;
struct mlx5_hlist_entry *entry;
entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
if (entry) {
- cache_resource = container_of
- (entry, struct mlx5_flow_dv_tag_resource, entry);
- dev_flow->handle->dvh.rix_tag = cache_resource->idx;
- dev_flow->dv.tag_resource = cache_resource;
+ resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
+ entry);
+ dev_flow->handle->dvh.rix_tag = resource->idx;
+ dev_flow->dv.tag_resource = resource;
return 0;
}
return -rte_errno;
@@ -10633,68 +10632,69 @@ flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
}
int
-flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
- struct mlx5_flow_dv_sample_resource *resource = ctx->data;
- struct mlx5_flow_dv_sample_resource *cache_resource =
- container_of(entry, typeof(*cache_resource), entry);
-
- if (resource->ratio == cache_resource->ratio &&
- resource->ft_type == cache_resource->ft_type &&
- resource->ft_id == cache_resource->ft_id &&
- resource->set_action == cache_resource->set_action &&
- !memcmp((void *)&resource->sample_act,
- (void *)&cache_resource->sample_act,
+ struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
+ struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
+ typeof(*resource),
+ entry);
+
+ if (ctx_resource->ratio == resource->ratio &&
+ ctx_resource->ft_type == resource->ft_type &&
+ ctx_resource->ft_id == resource->ft_id &&
+ ctx_resource->set_action == resource->set_action &&
+ !memcmp((void *)&ctx_resource->sample_act,
+ (void *)&resource->sample_act,
sizeof(struct mlx5_flow_sub_actions_list))) {
/*
* Existing sample action should release the prepared
* sub-actions reference counter.
*/
flow_dv_sample_sub_actions_release(dev,
- &resource->sample_idx);
+ &ctx_resource->sample_idx);
return 0;
}
return 1;
}
-struct mlx5_cache_entry *
-flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
- struct mlx5_flow_dv_sample_resource *resource = ctx->data;
- void **sample_dv_actions = resource->sub_actions;
- struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
+ void **sample_dv_actions = ctx_resource->sub_actions;
+ struct mlx5_flow_dv_sample_resource *resource;
struct mlx5dv_dr_flow_sampler_attr sampler_attr;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
uint32_t idx = 0;
const uint32_t next_ft_step = 1;
- uint32_t next_ft_id = resource->ft_id + next_ft_step;
+ uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
uint8_t is_egress = 0;
uint8_t is_transfer = 0;
struct rte_flow_error *error = ctx->error;
/* Register new sample resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
- if (!cache_resource) {
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
+ if (!resource) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot allocate resource memory");
return NULL;
}
- *cache_resource = *resource;
+ *resource = *ctx_resource;
/* Create normal path table level */
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
is_transfer = 1;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
is_egress = 1;
tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
is_egress, is_transfer,
@@ -10707,8 +10707,8 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
"for sample");
goto error;
}
- cache_resource->normal_path_tbl = tbl;
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ resource->normal_path_tbl = tbl;
+ if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
if (!sh->default_miss_action) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -10717,33 +10717,33 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
"created");
goto error;
}
- sample_dv_actions[resource->sample_act.actions_num++] =
+ sample_dv_actions[ctx_resource->sample_act.actions_num++] =
sh->default_miss_action;
}
/* Create a DR sample action */
- sampler_attr.sample_ratio = cache_resource->ratio;
+ sampler_attr.sample_ratio = resource->ratio;
sampler_attr.default_next_table = tbl->obj;
- sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+ sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
&sample_dv_actions[0];
- sampler_attr.action = cache_resource->set_action;
+ sampler_attr.action = resource->set_action;
if (mlx5_os_flow_dr_create_flow_action_sampler
- (&sampler_attr, &cache_resource->verbs_action)) {
+ (&sampler_attr, &resource->verbs_action)) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create sample action");
goto error;
}
- cache_resource->idx = idx;
- cache_resource->dev = dev;
- return &cache_resource->entry;
+ resource->idx = idx;
+ resource->dev = dev;
+ return &resource->entry;
error:
- if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
+ if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
flow_dv_sample_sub_actions_release(dev,
- &cache_resource->sample_idx);
- if (cache_resource->normal_path_tbl)
+ &resource->sample_idx);
+ if (resource->normal_path_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
- cache_resource->normal_path_tbl);
+ resource->normal_path_tbl);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
return NULL;
@@ -10754,8 +10754,8 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in] resource
- * Pointer to sample resource.
+ * @param[in] ref
+ * Pointer to sample resource reference.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
@@ -10766,66 +10766,66 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
*/
static int
flow_dv_sample_resource_register(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_sample_resource *resource,
+ struct mlx5_flow_dv_sample_resource *ref,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv_sample_resource *cache_resource;
- struct mlx5_cache_entry *entry;
+ struct mlx5_flow_dv_sample_resource *resource;
+ struct mlx5_list_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
- .data = resource,
+ .data = ref,
};
- entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
+ entry = mlx5_list_register(&priv->sh->sample_action_list, &ctx);
if (!entry)
return -rte_errno;
- cache_resource = container_of(entry, typeof(*cache_resource), entry);
- dev_flow->handle->dvh.rix_sample = cache_resource->idx;
- dev_flow->dv.sample_res = cache_resource;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->handle->dvh.rix_sample = resource->idx;
+ dev_flow->dv.sample_res = resource;
return 0;
}
int
-flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
- struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
struct rte_eth_dev *dev = ctx->dev;
- struct mlx5_flow_dv_dest_array_resource *cache_resource =
- container_of(entry, typeof(*cache_resource), entry);
+ struct mlx5_flow_dv_dest_array_resource *resource =
+ container_of(entry, typeof(*resource), entry);
uint32_t idx = 0;
- if (resource->num_of_dest == cache_resource->num_of_dest &&
- resource->ft_type == cache_resource->ft_type &&
- !memcmp((void *)cache_resource->sample_act,
- (void *)resource->sample_act,
- (resource->num_of_dest *
+ if (ctx_resource->num_of_dest == resource->num_of_dest &&
+ ctx_resource->ft_type == resource->ft_type &&
+ !memcmp((void *)resource->sample_act,
+ (void *)ctx_resource->sample_act,
+ (ctx_resource->num_of_dest *
sizeof(struct mlx5_flow_sub_actions_list)))) {
/*
* Existing sample action should release the prepared
* sub-actions reference counter.
*/
- for (idx = 0; idx < resource->num_of_dest; idx++)
+ for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
flow_dv_sample_sub_actions_release(dev,
- &resource->sample_idx[idx]);
+ &ctx_resource->sample_idx[idx]);
return 0;
}
return 1;
}
-struct mlx5_cache_entry *
-flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct rte_eth_dev *dev = ctx->dev;
- struct mlx5_flow_dv_dest_array_resource *cache_resource;
- struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+ struct mlx5_flow_dv_dest_array_resource *resource;
+ struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
struct mlx5_priv *priv = dev->data->dev_private;
@@ -10838,23 +10838,23 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
int ret;
/* Register new destination array resource. */
- cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
&res_idx);
- if (!cache_resource) {
+ if (!resource) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot allocate resource memory");
return NULL;
}
- *cache_resource = *resource;
+ *resource = *ctx_resource;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
domain = sh->rx_domain;
else
domain = sh->tx_domain;
- for (idx = 0; idx < resource->num_of_dest; idx++) {
+ for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5dv_dr_action_dest_attr),
@@ -10867,7 +10867,7 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
goto error;
}
dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
- sample_act = &resource->sample_act[idx];
+ sample_act = &ctx_resource->sample_act[idx];
action_flags = sample_act->action_flags;
switch (action_flags) {
case MLX5_FLOW_ACTION_QUEUE:
@@ -10898,9 +10898,9 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
/* create a dest array actioin */
ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
- cache_resource->num_of_dest,
+ resource->num_of_dest,
dest_attr,
- &cache_resource->action);
+ &resource->action);
if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -10908,19 +10908,18 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
"cannot create destination array action");
goto error;
}
- cache_resource->idx = res_idx;
- cache_resource->dev = dev;
- for (idx = 0; idx < resource->num_of_dest; idx++)
+ resource->idx = res_idx;
+ resource->dev = dev;
+ for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
mlx5_free(dest_attr[idx]);
- return &cache_resource->entry;
+ return &resource->entry;
error:
- for (idx = 0; idx < resource->num_of_dest; idx++) {
+ for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
flow_dv_sample_sub_actions_release(dev,
- &cache_resource->sample_idx[idx]);
+ &resource->sample_idx[idx]);
if (dest_attr[idx])
mlx5_free(dest_attr[idx]);
}
-
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
return NULL;
}
@@ -10930,8 +10929,8 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
- * @param[in] resource
- * Pointer to destination array resource.
+ * @param[in] ref
+ * Pointer to destination array resource reference.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
@@ -10942,25 +10941,25 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
*/
static int
flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow_dv_dest_array_resource *ref,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_dv_dest_array_resource *resource;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.dev = dev,
.error = error,
- .data = resource,
+ .data = ref,
};
- entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+ entry = mlx5_list_register(&priv->sh->dest_array_list, &ctx);
if (!entry)
return -rte_errno;
- cache_resource = container_of(entry, typeof(*cache_resource), entry);
- dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
- dev_flow->dv.dest_array_res = cache_resource;
+ resource = container_of(entry, typeof(*resource), entry);
+ dev_flow->handle->dvh.rix_dest_array = resource->idx;
+ dev_flow->dv.dest_array_res = resource;
return 0;
}
@@ -13402,14 +13401,15 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
}
void
-flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry)
+flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
- entry);
+ struct mlx5_flow_dv_matcher *resource = container_of(entry,
+ typeof(*resource),
+ entry);
- claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
- mlx5_free(cache);
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
+ mlx5_free(resource);
}
/**
@@ -13433,7 +13433,7 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
int ret;
MLX5_ASSERT(matcher->matcher_object);
- ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
+ ret = mlx5_list_unregister(&tbl->matchers, &matcher->entry);
flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
return ret;
}
@@ -13452,7 +13452,7 @@ flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
struct mlx5_flow_dv_encap_decap_resource *res =
- container_of(entry, typeof(*res), entry);
+ container_of(entry, typeof(*res), entry);
claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
@@ -13474,15 +13474,14 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
uint32_t encap_decap_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ struct mlx5_flow_dv_encap_decap_resource *resource;
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- encap_decap_idx);
- if (!cache_resource)
+ resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ encap_decap_idx);
+ if (!resource)
return 0;
- MLX5_ASSERT(cache_resource->action);
- return mlx5_hlist_unregister(priv->sh->encaps_decaps,
- &cache_resource->entry);
+ MLX5_ASSERT(resource->action);
+ return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
}
/**
@@ -13544,15 +13543,15 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
+flow_dv_port_id_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct mlx5_flow_dv_port_id_action_resource *cache =
- container_of(entry, typeof(*cache), entry);
+ struct mlx5_flow_dv_port_id_action_resource *resource =
+ container_of(entry, typeof(*resource), entry);
- claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
+ claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
}
/**
@@ -13571,14 +13570,14 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
uint32_t port_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_port_id_action_resource *cache;
+ struct mlx5_flow_dv_port_id_action_resource *resource;
- cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
- if (!cache)
+ resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
+ if (!resource)
return 0;
- MLX5_ASSERT(cache->action);
- return mlx5_cache_unregister(&priv->sh->port_id_action_list,
- &cache->entry);
+ MLX5_ASSERT(resource->action);
+ return mlx5_list_unregister(&priv->sh->port_id_action_list,
+ &resource->entry);
}
/**
@@ -13601,15 +13600,15 @@ flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
}
void
-flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
+flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
{
struct mlx5_dev_ctx_shared *sh = list->ctx;
- struct mlx5_flow_dv_push_vlan_action_resource *cache =
- container_of(entry, typeof(*cache), entry);
+ struct mlx5_flow_dv_push_vlan_action_resource *resource =
+ container_of(entry, typeof(*resource), entry);
- claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
- mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
+ claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
}
/**
@@ -13628,15 +13627,15 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_push_vlan_action_resource *cache;
+ struct mlx5_flow_dv_push_vlan_action_resource *resource;
uint32_t idx = handle->dvh.rix_push_vlan;
- cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
- if (!cache)
+ resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+ if (!resource)
return 0;
- MLX5_ASSERT(cache->action);
- return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
- &cache->entry);
+ MLX5_ASSERT(resource->action);
+ return mlx5_list_unregister(&priv->sh->push_vlan_action_list,
+ &resource->entry);
}
/**
@@ -13673,26 +13672,24 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
}
void
-flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry)
+flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_flow_dv_sample_resource *cache_resource =
- container_of(entry, typeof(*cache_resource), entry);
- struct rte_eth_dev *dev = cache_resource->dev;
+ struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
+ typeof(*resource),
+ entry);
+ struct rte_eth_dev *dev = resource->dev;
struct mlx5_priv *priv = dev->data->dev_private;
- if (cache_resource->verbs_action)
+ if (resource->verbs_action)
claim_zero(mlx5_flow_os_destroy_flow_action
- (cache_resource->verbs_action));
- if (cache_resource->normal_path_tbl)
+ (resource->verbs_action));
+ if (resource->normal_path_tbl)
flow_dv_tbl_resource_release(MLX5_SH(dev),
- cache_resource->normal_path_tbl);
- flow_dv_sample_sub_actions_release(dev,
- &cache_resource->sample_idx);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
- cache_resource->idx);
- DRV_LOG(DEBUG, "sample resource %p: removed",
- (void *)cache_resource);
+ resource->normal_path_tbl);
+ flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
+ DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
}
/**
@@ -13711,38 +13708,36 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5_flow_dv_sample_resource *resource;
- cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
- handle->dvh.rix_sample);
- if (!cache_resource)
+ resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ handle->dvh.rix_sample);
+ if (!resource)
return 0;
- MLX5_ASSERT(cache_resource->verbs_action);
- return mlx5_cache_unregister(&priv->sh->sample_action_list,
- &cache_resource->entry);
+ MLX5_ASSERT(resource->verbs_action);
+ return mlx5_list_unregister(&priv->sh->sample_action_list,
+ &resource->entry);
}
void
-flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry)
+flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry)
{
- struct mlx5_flow_dv_dest_array_resource *cache_resource =
- container_of(entry, typeof(*cache_resource), entry);
- struct rte_eth_dev *dev = cache_resource->dev;
+ struct mlx5_flow_dv_dest_array_resource *resource =
+ container_of(entry, typeof(*resource), entry);
+ struct rte_eth_dev *dev = resource->dev;
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t i = 0;
- MLX5_ASSERT(cache_resource->action);
- if (cache_resource->action)
- claim_zero(mlx5_flow_os_destroy_flow_action
- (cache_resource->action));
- for (; i < cache_resource->num_of_dest; i++)
+ MLX5_ASSERT(resource->action);
+ if (resource->action)
+ claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+ for (; i < resource->num_of_dest; i++)
flow_dv_sample_sub_actions_release(dev,
- &cache_resource->sample_idx[i]);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
- cache_resource->idx);
+ &resource->sample_idx[i]);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
DRV_LOG(DEBUG, "destination array resource %p: removed",
- (void *)cache_resource);
+ (void *)resource);
}
/**
@@ -13761,15 +13756,15 @@ flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_handle *handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_dv_dest_array_resource *cache;
+ struct mlx5_flow_dv_dest_array_resource *resource;
- cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
- handle->dvh.rix_dest_array);
- if (!cache)
+ resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ handle->dvh.rix_dest_array);
+ if (!resource)
return 0;
- MLX5_ASSERT(cache->action);
- return mlx5_cache_unregister(&priv->sh->dest_array_list,
- &cache->entry);
+ MLX5_ASSERT(resource->action);
+ return mlx5_list_unregister(&priv->sh->dest_array_list,
+ &resource->entry);
}
static void
@@ -14619,7 +14614,7 @@ __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
tbl = container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_cache_unregister(&tbl->matchers,
+ mlx5_list_unregister(&tbl->matchers,
&color_rule->matcher->entry);
TAILQ_REMOVE(&sub_policy->color_rules[i],
color_rule, next_port);
@@ -15412,8 +15407,8 @@ flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
if (mtrmng->def_matcher[i]) {
tbl = container_of(mtrmng->def_matcher[i]->tbl,
struct mlx5_flow_tbl_data_entry, tbl);
- mlx5_cache_unregister(&tbl->matchers,
- &mtrmng->def_matcher[i]->entry);
+ mlx5_list_unregister(&tbl->matchers,
+ &mtrmng->def_matcher[i]->entry);
mtrmng->def_matcher[i] = NULL;
}
for (j = 0; j < MLX5_REG_BITS; j++) {
@@ -15422,8 +15417,8 @@ flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
container_of(mtrmng->drop_matcher[i][j]->tbl,
struct mlx5_flow_tbl_data_entry,
tbl);
- mlx5_cache_unregister(&tbl->matchers,
- &mtrmng->drop_matcher[i][j]->entry);
+ mlx5_list_unregister(&tbl->matchers,
+ &mtrmng->drop_matcher[i][j]->entry);
mtrmng->drop_matcher[i][j] = NULL;
}
}
@@ -15521,7 +15516,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
struct mlx5_flow_dv_matcher **policy_matcher,
struct rte_flow_error *error)
{
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
@@ -15557,7 +15552,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
matcher.priority = priority;
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(&tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR, "Failed to register meter drop matcher.");
return -1;
@@ -15665,7 +15660,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_data_entry *tbl =
container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_cache_unregister(&tbl->matchers,
+ mlx5_list_unregister(&tbl->matchers,
&color_rule->matcher->entry);
}
mlx5_free(color_rule);
@@ -16017,7 +16012,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
0, &error);
uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
.size = sizeof(matcher.mask.buf) -
@@ -16063,7 +16058,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
matcher.crc = rte_raw_cksum
((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(&tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR, "Failed to register meter "
"drop default matcher.");
@@ -16100,7 +16095,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
matcher.crc = rte_raw_cksum
((const void *)matcher.mask.buf,
matcher.mask.size);
- entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+ entry = mlx5_list_register(&tbl_data->matchers, &ctx);
if (!entry) {
DRV_LOG(ERR,
"Failed to register meter drop matcher.");
@@ -16517,7 +16512,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_data_entry *tbl =
container_of(color_rule->matcher->tbl,
typeof(*tbl), tbl);
- mlx5_cache_unregister(&tbl->matchers,
+ mlx5_list_unregister(&tbl->matchers,
&color_rule->matcher->entry);
}
mlx5_free(color_rule);
@@ -222,13 +222,13 @@ int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
bool standalone);
-struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
-int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry,
+struct mlx5_list_entry *mlx5_hrxq_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused, void *cb_ctx);
+int mlx5_hrxq_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
void *cb_ctx);
-void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
+void mlx5_hrxq_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
@@ -2093,7 +2093,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
* Match an Rx Hash queue.
*
* @param list
- * Cache list pointer.
+ * mlx5 list pointer.
* @param entry
* Hash queue entry pointer.
* @param cb_ctx
@@ -2103,8 +2103,8 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
* 0 if match, none zero if not match.
*/
int
-mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry,
+mlx5_hrxq_match_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
void *cb_ctx)
{
struct rte_eth_dev *dev = list->ctx;
@@ -2242,13 +2242,13 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
* Index to Hash Rx queue to release.
*
* @param list
- * Cache list pointer.
+ * mlx5 list pointer.
* @param entry
* Hash queue entry pointer.
*/
void
-mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
+mlx5_hrxq_remove_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
{
struct rte_eth_dev *dev = list->ctx;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -2305,7 +2305,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
* Create an Rx Hash queue.
*
* @param list
- * Cache list pointer.
+ * mlx5 list pointer.
* @param entry
* Hash queue entry pointer.
* @param cb_ctx
@@ -2314,9 +2314,9 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
* @return
* queue entry on success, NULL otherwise.
*/
-struct mlx5_cache_entry *
-mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused,
void *cb_ctx)
{
struct rte_eth_dev *dev = list->ctx;
@@ -2344,7 +2344,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
};
@@ -2352,7 +2352,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
if (rss_desc->shared_rss) {
hrxq = __mlx5_hrxq_create(dev, rss_desc);
} else {
- entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+ entry = mlx5_list_register(&priv->hrxqs, &ctx);
if (!entry)
return 0;
hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -2382,7 +2382,7 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
if (!hrxq)
return 0;
if (!hrxq->standalone)
- return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+ return mlx5_list_unregister(&priv->hrxqs, &hrxq->entry);
__mlx5_hrxq_remove(dev, hrxq);
return 0;
}
@@ -2470,7 +2470,7 @@ mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+ return mlx5_list_get_entry_num(&priv->hrxqs);
}
/**
@@ -9,29 +9,29 @@
#include "mlx5_utils.h"
-/********************* Cache list ************************/
+/********************* MLX5 list ************************/
-static struct mlx5_cache_entry *
-mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
+static struct mlx5_list_entry *
+mlx5_list_default_create_cb(struct mlx5_list *list,
+ struct mlx5_list_entry *entry __rte_unused,
void *ctx __rte_unused)
{
return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}
static void
-mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
- struct mlx5_cache_entry *entry)
+mlx5_list_default_remove_cb(struct mlx5_list *list __rte_unused,
+ struct mlx5_list_entry *entry)
{
mlx5_free(entry);
}
int
-mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+mlx5_list_create(struct mlx5_list *list, const char *name,
uint32_t entry_size, void *ctx,
- mlx5_cache_create_cb cb_create,
- mlx5_cache_match_cb cb_match,
- mlx5_cache_remove_cb cb_remove)
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove)
{
MLX5_ASSERT(list);
if (!cb_match || (!cb_create ^ !cb_remove))
@@ -40,19 +40,19 @@ mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
snprintf(list->name, sizeof(list->name), "%s", name);
list->entry_sz = entry_size;
list->ctx = ctx;
- list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+ list->cb_create = cb_create ? cb_create : mlx5_list_default_create_cb;
list->cb_match = cb_match;
- list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+ list->cb_remove = cb_remove ? cb_remove : mlx5_list_default_remove_cb;
rte_rwlock_init(&list->lock);
- DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+ DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
LIST_INIT(&list->head);
return 0;
}
-static struct mlx5_cache_entry *
-__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+static struct mlx5_list_entry *
+__list_lookup(struct mlx5_list *list, void *ctx, bool reuse)
{
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
LIST_FOREACH(entry, &list->head, next) {
if (list->cb_match(list, entry, ctx))
@@ -60,7 +60,7 @@ __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
if (reuse) {
__atomic_add_fetch(&entry->ref_cnt, 1,
__ATOMIC_RELAXED);
- DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p ref++: %u.",
list->name, (void *)entry, entry->ref_cnt);
}
break;
@@ -68,33 +68,33 @@ __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
return entry;
}
-static struct mlx5_cache_entry *
-cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+static struct mlx5_list_entry *
+list_lookup(struct mlx5_list *list, void *ctx, bool reuse)
{
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
rte_rwlock_read_lock(&list->lock);
- entry = __cache_lookup(list, ctx, reuse);
+ entry = __list_lookup(list, ctx, reuse);
rte_rwlock_read_unlock(&list->lock);
return entry;
}
-struct mlx5_cache_entry *
-mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+struct mlx5_list_entry *
+mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
- return cache_lookup(list, ctx, false);
+ return list_lookup(list, ctx, false);
}
-struct mlx5_cache_entry *
-mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
{
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
uint32_t prev_gen_cnt = 0;
MLX5_ASSERT(list);
prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
/* Lookup with read lock, reuse if found. */
- entry = cache_lookup(list, ctx, true);
+ entry = list_lookup(list, ctx, true);
if (entry)
return entry;
/* Not found, append with write lock - block read from other threads. */
@@ -102,13 +102,13 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
/* If list changed by other threads before lock, search again. */
if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
/* Lookup and reuse w/o read lock. */
- entry = __cache_lookup(list, ctx, true);
+ entry = __list_lookup(list, ctx, true);
if (entry)
goto done;
}
entry = list->cb_create(list, entry, ctx);
if (!entry) {
- DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
+ DRV_LOG(ERR, "Failed to init mlx5 list %s entry %p.",
list->name, (void *)entry);
goto done;
}
@@ -116,7 +116,7 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
LIST_INSERT_HEAD(&list->head, entry, next);
__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
- DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.",
list->name, (void *)entry, entry->ref_cnt);
done:
rte_rwlock_write_unlock(&list->lock);
@@ -124,12 +124,12 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
}
int
-mlx5_cache_unregister(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
+mlx5_list_unregister(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
{
rte_rwlock_write_lock(&list->lock);
MLX5_ASSERT(entry && entry->next.le_prev);
- DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p ref--: %u.",
list->name, (void *)entry, entry->ref_cnt);
if (--entry->ref_cnt) {
rte_rwlock_write_unlock(&list->lock);
@@ -140,15 +140,15 @@ mlx5_cache_unregister(struct mlx5_cache_list *list,
LIST_REMOVE(entry, next);
list->cb_remove(list, entry);
rte_rwlock_write_unlock(&list->lock);
- DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
list->name, (void *)entry);
return 0;
}
void
-mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+mlx5_list_destroy(struct mlx5_list *list)
{
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
MLX5_ASSERT(list);
/* no LIST_FOREACH_SAFE, using while instead */
@@ -156,14 +156,14 @@ mlx5_cache_list_destroy(struct mlx5_cache_list *list)
entry = LIST_FIRST(&list->head);
LIST_REMOVE(entry, next);
list->cb_remove(list, entry);
- DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p destroyed.",
list->name, (void *)entry);
}
memset(list, 0, sizeof(*list));
}
uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
@@ -297,19 +297,19 @@ log2above(unsigned int v)
return l + r;
}
-/************************ cache list *****************************/
+/************************ mlx5 list *****************************/
/** Maximum size of string for naming. */
#define MLX5_NAME_SIZE 32
-struct mlx5_cache_list;
+struct mlx5_list;
/**
- * Structure of the entry in the cache list, user should define its own struct
+ * Structure of the entry in the mlx5 list, user should define its own struct
* that contains this in order to store the data.
*/
-struct mlx5_cache_entry {
- LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
+struct mlx5_list_entry {
+ LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
uint32_t ref_cnt; /* Reference count. */
};
@@ -317,18 +317,18 @@ struct mlx5_cache_entry {
* Type of callback function for entry removal.
*
* @param list
- * The cache list.
+ * The mlx5 list.
* @param entry
* The entry in the list.
*/
-typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
+typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
/**
* Type of function for user defined matching.
*
* @param list
- * The cache list.
+ * The mlx5 list.
* @param entry
* The entry in the list.
* @param ctx
@@ -337,14 +337,14 @@ typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
* @return
* 0 if matching, non-zero number otherwise.
*/
-typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry, void *ctx);
+typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,
+ struct mlx5_list_entry *entry, void *ctx);
/**
- * Type of function for user defined cache list entry creation.
+ * Type of function for user defined mlx5 list entry creation.
*
* @param list
- * The cache list.
+ * The mlx5 list.
* @param entry
* The new allocated entry, NULL if list entry size unspecified,
* New entry has to be allocated in callback and return.
@@ -354,46 +354,46 @@ typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
* @return
* Pointer of entry on success, NULL otherwise.
*/
-typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
- (struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry,
+typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
+ (struct mlx5_list *list,
+ struct mlx5_list_entry *entry,
void *ctx);
/**
- * Linked cache list structure.
+ * Linked mlx5 list structure.
*
- * Entry in cache list could be reused if entry already exists,
+ * Entry in mlx5 list could be reused if entry already exists,
* reference count will increase and the existing entry returns.
*
* When destroy an entry from list, decrease reference count and only
* destroy when no further reference.
*
- * Linked list cache is designed for limited number of entries cache,
+ * Linked list is designed for a limited number of entries,
* read mostly, less modification.
*
- * For huge amount of entries cache, please consider hash list cache.
+ * For a huge amount of entries, please consider hash list.
*
*/
-struct mlx5_cache_list {
- char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
+struct mlx5_list {
+ char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
uint32_t entry_sz; /**< Entry size, 0: use create callback. */
rte_rwlock_t lock; /* read/write lock. */
uint32_t gen_cnt; /* List modification will update generation count. */
uint32_t count; /* number of entries in list. */
void *ctx; /* user objects target to callback. */
- mlx5_cache_create_cb cb_create; /**< entry create callback. */
- mlx5_cache_match_cb cb_match; /**< entry match callback. */
- mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */
- LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
+ mlx5_list_create_cb cb_create; /**< entry create callback. */
+ mlx5_list_match_cb cb_match; /**< entry match callback. */
+ mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
+ LIST_HEAD(mlx5_list_head, mlx5_list_entry) head;
};
/**
- * Initialize a cache list.
+ * Create a mlx5 list.
*
* @param list
* Pointer to the hast list table.
* @param name
- * Name of the cache list.
+ * Name of the mlx5 list.
* @param entry_size
* Entry size to allocate, 0 to allocate by creation callback.
* @param ctx
@@ -407,11 +407,11 @@ struct mlx5_cache_list {
* @return
* 0 on success, otherwise failure.
*/
-int mlx5_cache_list_init(struct mlx5_cache_list *list,
+int mlx5_list_create(struct mlx5_list *list,
const char *name, uint32_t entry_size, void *ctx,
- mlx5_cache_create_cb cb_create,
- mlx5_cache_match_cb cb_match,
- mlx5_cache_remove_cb cb_remove);
+ mlx5_list_create_cb cb_create,
+ mlx5_list_match_cb cb_match,
+ mlx5_list_remove_cb cb_remove);
/**
* Search an entry matching the key.
@@ -420,18 +420,18 @@ int mlx5_cache_list_init(struct mlx5_cache_list *list,
* this function only in main thread.
*
* @param list
- * Pointer to the cache list.
+ * Pointer to the mlx5 list.
* @param ctx
* Common context parameter used by entry callback function.
*
* @return
- * Pointer of the cache entry if found, NULL otherwise.
+ * Pointer to the list entry if found, NULL otherwise.
*/
-struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
+struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
void *ctx);
/**
- * Reuse or create an entry to the cache list.
+ * Reuse or create an entry to the mlx5 list.
*
* @param list
* Pointer to the hast list table.
@@ -441,42 +441,42 @@ struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
* @return
* registered entry on success, NULL otherwise
*/
-struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
+struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
void *ctx);
/**
- * Remove an entry from the cache list.
+ * Remove an entry from the mlx5 list.
*
* User should guarantee the validity of the entry.
*
* @param list
* Pointer to the hast list.
* @param entry
- * Entry to be removed from the cache list table.
+ * Entry to be removed from the mlx5 list table.
* @return
* 0 on entry removed, 1 on entry still referenced.
*/
-int mlx5_cache_unregister(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry);
+int mlx5_list_unregister(struct mlx5_list *list,
+ struct mlx5_list_entry *entry);
/**
- * Destroy the cache list.
+ * Destroy the mlx5 list.
*
* @param list
- * Pointer to the cache list.
+ * Pointer to the mlx5 list.
*/
-void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
+void mlx5_list_destroy(struct mlx5_list *list);
/**
- * Get entry number from the cache list.
+ * Get entry number from the mlx5 list.
*
* @param list
* Pointer to the hast list.
* @return
- * Cache list entry number.
+ * mlx5 list entry number.
*/
uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
+mlx5_list_get_entry_num(struct mlx5_list *list);
/********************************* indexed pool *************************/
@@ -610,10 +610,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
- mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
- mlx5_hrxq_create_cb,
- mlx5_hrxq_match_cb,
- mlx5_hrxq_remove_cb);
+ mlx5_list_create(&priv->hrxqs, "hrxq", 0, eth_dev,
+ mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
+ mlx5_hrxq_remove_cb);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {