@@ -952,13 +952,6 @@ struct mlx5_dev_ctx_shared *
MLX5_ASSERT(sh->devx_rx_uar);
MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
}
- sh->flow_id_pool = mlx5_flow_id_pool_alloc
- ((1 << HAIRPIN_FLOW_ID_BITS) - 1);
- if (!sh->flow_id_pool) {
- DRV_LOG(ERR, "can't create flow id pool");
- err = ENOMEM;
- goto error;
- }
#ifndef RTE_ARCH_64
/* Initialize UAR access locks for 32bit implementations. */
rte_spinlock_init(&sh->uar_lock_cq);
@@ -1020,8 +1013,6 @@ struct mlx5_dev_ctx_shared *
claim_zero(mlx5_glue->dealloc_pd(sh->pd));
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
- if (sh->flow_id_pool)
- mlx5_flow_id_pool_release(sh->flow_id_pool);
mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
@@ -1092,8 +1083,6 @@ struct mlx5_dev_ctx_shared *
mlx5_glue->devx_free_uar(sh->devx_rx_uar);
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
- if (sh->flow_id_pool)
- mlx5_flow_id_pool_release(sh->flow_id_pool);
pthread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
return;
@@ -652,7 +652,6 @@ struct mlx5_dev_ctx_shared {
void *devx_comp; /* DEVX async comp obj. */
struct mlx5_devx_obj *tis; /* TIS object. */
struct mlx5_devx_obj *td; /* Transport domain. */
- struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
void *tx_uar; /* Tx/packet pacing shared UAR. */
struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
/* Flex parser profiles information. */
@@ -3426,9 +3426,8 @@ struct mlx5_flow_tunnel_info {
struct rte_flow_action actions_rx[],
struct rte_flow_action actions_tx[],
struct rte_flow_item pattern_tx[],
- uint32_t *flow_id)
+ uint32_t flow_id)
{
- struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_raw_encap *raw_encap;
const struct rte_flow_action_raw_decap *raw_decap;
struct mlx5_rte_flow_action_set_tag *set_tag;
@@ -3438,7 +3437,6 @@ struct mlx5_flow_tunnel_info {
char *addr;
int encap = 0;

- mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
@@ -3507,7 +3505,7 @@ struct mlx5_flow_tunnel_info {
set_tag = (void *)actions_rx;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
- set_tag->data = *flow_id;
+ set_tag->data = flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
@@ -3516,7 +3514,7 @@ struct mlx5_flow_tunnel_info {
item->type = (enum rte_flow_item_type)
MLX5_RTE_FLOW_ITEM_TYPE_TAG;
tag_item = (void *)addr;
- tag_item->data = *flow_id;
+ tag_item->data = flow_id;
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
MLX5_ASSERT(set_tag->id > REG_NON);
item->spec = tag_item;
@@ -4360,7 +4358,6 @@ struct mlx5_flow_tunnel_info {
uint32_t i;
uint32_t idx = 0;
int hairpin_flow;
- uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
int ret;
@@ -4372,24 +4369,22 @@ struct mlx5_flow_tunnel_info {
external, hairpin_flow, error);
if (ret < 0)
return 0;
+ flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+ if (!flow) {
+ rte_errno = ENOMEM;
+ return 0;
+ }
if (hairpin_flow > 0) {
if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
rte_errno = EINVAL;
- return 0;
+ goto error;
}
flow_hairpin_split(dev, actions, actions_rx.actions,
actions_hairpin_tx.actions, items_tx.items,
- &hairpin_id);
+ idx);
p_actions_rx = actions_rx.actions;
}
- flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
- if (!flow) {
- rte_errno = ENOMEM;
- goto error_before_flow;
- }
flow->drv_type = flow_get_drv_type(dev, attr);
- if (hairpin_id != 0)
- flow->hairpin_flow_id = hairpin_id;
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
@@ -4517,11 +4512,7 @@ struct mlx5_flow_tunnel_info {
flow_drv_destroy(dev, flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
rte_errno = ret; /* Restore rte_errno. */
-error_before_flow:
ret = rte_errno;
- if (hairpin_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- hairpin_id);
rte_errno = ret;
wks->flow_idx = wks->flow_nested_idx;
if (wks->flow_nested_idx)
@@ -4662,9 +4653,6 @@ struct rte_flow *
*/
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
- if (flow->hairpin_flow_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- flow->hairpin_flow_id);
flow_drv_destroy(dev, flow);
if (list)
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
@@ -841,8 +841,6 @@ struct mlx5_fdir_flow {
uint32_t rix_flow; /* Index to flow. */
};

-#define HAIRPIN_FLOW_ID_BITS 28
-
/* Flow structure. */
struct rte_flow {
ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
@@ -850,13 +848,11 @@ struct rte_flow {
/**< Device flow handles that are part of the flow. */
uint32_t drv_type:2; /**< Driver type. */
uint32_t fdir:1; /**< Identifier of associated FDIR if any. */
- uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS;
- /**< The flow id used for hairpin. */
uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
+ uint32_t meter:16; /**< Holds flow meter id. */
uint32_t rix_mreg_copy;
/**< Index to metadata register copy table resource. */
uint32_t counter; /**< Holds flow counter. */
- uint16_t meter; /**< Holds flow meter id. */
} __rte_packed;

/* Thread specific flow workspace intermediate data. */
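The sketch below is not part of the patch; it is a minimal, self-contained C illustration of the idea the diff implements: the index handed back by the flow pool allocator doubles as the hairpin flow ID written into both the Rx tag and the Tx tag, so the dedicated flow-ID pool and its error-path release can be dropped. All names here (toy_pool, toy_flow, toy_hairpin_split) are invented for illustration; only the control flow mirrors the change.

/*
 * Standalone toy sketch, not mlx5 code: reuse a pool-returned, non-zero
 * entry index as the hairpin flow ID instead of drawing a second ID
 * from a dedicated ID pool.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_POOL_SIZE 8

struct toy_flow {
	uint32_t rx_tag_data; /* Value matched on the Rx side. */
	uint32_t tx_tag_data; /* Value set on the Tx side. */
	int used;
};

struct toy_pool {
	struct toy_flow entries[TOY_POOL_SIZE];
};

/* Allocate a zeroed entry; return its 1-based index (0 means failure). */
static uint32_t
toy_pool_zmalloc(struct toy_pool *pool, struct toy_flow **out)
{
	for (uint32_t i = 0; i < TOY_POOL_SIZE; i++) {
		if (!pool->entries[i].used) {
			memset(&pool->entries[i], 0, sizeof(pool->entries[i]));
			pool->entries[i].used = 1;
			*out = &pool->entries[i];
			return i + 1; /* 1-based, so 0 stays "invalid". */
		}
	}
	return 0;
}

/*
 * Mirrors the direction of the patch: the caller passes the flow index
 * by value and it is written into both tag sides, so no separate
 * hairpin ID allocation (or release on the error path) is needed.
 */
static void
toy_hairpin_split(struct toy_flow *flow, uint32_t flow_idx)
{
	flow->rx_tag_data = flow_idx;
	flow->tx_tag_data = flow_idx;
}

int
main(void)
{
	struct toy_pool pool;
	struct toy_flow *flow = NULL;
	uint32_t idx;

	memset(&pool, 0, sizeof(pool));
	idx = toy_pool_zmalloc(&pool, &flow);
	if (idx == 0)
		return 1;
	toy_hairpin_split(flow, idx);
	printf("flow %u: rx tag %u, tx tag %u\n",
	       idx, flow->rx_tag_data, flow->tx_tag_data);
	return 0;
}

The 1-based return in the toy allocator mirrors the way an index of 0 can keep meaning "no flow", which is what lets a pool index stand in for a separately allocated non-zero hairpin ID.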