@@ -1159,7 +1159,12 @@ enum mlx5_aso_ct_state {
/* Generic ASO connection tracking structure. */
struct mlx5_aso_ct_action {
- LIST_ENTRY(mlx5_aso_ct_action) next; /* Pointer to the next ASO CT. */
+ union {
+ LIST_ENTRY(mlx5_aso_ct_action) next;
+ /* Pointer to the next ASO CT. Used only in SWS. */
+ struct mlx5_aso_ct_pool *pool;
+ /* Pointer to action pool. Used only in HWS. */
+ };
void *dr_action_orig; /* General action object for original dir. */
void *dr_action_rply; /* General action object for reply dir. */
uint32_t refcnt; /* Action used count in device flows. */
@@ -1173,15 +1178,30 @@ struct mlx5_aso_ct_action {
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+
/* ASO connection tracking software pool definition. */
struct mlx5_aso_ct_pool {
uint16_t index; /* Pool index in pools array. */
+	struct mlx5_indexed_pool *cts;
+	/* Indexed pool for ASO CT objects. Used only in HWS. */
struct mlx5_devx_obj *devx_obj;
- /* The first devx object in the bulk, used for freeing (not yet). */
- struct mlx5_aso_ct_action actions[MLX5_ASO_CT_ACTIONS_PER_POOL];
+ union {
+ void *dummy_action;
+ /* Dummy action to increase the reference count in the driver. */
+ struct mlx5dr_action *dr_action;
+ /* HWS action. */
+ };
+ struct mlx5_aso_ct_action actions[0];
/* CT action structures bulk. */
};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
LIST_HEAD(aso_ct_list, mlx5_aso_ct_action);
/* Pools management structure for ASO connection tracking pools. */
@@ -1647,6 +1667,7 @@ struct mlx5_priv {
LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo;
struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
+ struct mlx5_aso_ct_pool *hws_ctpool; /* HW steering's CT pool. */
#endif
};
@@ -83,6 +83,10 @@ enum {
#define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
+#define MLX5_ACTION_CTX_CT_GET_IDX MLX5_INDIRECT_ACT_CT_GET_IDX
+#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
+#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
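
For reference, these aliases reuse the existing indirect-action CT handle encoding, which packs the action type, the owner port and the object index into a single 32-bit value. A minimal standalone sketch of the round trip; the shift and type values here are illustrative stand-ins, not the real definitions from mlx5_flow.h:

#include <assert.h>
#include <stdint.h>

/* Illustrative values; stand-ins for the mlx5_flow.h definitions. */
#define TYPE_OFFSET 29	/* MLX5_INDIRECT_ACTION_TYPE_OFFSET stand-in */
#define OWNER_SHIFT 22	/* MLX5_INDIRECT_ACT_CT_OWNER_SHIFT stand-in */
#define TYPE_CT 3	/* MLX5_INDIRECT_ACTION_TYPE_CT stand-in */

static uint32_t
ct_gen_idx(uint16_t owner, uint32_t idx)
{
	return ((uint32_t)TYPE_CT << TYPE_OFFSET) |
	       ((uint32_t)owner << OWNER_SHIFT) | idx;
}

int
main(void)
{
	uint32_t handle = ct_gen_idx(5, 1000);

	/* GET_IDX keeps the bits below the owner shift, as above. */
	assert((handle & ((1u << OWNER_SHIFT) - 1)) == 1000);
	/* GET_OWNER takes the bits between owner shift and type offset. */
	assert(((handle >> OWNER_SHIFT) &
		((1u << (TYPE_OFFSET - OWNER_SHIFT)) - 1)) == 5);
	assert(handle >> TYPE_OFFSET == TYPE_CT);
	return 0;
}
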
@@ -903,6 +903,15 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
return -1;
}
+static inline struct mlx5_aso_ct_pool *
+__mlx5_aso_ct_get_pool(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct)
+{
+ if (likely(sh->config.dv_flow_en == 2))
+ return ct->pool;
+ return container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+}
+
/*
* Post a WQE to the ASO CT SQ to modify the context.
*
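
The helper above is the heart of the SWS/HWS split: SWS allocates CT actions as a bulk trailing the pool header, so the owning pool can be recovered with pointer arithmetic, while HWS objects come from an ipool and carry an explicit pool pointer in the union added earlier. A toy model of the container_of() arithmetic, which (like the driver) relies on the GCC extension that offsetof() accepts a variable array index:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for mlx5_aso_ct_action/mlx5_aso_ct_pool. */
struct toy_action { int offset; };
struct toy_pool {
	int index;
	struct toy_action actions[8]; /* trailing bulk, as in SWS */
};

int
main(void)
{
	struct toy_pool pool = { .index = 7 };
	struct toy_action *ct = &pool.actions[3];

	ct->offset = 3;
	/* offsetof(..., actions[ct->offset]) scales with the offset,
	 * so subtracting it always lands back on the pool header. */
	assert(container_of(ct, struct toy_pool,
			    actions[ct->offset]) == &pool);
	return 0;
}
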
@@ -945,7 +954,7 @@ mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);
sq->elts[sq->head & mask].ct = ct;
sq->elts[sq->head & mask].query_data = NULL;
- pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ pool = __mlx5_aso_ct_get_pool(sh, ct);
/* Each WQE will have a single CT object. */
wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
ct->offset);
@@ -1113,7 +1122,7 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,
wqe_idx = sq->head & mask;
sq->elts[wqe_idx].ct = ct;
sq->elts[wqe_idx].query_data = data;
- pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ pool = __mlx5_aso_ct_get_pool(sh, ct);
/* Each WQE will have a single CT object. */
wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
ct->offset);
@@ -1231,7 +1240,7 @@ mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
/* Waiting for wqe resource. */
rte_delay_us_sleep(10u);
} while (--poll_wqe_times);
- pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ pool = __mlx5_aso_ct_get_pool(sh, ct);
DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
ct->offset, pool->index);
return -1;
@@ -1267,7 +1276,7 @@ mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
/* Waiting for CQE ready, consider should block or sleep. */
rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
} while (--poll_cqe_times);
- pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ pool = __mlx5_aso_ct_get_pool(sh, ct);
DRV_LOG(ERR, "Fail to poll CQE for ASO CT %d in pool %d",
ct->offset, pool->index);
return -1;
@@ -1383,7 +1392,7 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
else
rte_delay_us_sleep(10u);
} while (--poll_wqe_times);
- pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ pool = __mlx5_aso_ct_get_pool(sh, ct);
DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
ct->offset, pool->index);
return -1;
@@ -12790,6 +12790,7 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
struct mlx5_devx_obj *obj = NULL;
uint32_t i;
uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
+ size_t mem_size;
obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
priv->sh->cdev->pdn,
@@ -12799,7 +12800,10 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
return NULL;
}
- pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ mem_size = sizeof(struct mlx5_aso_ct_action) *
+ MLX5_ASO_CT_ACTIONS_PER_POOL +
+ sizeof(*pool);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
claim_zero(mlx5_devx_cmd_destroy(obj));
@@ -14,9 +14,19 @@
#include "mlx5dr_send.h"
#include "mlx5_hws_cnt.h"
+#define MLX5_HW_INV_QUEUE UINT32_MAX
+
/* The maximum actions support in the flow. */
#define MLX5_HW_MAX_ACTS 16
+/*
+ * Ipool size threshold: pools at or below this size use the minimum
+ * per_core_cache value instead of the default one.
+ */
+#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
+/* The default min local cache size. */
+#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
+
/* Default push burst threshold. */
#define BURST_THR 32u
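
These two constants feed the same three-way ipool sizing decision in flow_hw_table_create() and flow_hw_ct_pool_create() below: a pool smaller than one trunk drops the per-core cache entirely, a pool up to the 512K threshold uses the minimal 512-entry cache, and only larger pools keep the caller's default. A sketch of the selection; the helper name is illustrative:

#include <stdint.h>

#define HW_IPOOL_SIZE_THRESHOLD (1 << 19)	/* 512K objects */
#define HW_IPOOL_CACHE_MIN (1 << 9)		/* 512 objects */

/* Mirror of the per_core_cache tuning done by the HWS pools. */
static void
ipool_cache_tune(uint32_t nb_objs, uint32_t *trunk_size,
		 uint32_t *per_core_cache)
{
	if (nb_objs < *trunk_size) {
		/* Tiny pool: one exact-size trunk, no cache at all. */
		*per_core_cache = 0;
		*trunk_size = nb_objs;
	} else if (nb_objs <= HW_IPOOL_SIZE_THRESHOLD) {
		/* Mid-size pool: minimal per-core cache. */
		*per_core_cache = HW_IPOOL_CACHE_MIN;
	}
	/* Above the threshold, keep the default cache size. */
}
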
@@ -323,6 +333,24 @@ flow_hw_tir_action_register(struct rte_eth_dev *dev,
return hrxq;
}
+static __rte_always_inline int
+flow_hw_ct_compile(struct rte_eth_dev *dev, uint32_t idx,
+ struct mlx5dr_rule_action *rule_act)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_action *ct;
+
+ ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
+ if (!ct || mlx5_aso_ct_available(priv->sh, ct))
+ return -1;
+ rule_act->action = priv->hws_ctpool->dr_action;
+ rule_act->aso_ct.offset = ct->offset;
+ rule_act->aso_ct.direction = ct->is_original ?
+ MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
+ MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
+ return 0;
+}
+
/**
* Destroy DR actions created by action template.
*
@@ -622,6 +650,10 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
action_src, action_dst, act_idx))
return -1;
break;
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ if (flow_hw_ct_compile(dev, idx, &acts->rule_acts[action_dst]))
+ return -1;
+ break;
default:
DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
break;
@@ -1057,6 +1089,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
bool reformat_used = false;
uint16_t action_pos;
uint16_t jump_pos;
+ uint32_t ct_idx;
int err;
flow_hw_modify_field_init(&mhdr, at);
@@ -1279,6 +1312,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
}
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ action_pos = at->actions_off[actions - action_start];
+ if (masks->conf) {
+ ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
+ ((uint32_t)(uintptr_t)actions->conf);
+ if (flow_hw_ct_compile(dev, ct_idx,
+ &acts->rule_acts[action_pos]))
+ goto err;
+ } else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, action_pos)) {
+ goto err;
+ }
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -1506,6 +1553,10 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev,
&rule_act->counter.offset))
return -1;
break;
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ if (flow_hw_ct_compile(dev, idx, rule_act))
+ return -1;
+ break;
default:
DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
break;
@@ -1691,6 +1742,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
uint64_t item_flags;
struct mlx5_hw_jump_action *jump;
struct mlx5_hrxq *hrxq;
+ uint32_t ct_idx;
cnt_id_t cnt_id;
action = &actions[act_data->action_src];
@@ -1824,6 +1876,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
return ret;
job->flow->cnt_id = act_data->shared_counter.id;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
+ ((uint32_t)(uintptr_t)action->conf);
+ if (flow_hw_ct_compile(dev, ct_idx,
+ &rule_acts[act_data->action_dst]))
+ return -1;
+ break;
default:
break;
}
@@ -2348,6 +2407,8 @@ flow_hw_table_create(struct rte_eth_dev *dev,
if (nb_flows < cfg.trunk_size) {
cfg.per_core_cache = 0;
cfg.trunk_size = nb_flows;
+ } else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
+ cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
}
	/* Check if too many templates are requested. */
if (nb_item_templates > max_tpl ||
@@ -2867,6 +2928,9 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_COUNT:
/* TODO: Validation logic */
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ /* TODO: Validation logic */
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -2893,6 +2957,7 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
[RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,
+ [RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
};
static int
@@ -2921,6 +2986,11 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
action_types[*curr_off] = MLX5DR_ACTION_TYP_CTR;
*curr_off = *curr_off + 1;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
+ *curr_off = *curr_off + 1;
+ break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
return -EINVAL;
@@ -3375,6 +3445,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
case RTE_FLOW_ITEM_TYPE_ICMP:
case RTE_FLOW_ITEM_TYPE_ICMP6:
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
break;
case RTE_FLOW_ITEM_TYPE_END:
items_end = true;
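
With RTE_FLOW_ITEM_TYPE_CONNTRACK accepted in pattern templates, rules can match on the conntrack classification result. A small usage sketch of the item from the generic rte_flow API (not part of this patch):

#include <rte_flow.h>

/* Match only packets that conntrack classified as valid. */
static const struct rte_flow_item_conntrack ct_spec = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item_conntrack ct_mask = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item ct_item = {
	.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
	.spec = &ct_spec,
	.mask = &ct_mask,
};
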
@@ -4570,6 +4641,84 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)
return -EINVAL;
}
+static void
+flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_aso_ct_pool *pool)
+{
+ if (pool->dr_action)
+ mlx5dr_action_destroy(pool->dr_action);
+ if (pool->devx_obj)
+ claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
+ if (pool->cts)
+ mlx5_ipool_destroy(pool->cts);
+ mlx5_free(pool);
+}
+
+static struct mlx5_aso_ct_pool *
+flow_hw_ct_pool_create(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *port_attr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pool *pool;
+ struct mlx5_devx_obj *obj;
+ uint32_t nb_cts = rte_align32pow2(port_attr->nb_cts);
+ uint32_t log_obj_size = rte_log2_u32(nb_cts);
+ struct mlx5_indexed_pool_config cfg = {
+ .size = sizeof(struct mlx5_aso_ct_action),
+ .trunk_size = 1 << 12,
+ .per_core_cache = 1 << 13,
+ .need_lock = 1,
+ .release_mem_en = !!priv->sh->config.reclaim_mode,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_hw_ct_action",
+ };
+ int reg_id;
+ uint32_t flags;
+
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+ if (!pool) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
+ priv->sh->cdev->pdn,
+ log_obj_size);
+ if (!obj) {
+ rte_errno = ENODATA;
+ DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
+ goto err;
+ }
+ pool->devx_obj = obj;
+ reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
+ flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
+ if (priv->sh->config.dv_esw_en && priv->master)
+ flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
+ pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
+ (struct mlx5dr_devx_obj *)obj,
+ reg_id - REG_C_0, flags);
+ if (!pool->dr_action)
+ goto err;
+	/*
+	 * No need for a local cache if the CT number is small, since the
+	 * flow insertion rate will be very limited in that case. Cap the
+	 * trunk size at the CT number instead of the default 4K.
+	 */
+ if (nb_cts <= cfg.trunk_size) {
+ cfg.per_core_cache = 0;
+ cfg.trunk_size = nb_cts;
+ } else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
+ cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
+ }
+ pool->cts = mlx5_ipool_create(&cfg);
+ if (!pool->cts)
+ goto err;
+ return pool;
+err:
+ flow_hw_ct_pool_destroy(dev, pool);
+ return NULL;
+}
+
/**
* Configure port HWS resources.
*
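
The DevX object in flow_hw_ct_pool_create() above is created in power-of-two bulks: the requested CT count is rounded up with rte_align32pow2() before taking the log for the firmware command. A worked example using the real rte_common.h helpers:

#include <assert.h>
#include <rte_common.h>

int
main(void)
{
	/* A request for 1000 CT objects allocates a 2^10 = 1024 bulk. */
	uint32_t nb_cts = rte_align32pow2(1000);

	assert(nb_cts == 1024);
	assert(rte_log2_u32(nb_cts) == 10);
	return 0;
}
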
@@ -4755,6 +4904,11 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
if (_queue_attr)
mlx5_free(_queue_attr);
+ if (port_attr->nb_cts) {
+ priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
+ if (!priv->hws_ctpool)
+ goto err;
+ }
if (port_attr->nb_counters) {
priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
nb_queue);
@@ -4763,6 +4917,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
return 0;
err:
+ if (priv->hws_ctpool) {
+ flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
+ priv->hws_ctpool = NULL;
+ }
flow_hw_free_vport_actions(priv);
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
if (priv->hw_drop[i])
@@ -4835,6 +4993,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
}
if (priv->hws_cpool)
mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
+ if (priv->hws_ctpool) {
+ flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
+ priv->hws_ctpool = NULL;
+ }
mlx5_free(priv->hw_q);
priv->hw_q = NULL;
claim_zero(mlx5dr_context_close(priv->dr_ctx));
@@ -4997,6 +5159,169 @@ flow_hw_clear_flow_metadata_config(void)
mlx5_flow_hw_flow_metadata_xmeta_en = 0;
}
+static int
+flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,
+ uint32_t idx,
+ struct rte_flow_error *error)
+{
+ uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
+ uint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
+ struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+ struct mlx5_priv *priv = owndev->data->dev_private;
+ struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
+ struct mlx5_aso_ct_action *ct;
+
+ ct = mlx5_ipool_get(pool->cts, ct_idx);
+ if (!ct) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid CT destruction index");
+ }
+ __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
+ __ATOMIC_RELAXED);
+ mlx5_ipool_free(pool->cts, ct_idx);
+ return 0;
+}
+
+static int
+flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t idx,
+ struct rte_flow_action_conntrack *profile,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
+ struct mlx5_aso_ct_action *ct;
+ uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
+ uint32_t ct_idx;
+
+ if (owner != PORT_ID(priv))
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Can't query CT object owned by another port");
+ ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
+ ct = mlx5_ipool_get(pool->cts, ct_idx);
+ if (!ct) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid CT query index");
+ }
+ profile->peer_port = ct->peer;
+ profile->is_original_dir = ct->is_original;
+ if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, profile))
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to query CT context");
+ return 0;
+}
+
+static int
+flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_modify_conntrack *action_conf,
+ uint32_t idx, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
+ struct mlx5_aso_ct_action *ct;
+ const struct rte_flow_action_conntrack *new_prf;
+ uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
+ uint32_t ct_idx;
+ int ret = 0;
+
+ if (PORT_ID(priv) != owner)
+ return rte_flow_error_set(error, EACCES,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Can't update CT object owned by another port");
+ ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
+ ct = mlx5_ipool_get(pool->cts, ct_idx);
+ if (!ct) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid CT update index");
+ }
+ new_prf = &action_conf->new_ct;
+ if (action_conf->direction)
+ ct->is_original = !!new_prf->is_original_dir;
+ if (action_conf->state) {
+ /* Only validate the profile when it needs to be updated. */
+ ret = mlx5_validate_action_ct(dev, new_prf, error);
+ if (ret)
+ return ret;
+ ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
+ if (ret)
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to send CT context update WQE");
+ if (queue != MLX5_HW_INV_QUEUE)
+ return 0;
+ /* Block until ready or a failure in synchronous mode. */
+ ret = mlx5_aso_ct_available(priv->sh, ct);
+ if (ret)
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Timeout to get the CT update");
+ }
+ return ret;
+}
+
+static struct rte_flow_action_handle *
+flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_action_conntrack *pro,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
+ struct mlx5_aso_ct_action *ct;
+ uint32_t ct_idx = 0;
+ int ret;
+
+ if (!pool) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "CT is not enabled");
+		return NULL;
+ }
+ ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
+ if (!ct) {
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to allocate CT object");
+		return NULL;
+ }
+ ct->offset = ct_idx - 1;
+ ct->is_original = !!pro->is_original_dir;
+ ct->peer = pro->peer_port;
+ ct->pool = pool;
+ if (mlx5_aso_ct_update_by_wqe(priv->sh, ct, pro)) {
+ mlx5_ipool_free(pool->cts, ct_idx);
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Failed to update CT");
+		return NULL;
+ }
+ if (queue == MLX5_HW_INV_QUEUE) {
+ ret = mlx5_aso_ct_available(priv->sh, ct);
+ if (ret) {
+ mlx5_ipool_free(pool->cts, ct_idx);
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Timeout to get the CT update");
+			return NULL;
+ }
+ }
+ return (struct rte_flow_action_handle *)(uintptr_t)
+ MLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);
+}
+
/**
* Create shared action.
*
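
From the application, the handlers above are reached through the generic indirect-action API dispatched below. A usage sketch assuming the port was configured with a nonzero CT count in rte_flow_port_attr (nb_cts in this tree) and with error handling trimmed:

#include <rte_flow.h>

/* Create, query back and destroy one CT object on a port. */
static int
ct_handle_roundtrip(uint16_t port_id)
{
	struct rte_flow_action_conntrack profile = {
		.peer_port = port_id,
		.is_original_dir = 1,
		.enable = 1,
		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
		.conf = &profile,
	};
	const struct rte_flow_indir_action_conf conf = {
		.ingress = 1,
	};
	struct rte_flow_action_handle *handle;
	struct rte_flow_error err;

	handle = rte_flow_action_handle_create(port_id, &conf,
					       &action, &err);
	if (handle == NULL)
		return -1;
	if (rte_flow_action_handle_query(port_id, handle, &profile, &err))
		return -1;
	return rte_flow_action_handle_destroy(port_id, handle, &err);
}
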
@@ -5044,6 +5369,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
handle = (struct rte_flow_action_handle *)
(uintptr_t)cnt_id;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ handle = flow_hw_conntrack_create(dev, queue, action->conf, error);
+ break;
default:
handle = flow_dv_action_create(dev, conf, action, error);
}
@@ -5079,10 +5407,18 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
void *user_data,
struct rte_flow_error *error)
{
+ uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+
-	RTE_SET_USED(queue);
RTE_SET_USED(attr);
RTE_SET_USED(user_data);
- return flow_dv_action_update(dev, handle, update, error);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ return flow_hw_conntrack_update(dev, queue, update, act_idx, error);
+ default:
+ return flow_dv_action_update(dev, handle, update, error);
+ }
}
/**
@@ -5121,6 +5457,8 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
switch (type) {
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ return flow_hw_conntrack_destroy(dev, act_idx, error);
default:
return flow_dv_action_destroy(dev, handle, error);
}
@@ -5274,6 +5612,8 @@ flow_hw_action_query(struct rte_eth_dev *dev,
switch (type) {
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_hw_query_counter(dev, act_idx, data, error);
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ return flow_hw_conntrack_query(dev, act_idx, data, error);
default:
return flow_dv_action_query(dev, handle, data, error);
}