@@ -722,8 +722,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = !!(dev_handle->act_flags &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
@@ -800,8 +799,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = !!(dev_handle->act_flags &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
@@ -2718,7 +2716,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
* help to do the optimization work for source code.
* If no decap actions, use the layers directly.
*/
- if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+ if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
return dev_flow->handle->layers;
/* Convert L3 layers with decap action. */
if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
@@ -339,6 +339,16 @@ enum mlx5_flow_drv_type {
MLX5_FLOW_TYPE_MAX,
};
+/* Fate action type. */
+enum mlx5_flow_fate_type {
+ MLX5_FLOW_FATE_NONE, /* Egress flow. */
+ MLX5_FLOW_FATE_QUEUE,
+ MLX5_FLOW_FATE_JUMP,
+ MLX5_FLOW_FATE_PORT_ID,
+ MLX5_FLOW_FATE_DROP,
+ MLX5_FLOW_FATE_MAX,
+};
+
/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
size_t size;
@@ -502,14 +512,14 @@ struct mlx5_flow_handle {
/**< Index to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
- uint64_t act_flags;
- /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
void *ib_flow; /**< Verbs flow pointer. */
struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
union {
uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
uint32_t mtr_flow_id; /**< Unique meter match flow id. */
};
+ uint32_t mark:1; /**< Metadata rxq mark flag. */
+ uint32_t fate_action:3; /**< Fate action type. */
union {
uint32_t hrxq; /**< Hash Rx queue object index. */
uint32_t jump; /**< Index to the jump action resource. */
@@ -624,6 +634,8 @@ struct mlx5_flow_verbs_workspace {
struct mlx5_flow {
struct rte_flow *flow; /**< Pointer to the main flow. */
uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+ uint64_t act_flags;
+ /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
bool external; /**< true if the flow is created external to PMD. */
uint8_t ingress; /**< 1 if the flow is ingress. */
union {
@@ -7409,9 +7409,11 @@ struct field_modify_info modify_tcp[] = {
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
+ dev_flow->handle->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
@@ -7440,6 +7442,7 @@ struct field_modify_info modify_tcp[] = {
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
+ dev_flow->handle->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
@@ -7484,6 +7487,7 @@ struct field_modify_info modify_tcp[] = {
break;
case RTE_FLOW_ACTION_TYPE_DROP:
action_flags |= MLX5_FLOW_ACTION_DROP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
MLX5_ASSERT(flow->rss.queue);
@@ -7491,6 +7495,7 @@ struct field_modify_info modify_tcp[] = {
flow->rss.queue_num = 1;
(*flow->rss.queue)[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
MLX5_ASSERT(flow->rss.queue);
@@ -7507,6 +7512,7 @@ struct field_modify_info modify_tcp[] = {
* when expanding items for RSS.
*/
action_flags |= MLX5_FLOW_ACTION_RSS;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
@@ -7664,6 +7670,7 @@ struct field_modify_info modify_tcp[] = {
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
@@ -7806,7 +7813,7 @@ struct field_modify_info modify_tcp[] = {
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- handle->act_flags = action_flags;
+ dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
@@ -8062,7 +8069,7 @@ struct field_modify_info modify_tcp[] = {
dh = dev_flow->handle;
dv_h = &dh->dvh;
n = dv->actions_n;
- if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
@@ -8085,8 +8092,7 @@ struct field_modify_info modify_tcp[] = {
dh->hrxq = UINT32_MAX;
dv->actions[n++] = drop_hrxq->action;
}
- } else if (dh->act_flags &
- (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
+ } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
@@ -8147,12 +8153,10 @@ struct field_modify_info modify_tcp[] = {
handle_idx, dh, next) {
/* hrxq is union, don't clear it if the flag is not set. */
if (dh->hrxq) {
- if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
dh->hrxq = 0;
- } else if (dh->act_flags &
- (MLX5_FLOW_ACTION_QUEUE |
- MLX5_FLOW_ACTION_RSS)) {
+ } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, dh->hrxq);
dh->hrxq = 0;
}
@@ -8423,12 +8427,10 @@ struct field_modify_info modify_tcp[] = {
}
/* hrxq is union, don't touch it only the flag is set. */
if (dh->hrxq) {
- if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
dh->hrxq = 0;
- } else if (dh->act_flags &
- (MLX5_FLOW_ACTION_QUEUE |
- MLX5_FLOW_ACTION_RSS)) {
+ } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, dh->hrxq);
dh->hrxq = 0;
}
@@ -8479,9 +8481,9 @@ struct field_modify_info modify_tcp[] = {
flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
- if (dev_handle->act_flags & MLX5_FLOW_ACTION_JUMP)
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_JUMP)
flow_dv_jump_tbl_resource_release(dev, dev_handle);
- if (dev_handle->act_flags & MLX5_FLOW_ACTION_PORT_ID)
+ if (dev_handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
flow_dv_port_id_action_resource_release(dev,
dev_handle);
if (dev_handle->dvh.push_vlan_res)
@@ -1589,22 +1589,27 @@
case RTE_FLOW_ACTION_TYPE_FLAG:
flow_verbs_translate_action_flag(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_FLAG;
+ dev_flow->handle->mark = 1;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
flow_verbs_translate_action_mark(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_MARK;
+ dev_flow->handle->mark = 1;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
flow_verbs_translate_action_drop(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_DROP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
flow_verbs_translate_action_queue(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
flow_verbs_translate_action_rss(dev_flow, actions);
action_flags |= MLX5_FLOW_ACTION_RSS;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_verbs_translate_action_count(dev_flow,
@@ -1621,7 +1626,7 @@
"action not supported");
}
}
- dev_flow->handle->act_flags = action_flags;
+ dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -1756,12 +1761,11 @@
}
/* hrxq is union, don't touch it only the flag is set. */
if (handle->hrxq) {
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
handle->hrxq = 0;
- } else if (handle->act_flags &
- (MLX5_FLOW_ACTION_QUEUE |
- MLX5_FLOW_ACTION_RSS)) {
+ } else if (handle->fate_action ==
+ MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, handle->hrxq);
handle->hrxq = 0;
}
@@ -1833,7 +1837,7 @@
for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
handle = dev_flow->handle;
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
hrxq = mlx5_hrxq_drop_new(dev);
if (!hrxq) {
rte_flow_error_set
@@ -1898,12 +1902,11 @@
dev_handles, handle, next) {
/* hrxq is union, don't touch it only the flag is set. */
if (handle->hrxq) {
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
mlx5_hrxq_drop_release(dev);
handle->hrxq = 0;
- } else if (handle->act_flags &
- (MLX5_FLOW_ACTION_QUEUE |
- MLX5_FLOW_ACTION_RSS)) {
+ } else if (handle->fate_action ==
+ MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, handle->hrxq);
handle->hrxq = 0;
}