@@ -525,20 +525,22 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
+ * flow.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] flow
- * Pointer to flow structure.
+ * @param[in] dev_flow
+ * Pointer to device flow structure.
*/
static void
-flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
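+ /* Actions are shared by all expanded flows, so read them from the main
+  * flow, while the detected layers are specific to each device flow. */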
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -556,7 +558,8 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
@@ -568,21 +571,39 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
}
/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_set(dev, dev_flow);
+}
+
+/**
* Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
+ * device flow if no other flow uses it with the same kind of request.
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to the flow.
+ * @param[in] dev_flow
+ * Pointer to the device flow.
*/
static void
-flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
assert(dev->data->dev_started);
@@ -601,7 +622,8 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
@@ -613,6 +635,24 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
}
/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_trim(dev, dev_flow);
+}
+
+/**
* Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
*
* @param dev
@@ -2024,6 +2064,11 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
if (!dev_flow)
goto error;
dev_flow->flow = flow;
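+ /* Keep the detected layers per device flow; expansion may differ. */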
+ dev_flow->layers = item_flags;
+ /* Store actions once, as all expanded flows share the same actions. */
+ if (i == 0)
+ flow->actions = action_flags;
+ assert(flow->actions == action_flags);
LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
ret = flow_drv_translate(dev, dev_flow, attr,
buf->entry[i].pattern,
@@ -215,7 +215,8 @@ struct mlx5_flow_verbs {
struct mlx5_flow {
LIST_ENTRY(mlx5_flow) next;
struct rte_flow *flow; /**< Pointer to the main flow. */
- uint32_t layers; /**< Bit-fields that holds the detected layers. */
+ uint32_t layers;
+ /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_dv dv;
@@ -240,15 +241,14 @@ struct mlx5_flow_counter {
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
enum mlx5_flow_drv_type drv_type; /**< Driver type. */
- uint32_t layers;
- /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
struct mlx5_flow_counter *counter; /**< Holds flow counter. */
struct rte_flow_action_rss rss;/**< RSS context. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
/**< Device flows that are part of the flow. */
- uint32_t actions; /**< Bit-fields which mark all detected actions. */
+ uint32_t actions;
+ /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -177,6 +177,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (ret < 0)
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
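+ /* Re-evaluate per item so that items after a tunnel match as inner. */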
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -285,7 +286,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions, "too many actions");
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
@@ -1253,13 +1253,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
},
};
void *match_value = dev_flow->dv.value.buf;
- uint8_t inner = 0;
+ int tunnel = 0;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
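+ /* Build inner specs when the device flow carries a tunnel layer. */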
+ tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
flow_dv_create_item(&matcher, match_value, items, dev_flow,
- inner);
+ tunnel);
+ }
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -1324,7 +1326,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields, (*flow->queue),
flow->rss.queue_num,
- !!(flow->layers &
+ !!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
@@ -343,7 +343,7 @@ flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
struct ibv_flow_spec_ipv6 ipv6 = {
.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
@@ -467,7 +467,7 @@ flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp tcp = {
.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
@@ -999,6 +999,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int ret = 0;
+
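+ /* Items following a tunnel item must be matched as inner headers. */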
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1110,7 +1112,6 @@ flow_verbs_validate(struct rte_eth_dev *dev,
}
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
@@ -1252,9 +1253,10 @@ flow_verbs_get_items_and_size(const struct rte_flow_item items[],
{
int size = 0;
uint64_t detected_items = 0;
- const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
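+ /* Derive the tunnel flag from the items detected so far. */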
+ int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL);
+
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1450,7 +1452,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
"action not supported");
}
}
- dev_flow->flow->actions |= action_flags;
+ /* Device flow should have the action flags set by flow_drv_prepare(). */
+ assert(dev_flow->flow->actions == action_flags);
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
@@ -1613,7 +1616,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
verbs->hash_fields,
(*flow->queue),
flow->rss.queue_num,
- !!(flow->layers &
+ !!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set