@@ -751,6 +751,7 @@ enum mlx5_modification_field {
MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
MLX5_MODI_GTP_TEID = 0x6E,
MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
};
/* Total number of metadata reg_c's. */
@@ -1539,6 +1539,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_hrxq_clone_free_cb);
if (!priv->hrxqs)
goto error;
+ mlx5_set_metadata_mask(eth_dev);
+ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ !priv->sh->dv_regc0_mask) {
+ DRV_LOG(ERR, "metadata mode %u is not supported "
+ "(no metadata reg_c[0] is available)",
+ sh->config.dv_xmeta_en);
+ err = ENOTSUP;
+ goto error;
+ }
rte_rwlock_init(&priv->ind_tbls_lock);
if (priv->vport_meta_mask)
flow_hw_set_port_info(eth_dev);
@@ -1560,15 +1569,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = -err;
goto error;
}
- mlx5_set_metadata_mask(eth_dev);
- if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- !priv->sh->dv_regc0_mask) {
- DRV_LOG(ERR, "metadata mode %u is not supported "
- "(no metadata reg_c[0] is available)",
- sh->config.dv_xmeta_en);
- err = ENOTSUP;
- goto error;
- }
/* Query availability of metadata reg_c's. */
if (!priv->sh->metadata_regc_check_flag) {
err = mlx5_flow_discover_mreg_c(eth_dev);
@@ -343,6 +343,7 @@ struct mlx5_hw_q_job {
struct rte_flow_hw *flow; /* Flow attached to the job. */
void *user_data; /* Job user data. */
uint8_t *encap_data; /* Encap data. */
+ struct mlx5_modification_cmd *mhdr_cmd;
};
/* HW steering job descriptor LIFO pool. */
@@ -1008,6 +1008,51 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
return items[0].spec;
}
+/**
+ * Fetch 1, 2, 3 or 4 byte field from the byte array
+ * and return as unsigned integer in host-endian format.
+ *
+ * @param[in] data
+ * Pointer to data array.
+ * @param[in] size
+ * Size of field to extract.
+ *
+ * @return
+ * converted field in host endian format.
+ */
+static inline uint32_t
+flow_dv_fetch_field(const uint8_t *data, uint32_t size)
+{
+ uint32_t ret;
+
+ switch (size) {
+ case 1:
+ ret = *data;
+ break;
+ case 2:
+ ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
+ break;
+ case 3:
+ ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
+ ret = (ret << 8) | *(data + sizeof(uint16_t));
+ break;
+ case 4:
+ ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
+ break;
+ default:
+ MLX5_ASSERT(false);
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+struct field_modify_info {
+ uint32_t size; /* Size of field in protocol header, in bytes. */
+ uint32_t offset; /* Offset of field in protocol header, in bytes. */
+ enum mlx5_modification_field id;
+};
+
/* HW steering flow attributes. */
struct mlx5_flow_attr {
uint32_t port_id; /* Port index. */
@@ -1068,6 +1113,29 @@ struct mlx5_action_construct_data {
/* encap data len. */
uint16_t len;
} encap;
+ struct {
+ /* Modify header action offset in pattern. */
+ uint16_t mhdr_cmds_off;
+ /* Offset in pattern after modify header actions. */
+ uint16_t mhdr_cmds_end;
+ /*
+ * True if this action is masked and does not need to
+ * be generated.
+ */
+ bool shared;
+ /*
+ * Modified field definitions in dst field (SET, ADD)
+ * or src field (COPY).
+ */
+ struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
+ /* Modified field definitions in dst field (COPY). */
+ struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
+ /*
+ * Masks applied to field values to generate
+ * PRM actions.
+ */
+ uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
+ } modify_header;
struct {
uint64_t types; /* RSS hash types. */
uint32_t level; /* RSS level. */
@@ -1093,6 +1161,7 @@ struct rte_flow_actions_template {
struct rte_flow_actions_template_attr attr;
struct rte_flow_action *actions; /* Cached flow actions. */
struct rte_flow_action *masks; /* Cached action masks.*/
+ uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
};
@@ -1113,6 +1182,22 @@ struct mlx5_hw_encap_decap_action {
uint8_t data[]; /* Action data. */
};
+#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
+
+/* Modify field action struct. */
+struct mlx5_hw_modify_header_action {
+ /* Reference to DR action */
+ struct mlx5dr_action *action;
+ /* Modify header action position in action rule table. */
+ uint16_t pos;
+ /* Is MODIFY_HEADER action shared across flows in table. */
+ bool shared;
+ /* Amount of modification commands stored in the precompiled buffer. */
+ uint32_t mhdr_cmds_num;
+ /* Precompiled modification commands. */
+ struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
+};
+
/* The maximum actions support in the flow. */
#define MLX5_HW_MAX_ACTS 16
@@ -1122,6 +1207,7 @@ struct mlx5_hw_actions {
LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
struct mlx5_hw_jump_action *jump; /* Jump action. */
struct mlx5_hrxq *tir; /* TIR action. */
+ struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
/* Encap/Decap action. */
struct mlx5_hw_encap_decap_action *encap_decap;
uint16_t encap_decap_pos; /* Encap/Decap action position. */
@@ -2200,6 +2286,16 @@ int flow_dv_action_query(struct rte_eth_dev *dev,
size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error);
+void mlx5_flow_field_id_to_modify_info
+ (const struct rte_flow_action_modify_data *data,
+ struct field_modify_info *info, uint32_t *mask,
+ uint32_t width, struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr, struct rte_flow_error *error);
+int flow_dv_convert_modify_action(struct rte_flow_item *item,
+ struct field_modify_info *field,
+ struct field_modify_info *dcopy,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ uint32_t type, struct rte_flow_error *error);
#define MLX5_PF_VPORT_ID 0
#define MLX5_ECPF_VPORT_ID 0xFFFE
@@ -241,12 +241,6 @@ rte_col_2_mlx5_col(enum rte_color rcol)
return MLX5_FLOW_COLOR_UNDEFINED;
}
-struct field_modify_info {
- uint32_t size; /* Size of field in protocol header, in bytes. */
- uint32_t offset; /* Offset of field in protocol header, in bytes. */
- enum mlx5_modification_field id;
-};
-
struct field_modify_info modify_eth[] = {
{4, 0, MLX5_MODI_OUT_DMAC_47_16},
{2, 4, MLX5_MODI_OUT_DMAC_15_0},
@@ -379,45 +373,6 @@ mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
}
}
-/**
- * Fetch 1, 2, 3 or 4 byte field from the byte array
- * and return as unsigned integer in host-endian format.
- *
- * @param[in] data
- * Pointer to data array.
- * @param[in] size
- * Size of field to extract.
- *
- * @return
- * converted field in host endian format.
- */
-static inline uint32_t
-flow_dv_fetch_field(const uint8_t *data, uint32_t size)
-{
- uint32_t ret;
-
- switch (size) {
- case 1:
- ret = *data;
- break;
- case 2:
- ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
- break;
- case 3:
- ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
- ret = (ret << 8) | *(data + sizeof(uint16_t));
- break;
- case 4:
- ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
- break;
- default:
- MLX5_ASSERT(false);
- ret = 0;
- break;
- }
- return ret;
-}
-
/**
* Convert modify-header action to DV specification.
*
@@ -446,7 +401,7 @@ flow_dv_fetch_field(const uint8_t *data, uint32_t size)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
flow_dv_convert_modify_action(struct rte_flow_item *item,
struct field_modify_info *field,
struct field_modify_info *dcopy,
@@ -1464,7 +1419,32 @@ mlx5_flow_item_field_width(struct rte_eth_dev *dev,
return 0;
}
-static void
+static __rte_always_inline uint8_t
+flow_modify_info_mask_8(uint32_t length, uint32_t off)
+{
+ return (0xffu >> (8 - length)) << off;
+}
+
+static __rte_always_inline uint16_t
+flow_modify_info_mask_16(uint32_t length, uint32_t off)
+{
+ return rte_cpu_to_be_16((0xffffu >> (16 - length)) << off);
+}
+
+static __rte_always_inline uint32_t
+flow_modify_info_mask_32(uint32_t length, uint32_t off)
+{
+ return rte_cpu_to_be_32((0xffffffffu >> (32 - length)) << off);
+}
+
+static __rte_always_inline uint32_t
+flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mask)
+{
+ uint32_t mask = (0xffffffffu >> (32 - length)) << off;
+ return rte_cpu_to_be_32(mask & post_mask);
+}
+
+void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
struct field_modify_info *info, uint32_t *mask,
@@ -1473,323 +1453,340 @@ mlx5_flow_field_id_to_modify_info
{
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t idx = 0;
- uint32_t off = 0;
-
- switch (data->field) {
+ uint32_t off_be = 0;
+ uint32_t length = 0;
+ switch ((int)data->field) {
case RTE_FLOW_FIELD_START:
/* not supported yet */
MLX5_ASSERT(false);
break;
case RTE_FLOW_FIELD_MAC_DST:
- off = data->offset > 16 ? data->offset - 16 : 0;
- if (mask) {
- if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 4,
- MLX5_MODI_OUT_DMAC_15_0};
- if (width < 16) {
- mask[1] = rte_cpu_to_be_16(0xffff >>
- (16 - width));
- width = 0;
- } else {
- mask[1] = RTE_BE16(0xffff);
- width -= 16;
- }
- if (!width)
- break;
- ++idx;
- }
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DMAC_47_16};
- mask[0] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ MLX5_ASSERT(data->offset + width <= 48);
+ off_be = 48 - (data->offset + width);
+ if (off_be < 16) {
+ info[idx] = (struct field_modify_info){2, 4,
+ MLX5_MODI_OUT_DMAC_15_0};
+ length = off_be + width <= 16 ? width : 16 - off_be;
+ if (mask)
+ mask[1] = flow_modify_info_mask_16(length,
+ off_be);
+ else
+ info[idx].offset = off_be;
+ width -= length;
+ if (!width)
+ break;
+ off_be = 0;
+ idx++;
} else {
- if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 0,
- MLX5_MODI_OUT_DMAC_15_0};
- info[idx] = (struct field_modify_info){4, off,
- MLX5_MODI_OUT_DMAC_47_16};
+ off_be -= 16;
}
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_DMAC_47_16};
+ if (mask)
+ mask[0] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_MAC_SRC:
- off = data->offset > 16 ? data->offset - 16 : 0;
- if (mask) {
- if (data->offset < 16) {
- info[idx] = (struct field_modify_info){2, 4,
- MLX5_MODI_OUT_SMAC_15_0};
- if (width < 16) {
- mask[1] = rte_cpu_to_be_16(0xffff >>
- (16 - width));
- width = 0;
- } else {
- mask[1] = RTE_BE16(0xffff);
- width -= 16;
- }
- if (!width)
- break;
- ++idx;
- }
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SMAC_47_16};
- mask[0] = rte_cpu_to_be_32((0xffffffff >>
- (32 - width)) << off);
+ MLX5_ASSERT(data->offset + width <= 48);
+ off_be = 48 - (data->offset + width);
+ if (off_be < 16) {
+ info[idx] = (struct field_modify_info){2, 4,
+ MLX5_MODI_OUT_SMAC_15_0};
+ length = off_be + width <= 16 ? width : 16 - off_be;
+ if (mask)
+ mask[1] = flow_modify_info_mask_16(length,
+ off_be);
+ else
+ info[idx].offset = off_be;
+ width -= length;
+ if (!width)
+ break;
+ off_be = 0;
+ idx++;
} else {
- if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 0,
- MLX5_MODI_OUT_SMAC_15_0};
- info[idx] = (struct field_modify_info){4, off,
- MLX5_MODI_OUT_SMAC_47_16};
+ off_be -= 16;
}
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_OUT_SMAC_47_16};
+ if (mask)
+ mask[0] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_VLAN_TYPE:
/* not supported yet */
break;
case RTE_FLOW_FIELD_VLAN_ID:
+ MLX5_ASSERT(data->offset + width <= 12);
+ off_be = 12 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_FIRST_VID};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_MAC_TYPE:
+ MLX5_ASSERT(data->offset + width <= 16);
+ off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_ETHERTYPE};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_DSCP:
+ MLX5_ASSERT(data->offset + width <= 6);
+ off_be = 6 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = 0x3f >> (6 - width);
+ mask[idx] = flow_modify_info_mask_8(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_TTL:
+ MLX5_ASSERT(data->offset + width <= 8);
+ off_be = 8 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV4_TTL};
if (mask)
- mask[idx] = 0xff >> (8 - width);
+ mask[idx] = flow_modify_info_mask_8(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_SRC:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV4};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV4_DST:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV4};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_DSCP:
+ MLX5_ASSERT(data->offset + width <= 6);
+ off_be = 6 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_DSCP};
if (mask)
- mask[idx] = 0x3f >> (6 - width);
+ mask[idx] = flow_modify_info_mask_8(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
+ MLX5_ASSERT(data->offset + width <= 8);
+ off_be = 8 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IPV6_HOPLIMIT};
if (mask)
- mask[idx] = 0xff >> (8 - width);
+ mask[idx] = flow_modify_info_mask_8(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
- case RTE_FLOW_FIELD_IPV6_SRC:
- if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 12,
- MLX5_MODI_OUT_SIPV6_31_0};
- if (width < 32) {
- mask[3] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[3] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
- }
- if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4, 8,
- MLX5_MODI_OUT_SIPV6_63_32};
- if (width < 32) {
- mask[2] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[2] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
- }
- if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4, 4,
- MLX5_MODI_OUT_SIPV6_95_64};
- if (width < 32) {
- mask[1] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[1] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
+ case RTE_FLOW_FIELD_IPV6_SRC: {
+ /*
+ * Fields corresponding to IPv6 source address bytes
+ * arranged according to network byte ordering.
+ */
+ struct field_modify_info fields[] = {
+ { 4, 0, MLX5_MODI_OUT_SIPV6_127_96 },
+ { 4, 4, MLX5_MODI_OUT_SIPV6_95_64 },
+ { 4, 8, MLX5_MODI_OUT_SIPV6_63_32 },
+ { 4, 12, MLX5_MODI_OUT_SIPV6_31_0 },
+ };
+ /* First mask to be modified is the mask of the 4th double word (address bytes 12-15). */
+ uint32_t midx = 3;
+
+ MLX5_ASSERT(data->offset + width <= 128);
+ off_be = 128 - (data->offset + width);
+ while (width > 0 && midx > 0) {
+ if (off_be < 32) {
+ info[idx] = fields[midx];
+ length = off_be + width <= 32 ?
+ width : 32 - off_be;
+ if (mask)
+ mask[midx] = flow_modify_info_mask_32
+ (length, off_be);
+ else
+ info[idx].offset = off_be;
+ width -= length;
+ off_be = 0;
+ idx++;
+ } else {
+ off_be -= 32;
}
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_127_96};
- mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_31_0};
- if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_63_32};
- if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_95_64};
- if (data->offset < 128)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_SIPV6_127_96};
+ midx--;
}
+ if (!width)
+ break;
+ info[idx] = fields[midx];
+ if (mask)
+ mask[midx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
- case RTE_FLOW_FIELD_IPV6_DST:
- if (mask) {
- if (data->offset < 32) {
- info[idx] = (struct field_modify_info){4, 12,
- MLX5_MODI_OUT_DIPV6_31_0};
- if (width < 32) {
- mask[3] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[3] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
- }
- if (data->offset < 64) {
- info[idx] = (struct field_modify_info){4, 8,
- MLX5_MODI_OUT_DIPV6_63_32};
- if (width < 32) {
- mask[2] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[2] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
- }
- if (data->offset < 96) {
- info[idx] = (struct field_modify_info){4, 4,
- MLX5_MODI_OUT_DIPV6_95_64};
- if (width < 32) {
- mask[1] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
- width = 0;
- } else {
- mask[1] = RTE_BE32(0xffffffff);
- width -= 32;
- }
- if (!width)
- break;
- ++idx;
+ }
+ case RTE_FLOW_FIELD_IPV6_DST: {
+ /*
+ * Fields corresponding to IPv6 destination address bytes
+ * arranged according to network byte ordering.
+ */
+ struct field_modify_info fields[] = {
+ { 4, 0, MLX5_MODI_OUT_DIPV6_127_96 },
+ { 4, 4, MLX5_MODI_OUT_DIPV6_95_64 },
+ { 4, 8, MLX5_MODI_OUT_DIPV6_63_32 },
+ { 4, 12, MLX5_MODI_OUT_DIPV6_31_0 },
+ };
+ /* First mask to be modified is the mask of the 4th double word (address bytes 12-15). */
+ uint32_t midx = 3;
+
+ MLX5_ASSERT(data->offset + width <= 128);
+ off_be = 128 - (data->offset + width);
+ while (width > 0 && midx > 0) {
+ if (off_be < 32) {
+ info[idx] = fields[midx];
+ length = off_be + width <= 32 ?
+ width : 32 - off_be;
+ if (mask)
+ mask[midx] = flow_modify_info_mask_32
+ (length, off_be);
+ else
+ info[idx].offset = off_be;
+ width -= length;
+ off_be = 0;
+ idx++;
+ } else {
+ off_be -= 32;
}
- info[idx] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_127_96};
- mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
- } else {
- if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_31_0};
- if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_63_32};
- if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_95_64};
- if (data->offset < 128)
- info[idx++] = (struct field_modify_info){4, 0,
- MLX5_MODI_OUT_DIPV6_127_96};
+ midx--;
}
+ if (!width)
+ break;
+ info[idx] = fields[midx];
+ if (mask)
+ mask[midx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
+ }
case RTE_FLOW_FIELD_TCP_PORT_SRC:
+ MLX5_ASSERT(data->offset + width <= 16);
+ off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_PORT_DST:
+ MLX5_ASSERT(data->offset + width <= 16);
+ off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_SEQ_NUM:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_TCP_SEQ_NUM};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_ACK_NUM:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_TCP_ACK_NUM};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TCP_FLAGS:
+ MLX5_ASSERT(data->offset + width <= 9);
+ off_be = 9 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_TCP_FLAGS};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_UDP_PORT_SRC:
+ MLX5_ASSERT(data->offset + width <= 16);
+ off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_SPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_UDP_PORT_DST:
+ MLX5_ASSERT(data->offset + width <= 16);
+ off_be = 16 - (data->offset + width);
info[idx] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_UDP_DPORT};
if (mask)
- mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
+ mask[idx] = flow_modify_info_mask_16(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_VXLAN_VNI:
- /* not supported yet */
+ MLX5_ASSERT(data->offset + width <= 24);
+ /* VNI is on bits 31-8 of TUNNEL_HDR_DW_1. */
+ off_be = 24 - (data->offset + width) + 8;
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_TUNNEL_HDR_DW_1};
+ if (mask)
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_GENEVE_VNI:
/* not supported yet*/
break;
case RTE_FLOW_FIELD_GTP_TEID:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
info[idx] = (struct field_modify_info){4, 0,
MLX5_MODI_GTP_TEID};
if (mask)
- mask[idx] = rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_TAG:
{
- int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
- data->level, error);
+ MLX5_ASSERT(data->offset + width <= 32);
+ int reg;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ reg = REG_C_1;
+ else
+ reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
+ data->level, error);
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
@@ -1797,15 +1794,18 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
- mask[idx] =
- rte_cpu_to_be_32(0xffffffff >>
- (32 - width));
+ mask[idx] = flow_modify_info_mask_32
+ (width, data->offset);
+ else
+ info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_MARK:
{
uint32_t mark_mask = priv->sh->dv_mark_mask;
uint32_t mark_count = __builtin_popcount(mark_mask);
+ RTE_SET_USED(mark_count);
+ MLX5_ASSERT(data->offset + width <= mark_count);
int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
0, error);
if (reg < 0)
@@ -1815,14 +1815,18 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
- mask[idx] = rte_cpu_to_be_32((mark_mask >>
- (mark_count - width)) & mark_mask);
+ mask[idx] = flow_modify_info_mask_32_masked
+ (width, data->offset, mark_mask);
+ else
+ info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_META:
{
uint32_t meta_mask = priv->sh->dv_meta_mask;
uint32_t meta_count = __builtin_popcount(meta_mask);
+ RTE_SET_USED(meta_count);
+ MLX5_ASSERT(data->offset + width <= meta_count);
int reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return;
@@ -1831,16 +1835,22 @@ mlx5_flow_field_id_to_modify_info
info[idx] = (struct field_modify_info){4, 0,
reg_to_field[reg]};
if (mask)
- mask[idx] = rte_cpu_to_be_32((meta_mask >>
- (meta_count - width)) & meta_mask);
+ mask[idx] = flow_modify_info_mask_32_masked
+ (width, data->offset, meta_mask);
+ else
+ info[idx].offset = data->offset;
}
break;
case RTE_FLOW_FIELD_IPV4_ECN:
case RTE_FLOW_FIELD_IPV6_ECN:
+ MLX5_ASSERT(data->offset + width <= 2);
+ off_be = 2 - (data->offset + width);
info[idx] = (struct field_modify_info){1, 0,
MLX5_MODI_OUT_IP_ECN};
if (mask)
- mask[idx] = 0x3 >> (2 - width);
+ mask[idx] = flow_modify_info_mask_8(width, off_be);
+ else
+ info[idx].offset = off_be;
break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
@@ -319,6 +319,11 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
acts->jump = NULL;
}
+ if (acts->mhdr) {
+ if (acts->mhdr->action)
+ mlx5dr_action_destroy(acts->mhdr->action);
+ mlx5_free(acts->mhdr);
+ }
}
/**
@@ -425,6 +430,37 @@ __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+__flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src,
+ uint16_t action_dst,
+ uint16_t mhdr_cmds_off,
+ uint16_t mhdr_cmds_end,
+ bool shared,
+ struct field_modify_info *field,
+ struct field_modify_info *dcopy,
+ uint32_t *mask)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
+ act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
+ act_data->modify_header.shared = shared;
+ rte_memcpy(act_data->modify_header.field, field,
+ sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
+ rte_memcpy(act_data->modify_header.dcopy, dcopy,
+ sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
+ rte_memcpy(act_data->modify_header.mask, mask,
+ sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
+
/**
* Append shared RSS action to the dynamic action list.
*
@@ -515,6 +551,257 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline bool
+flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
+ const struct rte_flow_action *mask)
+{
+ const struct rte_flow_action_modify_field *v = action->conf;
+ const struct rte_flow_action_modify_field *m = mask->conf;
+
+ if (v->src.field == RTE_FLOW_FIELD_VALUE) {
+ uint32_t j;
+
+ if (m == NULL)
+ return false;
+ for (j = 0; j < RTE_DIM(m->src.value); ++j) {
+ /*
+ * Immediate value is considered to be masked
+ * (and thus shared by all flow rules), if mask
+ * is non-zero. Partial mask over immediate value
+ * is not allowed.
+ */
+ if (m->src.value[j])
+ return true;
+ }
+ return false;
+ }
+ if (v->src.field == RTE_FLOW_FIELD_POINTER)
+ return m->src.pvalue != NULL;
+ /*
+ * Source field types other than VALUE and
+ * POINTER are always shared.
+ */
+ return true;
+}
+
+static __rte_always_inline bool
+flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
+ const struct mlx5_modification_cmd *cmd)
+{
+ struct mlx5_modification_cmd last_cmd = { { 0 } };
+ struct mlx5_modification_cmd new_cmd = { { 0 } };
+ const uint32_t cmds_num = mhdr->mhdr_cmds_num;
+ unsigned int last_type;
+ bool should_insert = false;
+
+ if (cmds_num == 0)
+ return false;
+ last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
+ last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
+ last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
+ last_type = last_cmd.action_type;
+ new_cmd = *cmd;
+ new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
+ new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
+ switch (new_cmd.action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ if (last_type == MLX5_MODIFICATION_TYPE_SET ||
+ last_type == MLX5_MODIFICATION_TYPE_ADD)
+ should_insert = new_cmd.field == last_cmd.field;
+ else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
+ should_insert = new_cmd.field == last_cmd.dst_field;
+ else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
+ should_insert = false;
+ else
+ MLX5_ASSERT(false); /* Other types are not supported. */
+ break;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ if (last_type == MLX5_MODIFICATION_TYPE_SET ||
+ last_type == MLX5_MODIFICATION_TYPE_ADD)
+ should_insert = (new_cmd.field == last_cmd.field ||
+ new_cmd.dst_field == last_cmd.field);
+ else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
+ should_insert = (new_cmd.field == last_cmd.dst_field ||
+ new_cmd.dst_field == last_cmd.dst_field);
+ else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
+ should_insert = false;
+ else
+ MLX5_ASSERT(false); /* Other types are not supported. */
+ break;
+ default:
+ /* Other action types should be rejected on AT validation. */
+ MLX5_ASSERT(false);
+ break;
+ }
+ return should_insert;
+}
+
+static __rte_always_inline int
+flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
+{
+ struct mlx5_modification_cmd *nop;
+ uint32_t num = mhdr->mhdr_cmds_num;
+
+ if (num + 1 >= MLX5_MHDR_MAX_CMD)
+ return -ENOMEM;
+ nop = mhdr->mhdr_cmds + num;
+ nop->data0 = 0;
+ nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
+ nop->data0 = rte_cpu_to_be_32(nop->data0);
+ nop->data1 = 0;
+ mhdr->mhdr_cmds_num = num + 1;
+ return 0;
+}
+
+static __rte_always_inline int
+flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
+ struct mlx5_modification_cmd *cmd)
+{
+ uint32_t num = mhdr->mhdr_cmds_num;
+
+ if (num + 1 >= MLX5_MHDR_MAX_CMD)
+ return -ENOMEM;
+ mhdr->mhdr_cmds[num] = *cmd;
+ mhdr->mhdr_cmds_num = num + 1;
+ return 0;
+}
+
+static __rte_always_inline int
+flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
+ struct mlx5_flow_dv_modify_hdr_resource *resource)
+{
+ uint32_t idx;
+ int ret;
+
+ for (idx = 0; idx < resource->actions_num; ++idx) {
+ struct mlx5_modification_cmd *src = &resource->actions[idx];
+
+ if (flow_hw_should_insert_nop(mhdr, src)) {
+ ret = flow_hw_mhdr_cmd_nop_append(mhdr);
+ if (ret)
+ return ret;
+ }
+ ret = flow_hw_mhdr_cmd_append(mhdr, src);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static __rte_always_inline void
+flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
+ struct rte_flow_actions_template *at)
+{
+ memset(mhdr, 0, sizeof(*mhdr));
+ /* Modify header action without any commands is shared by default. */
+ mhdr->shared = true;
+ mhdr->pos = at->mhdr_off;
+}
+
+static __rte_always_inline int
+flow_hw_modify_field_compile(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *action_start, /* Start of AT actions. */
+ const struct rte_flow_action *action, /* Current action from AT. */
+ const struct rte_flow_action *action_mask, /* Current mask from AT. */
+ struct mlx5_hw_actions *acts,
+ struct mlx5_hw_modify_header_action *mhdr,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_modify_field *conf = action->conf;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource resource;
+ uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
+ } dummy;
+ struct mlx5_flow_dv_modify_hdr_resource *resource;
+ struct rte_flow_item item = {
+ .spec = NULL,
+ .mask = NULL
+ };
+ struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, MLX5_MODI_OUT_NONE} };
+ struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
+ {0, 0, MLX5_MODI_OUT_NONE} };
+ uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
+ uint32_t type, value = 0;
+ uint16_t cmds_start, cmds_end;
+ bool shared;
+ int ret;
+
+ /*
+ * Modify header action is shared if previous modify_field actions
+ * are shared and currently compiled action is shared.
+ */
+ shared = flow_hw_action_modify_field_is_shared(action, action_mask);
+ mhdr->shared &= shared;
+ if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
+ conf->src.field == RTE_FLOW_FIELD_VALUE) {
+ type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
+ MLX5_MODIFICATION_TYPE_ADD;
+ /* For SET/ADD fill the destination field (field) first. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
+ conf->width, dev,
+ attr, error);
+ item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
+ (void *)(uintptr_t)conf->src.pvalue :
+ (void *)(uintptr_t)&conf->src.value;
+ if (conf->dst.field == RTE_FLOW_FIELD_META ||
+ conf->dst.field == RTE_FLOW_FIELD_TAG) {
+ value = *(const unaligned_uint32_t *)item.spec;
+ value = rte_cpu_to_be_32(value);
+ item.spec = &value;
+ }
+ } else {
+ type = MLX5_MODIFICATION_TYPE_COPY;
+ /* For COPY fill the destination field (dcopy) without mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
+ conf->width, dev,
+ attr, error);
+ /* Then construct the source field (field) with mask. */
+ mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
+ conf->width, dev,
+ attr, error);
+ }
+ item.mask = &mask;
+ memset(&dummy, 0, sizeof(dummy));
+ resource = &dummy.resource;
+ ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
+ if (ret)
+ return ret;
+ MLX5_ASSERT(resource->actions_num > 0);
+ /*
+ * If previous modify field action collide with this one, then insert NOP command.
+ * This NOP command will not be a part of action's command range used to update commands
+ * on rule creation.
+ */
+ if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
+ ret = flow_hw_mhdr_cmd_nop_append(mhdr);
+ if (ret)
+ return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "too many modify field operations specified");
+ }
+ cmds_start = mhdr->mhdr_cmds_num;
+ ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
+ if (ret)
+ return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "too many modify field operations specified");
+
+ cmds_end = mhdr->mhdr_cmds_num;
+ if (shared)
+ return 0;
+ ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
+ action - action_start, mhdr->pos,
+ cmds_start, cmds_end, shared,
+ field, dcopy, mask);
+ if (ret)
+ return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "not enough memory to store modify field metadata");
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -558,10 +845,12 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t reformat_pos = MLX5_HW_MAX_ACTS, reformat_src = 0;
uint8_t *encap_data = NULL, *encap_data_m = NULL;
size_t data_size = 0;
+ struct mlx5_hw_modify_header_action mhdr = { 0 };
bool actions_end = false;
uint32_t type, i;
int err;
+ flow_hw_modify_field_init(&mhdr, at);
if (attr->transfer)
type = MLX5DR_TABLE_TYPE_FDB;
else if (attr->egress)
@@ -714,6 +1003,15 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
reformat_pos = i++;
refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ if (mhdr.pos == UINT16_MAX)
+ mhdr.pos = i++;
+ err = flow_hw_modify_field_compile(dev, attr, action_start,
+ actions, masks, acts, &mhdr,
+ error);
+ if (err)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -721,6 +1019,31 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
break;
}
}
+ if (mhdr.pos != UINT16_MAX) {
+ uint32_t flags;
+ uint32_t bulk_size;
+ size_t mhdr_len;
+
+ acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
+ 0, SOCKET_ID_ANY);
+ if (!acts->mhdr)
+ goto err;
+ rte_memcpy(acts->mhdr, &mhdr, sizeof(*acts->mhdr));
+ mhdr_len = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num;
+ flags = mlx5_hw_act_flag[!!attr->group][type];
+ if (acts->mhdr->shared) {
+ flags |= MLX5DR_ACTION_FLAG_SHARED;
+ bulk_size = 0;
+ } else {
+ bulk_size = rte_log2_u32(table_attr->nb_flows);
+ }
+ acts->mhdr->action = mlx5dr_action_create_modify_header
+ (priv->dr_ctx, mhdr_len, (__be64 *)acts->mhdr->mhdr_cmds,
+ bulk_size, flags);
+ if (!acts->mhdr->action)
+ goto err;
+ acts->rule_acts[acts->mhdr->pos].action = acts->mhdr->action;
+ }
if (reformat_pos != MLX5_HW_MAX_ACTS) {
uint8_t buf[MLX5_ENCAP_MAX_LEN];
bool shared_rfmt = true;
@@ -884,6 +1207,100 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline int
+flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
+{
+ struct mlx5_modification_cmd cmd_he = {
+ .data0 = rte_be_to_cpu_32(cmd->data0), /* data0 carries action_type; convert BE -> host to decode the bitfield */
+ .data1 = 0, /* data1 is irrelevant for the type check */
+ };
+
+ return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP; /* non-zero when @cmd is a NOP modify-header command */
+}
+
+/**
+ * Fill in the runtime source values of a MODIFY_FIELD action.
+ *
+ * For a non-shared modify-header action the immediate source value is
+ * not known at template translation time; it is taken from the rte_flow
+ * action here and patched into the per-job copy of the modify-header
+ * command array (job->mhdr_cmd) during flow creation.
+ *
+ * @param[in] job
+ *   Pointer to job descriptor holding the per-flow command copy.
+ * @param[in] act_data
+ *   Pointer to construct data of this MODIFY_FIELD action (command
+ *   range, field list and mask recorded at translation time).
+ * @param[in] hw_acts
+ *   Pointer to translated actions of the template.
+ * @param[in] action
+ *   Pointer to the rte_flow MODIFY_FIELD action to read values from.
+ *
+ * @return
+ *   0 on success (or when there is nothing to patch for this action),
+ *   -1 when the translated modify-header context is missing or the
+ *   command range is exhausted before all fields are written.
+ *
+ */
+static __rte_always_inline int
+flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
+ struct mlx5_action_construct_data *act_data,
+ const struct mlx5_hw_actions *hw_acts,
+ const struct rte_flow_action *action)
+{
+ const struct rte_flow_action_modify_field *mhdr_action = action->conf;
+ uint8_t values[16] = { 0 };
+ unaligned_uint32_t *value_p;
+ uint32_t i;
+ struct field_modify_info *field;
+
+ if (!hw_acts->mhdr) /* template translated no modify-header action at all */
+ return -1;
+ if (hw_acts->mhdr->shared || act_data->modify_header.shared) /* shared: commands were fully built at translate time */
+ return 0;
+ MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
+ mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
+ if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
+ mhdr_action->src.field != RTE_FLOW_FIELD_POINTER) /* only immediate sources need runtime patching */
+ return 0;
+ if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
+ rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
+ else
+ rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values)); /* POINTER source: copy from user buffer */
+ if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
+ mhdr_action->dst.field == RTE_FLOW_FIELD_TAG) {
+ value_p = (unaligned_uint32_t *)values;
+ *value_p = rte_cpu_to_be_32(*value_p); /* META/TAG destinations take the value in big-endian */
+ }
+ i = act_data->modify_header.mhdr_cmds_off; /* first command slot owned by this action */
+ field = act_data->modify_header.field;
+ do {
+ uint32_t off_b;
+ uint32_t mask;
+ uint32_t data;
+ const uint8_t *mask_src;
+
+ if (i >= act_data->modify_header.mhdr_cmds_end) /* command range exhausted before the field list ended */
+ return -1;
+ if (flow_hw_mhdr_cmd_is_nop(&job->mhdr_cmd[i])) { /* NOPs carry no data - just advance the command index */
+ ++i;
+ continue;
+ }
+ mask_src = (const uint8_t *)act_data->modify_header.mask;
+ mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
+ if (!mask) { /* field masked out - no command was generated for it, so do not consume one */
+ ++field;
+ continue;
+ }
+ off_b = rte_bsf32(mask); /* lowest set bit = bit offset of the field inside its dword */
+ data = flow_dv_fetch_field(values + field->offset, field->size);
+ data = (data & mask) >> off_b; /* extract and right-align the field value */
+ job->mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data); /* patch the immediate into the per-flow command copy */
+ ++field;
+ } while (field->size); /* field list is terminated by a zero-size entry */
+ return 0;
+}
+
/**
* Construct flow action array.
*
@@ -928,6 +1345,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
};
uint32_t ft_flag;
size_t encap_len = 0;
+ int ret;
memcpy(rule_acts, hw_acts->rule_acts,
sizeof(*rule_acts) * hw_acts->acts_num);
@@ -945,6 +1363,18 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
} else {
attr.ingress = 1;
}
+ if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
+ uint16_t pos = hw_acts->mhdr->pos;
+
+ if (!hw_acts->mhdr->shared) {
+ rule_acts[pos].modify_header.offset =
+ job->flow->idx - 1;
+ rule_acts[pos].modify_header.data =
+ (uint8_t *)job->mhdr_cmd;
+ rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
+ sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
+ }
+ }
LIST_FOREACH(act_data, &hw_acts->act_list, next) {
uint32_t jump_group;
uint32_t tag;
@@ -1020,6 +1450,14 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(raw_encap_data->size ==
act_data->encap.len);
break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ ret = flow_hw_modify_field_construct(job,
+ act_data,
+ hw_acts,
+ action);
+ if (ret)
+ return -1;
+ break;
default:
break;
}
@@ -2093,6 +2531,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
mem_size += (sizeof(struct mlx5_hw_q_job *) +
sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
+ sizeof(struct mlx5_modification_cmd) *
+ MLX5_MHDR_MAX_CMD +
sizeof(struct mlx5_hw_q_job)) *
queue_attr[0]->size;
}
@@ -2104,6 +2544,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
for (i = 0; i < nb_queue; i++) {
uint8_t *encap = NULL;
+ struct mlx5_modification_cmd *mhdr_cmd = NULL;
priv->hw_q[i].job_idx = queue_attr[i]->size;
priv->hw_q[i].size = queue_attr[i]->size;
@@ -2115,8 +2556,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
&job[queue_attr[i - 1]->size];
job = (struct mlx5_hw_q_job *)
&priv->hw_q[i].job[queue_attr[i]->size];
- encap = (uint8_t *)&job[queue_attr[i]->size];
+ mhdr_cmd = (struct mlx5_modification_cmd *)&job[queue_attr[i]->size];
+ encap = (uint8_t *)&mhdr_cmd[queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
for (j = 0; j < queue_attr[i]->size; j++) {
+ job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
priv->hw_q[i].job[j] = &job[j];
}