@@ -396,6 +396,167 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
}
}
+int
+mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
+ uint32_t ids[], uint32_t num)
+{
+ uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
+ void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
+ void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
+ void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+ int ret;
+ uint32_t idx = 0;
+ uint32_t i;
+
+ if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "Too many sample IDs to be fetched.");
+ return -rte_errno;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
+ ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ rte_errno = ret;
+ DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
+ (void *)flex_obj);
+ return -rte_errno;
+ }
+ for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+ void *s_off = (void *)((char *)sample + i *
+ MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+ uint32_t en;
+
+ en = MLX5_GET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_en);
+ if (!en)
+ continue;
+ ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_id);
+ }
+ if (num != idx) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "Number of sample IDs are not as expected.");
+ return -rte_errno;
+ }
+ return ret;
+}
+
+
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flex_parser(void *ctx,
+ struct mlx5_devx_graph_node_attr *data)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
+ void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
+ void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+ void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
+ void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
+ struct mlx5_devx_obj *parse_flex_obj = NULL;
+ uint32_t i;
+
+ parse_flex_obj = rte_calloc(__func__, 1, sizeof(*parse_flex_obj), 0);
+ if (!parse_flex_obj) {
+ DRV_LOG(ERR, "Failed to allocate flex parser data");
+ rte_errno = ENOMEM;
+ rte_free(in);
+ return NULL;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+ MLX5_SET(parse_graph_flex, flex, header_length_mode,
+ data->header_length_mode);
+ MLX5_SET(parse_graph_flex, flex, header_length_base_value,
+ data->header_length_base_value);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
+ data->header_length_field_offset);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
+ data->header_length_field_shift);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
+ data->header_length_field_mask);
+ for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+ struct mlx5_devx_match_sample_attr *s = &data->sample[i];
+ void *s_off = (void *)((char *)sample + i *
+ MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+
+ if (!s->flow_match_sample_en)
+ continue;
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_en, !!s->flow_match_sample_en);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset,
+ s->flow_match_sample_field_offset);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_offset_mode,
+ s->flow_match_sample_offset_mode);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset_mask,
+ s->flow_match_sample_field_offset_mask);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset_shift,
+ s->flow_match_sample_field_offset_shift);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_base_offset,
+ s->flow_match_sample_field_base_offset);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_tunnel_mode,
+ s->flow_match_sample_tunnel_mode);
+ }
+ for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
+ struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
+ struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
+ void *in_off = (void *)((char *)in_arc + i *
+ MLX5_ST_SZ_BYTES(parse_graph_arc));
+ void *out_off = (void *)((char *)out_arc + i *
+ MLX5_ST_SZ_BYTES(parse_graph_arc));
+
+ if (ia->arc_parse_graph_node != 0) {
+ MLX5_SET(parse_graph_arc, in_off,
+ compare_condition_value,
+ ia->compare_condition_value);
+ MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
+ ia->start_inner_tunnel);
+ MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
+ ia->arc_parse_graph_node);
+ MLX5_SET(parse_graph_arc, in_off,
+ parse_graph_node_handle,
+ ia->parse_graph_node_handle);
+ }
+ if (oa->arc_parse_graph_node != 0) {
+ MLX5_SET(parse_graph_arc, out_off,
+ compare_condition_value,
+ oa->compare_condition_value);
+ MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
+ oa->start_inner_tunnel);
+ MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
+ oa->arc_parse_graph_node);
+ MLX5_SET(parse_graph_arc, out_off,
+ parse_graph_node_handle,
+ oa->parse_graph_node_handle);
+ }
+ }
+ parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!parse_flex_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create FLEX PARSE GRAPH object "
+ "by using DevX.");
+ rte_free(parse_flex_obj);
+ return NULL;
+ }
+ parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return parse_flex_obj;
+}
+
/**
* Query HCA attributes.
* Using those attributes we can check on run time if the device
@@ -467,6 +628,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
+ attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
if (attr->qos.sup) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
@@ -1024,7 +1188,7 @@ mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
if (ret) {
DRV_LOG(ERR, "Failed to modify SQ using DevX");
rte_errno = errno;
- return -errno;
+ return -rte_errno;
}
return ret;
}
@@ -1337,7 +1501,7 @@ mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
if (ret) {
DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
rte_errno = errno;
- return -errno;
+ return -rte_errno;
}
return ret;
}
@@ -1540,7 +1704,7 @@ mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
if (ret) {
DRV_LOG(ERR, "Failed to modify QP using DevX.");
rte_errno = errno;
- return -errno;
+ return -rte_errno;
}
return ret;
}
@@ -68,6 +68,7 @@ struct mlx5_hca_attr {
uint32_t eswitch_manager:1;
uint32_t flow_counters_dump:1;
uint32_t log_max_rqt_size:5;
+ uint32_t parse_graph_flex_node:1;
uint8_t flow_counter_bulk_alloc_bitmap;
uint32_t eth_net_offloads:1;
uint32_t eth_virt:1;
@@ -416,6 +417,13 @@ int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp,
__rte_internal
int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
struct mlx5_devx_rqt_attr *rqt_attr);
+__rte_internal
+int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
+ uint32_t ids[], uint32_t num);
+
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_create_flex_parser(void *ctx,
+ struct mlx5_devx_graph_node_attr *data);
/**
* Create virtio queue counters object DevX API.
@@ -961,10 +961,9 @@ enum {
MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
};
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q = (1ULL << 0xd),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS = (1ULL << 0x1c),
-};
+#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q (1ULL << 0xd)
+#define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS (1ULL << 0x1c)
+#define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE (1ULL << 0x22)
enum {
MLX5_HCA_CAP_OPMOD_GET_MAX = 0,
@@ -2022,6 +2021,7 @@ struct mlx5_ifc_create_cq_in_bits {
enum {
MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH = 0x0022,
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
@@ -2500,6 +2500,67 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_parse_graph_arc_bits {
+ u8 start_inner_tunnel[0x1];
+ u8 reserved_at_1[0x7];
+ u8 arc_parse_graph_node[0x8];
+ u8 compare_condition_value[0x10];
+ u8 parse_graph_node_handle[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_parse_graph_flow_match_sample_bits {
+ u8 flow_match_sample_en[0x1];
+ u8 reserved_at_1[0x3];
+ u8 flow_match_sample_offset_mode[0x4];
+ u8 reserved_at_5[0x8];
+ u8 flow_match_sample_field_offset[0x10];
+ u8 reserved_at_32[0x4];
+ u8 flow_match_sample_field_offset_shift[0x4];
+ u8 flow_match_sample_field_base_offset[0x8];
+ u8 reserved_at_48[0xd];
+ u8 flow_match_sample_tunnel_mode[0x3];
+ u8 flow_match_sample_field_offset_mask[0x20];
+ u8 flow_match_sample_field_id[0x20];
+};
+
+struct mlx5_ifc_parse_graph_flex_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_64[0x20];
+ u8 header_length_base_value[0x10];
+ u8 reserved_at_112[0x4];
+ u8 header_length_field_shift[0x4];
+ u8 reserved_at_120[0x4];
+ u8 header_length_mode[0x4];
+ u8 header_length_field_offset[0x10];
+ u8 next_header_field_offset[0x10];
+ u8 reserved_at_160[0x1b];
+ u8 next_header_field_size[0x5];
+ u8 header_length_field_mask[0x20];
+ u8 reserved_at_224[0x20];
+ struct mlx5_ifc_parse_graph_flow_match_sample_bits sample_table[0x8];
+ struct mlx5_ifc_parse_graph_arc_bits input_arc[0x8];
+ struct mlx5_ifc_parse_graph_arc_bits output_arc[0x8];
+};
+
+struct mlx5_ifc_create_flex_parser_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_parse_graph_flex_bits flex;
+};
+
+struct mlx5_ifc_create_flex_parser_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_parse_graph_flex_bits flex;
+};
+
+struct mlx5_ifc_parse_graph_flex_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_parse_graph_flex_bits capability;
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
@@ -11,6 +11,7 @@ INTERNAL {
mlx5_dev_to_pci_addr;
mlx5_devx_cmd_create_cq;
+ mlx5_devx_cmd_create_flex_parser;
mlx5_devx_cmd_create_qp;
mlx5_devx_cmd_create_rq;
mlx5_devx_cmd_create_rqt;
@@ -32,6 +33,7 @@ INTERNAL {
mlx5_devx_cmd_modify_virtq;
mlx5_devx_cmd_qp_query_tis_td;
mlx5_devx_cmd_query_hca_attr;
+ mlx5_devx_cmd_query_parse_samples;
mlx5_devx_cmd_query_virtio_q_counters;
mlx5_devx_cmd_query_virtq;
mlx5_devx_get_out_command_status;