@@ -2193,3 +2193,75 @@ struct mlx5_devx_obj *
#endif
}
+/**
+ * Allocate a queue counter set via DevX interface.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ * Pointer to counter object on success, a NULL value otherwise and
+ * rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+{
+ struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
+ SOCKET_ID_ANY);
+ uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+
+ if (!dcs) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!dcs->obj) {
+ DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
+ "%d.", errno);
+ rte_errno = errno;
+ mlx5_free(dcs);
+ return NULL;
+ }
+ dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ return dcs;
+}
+
+/**
+ * Query queue counter values.
+ *
+ * @param[in] dcs
+ * DevX object of the queue counter set.
+ * @param[in] clear
+ * Whether hardware should clear the counters after the query or not.
+ * @param[out] out_of_buffers
+ * Number of packets dropped due to lack of a WQE for the associated QPs/RQs.
+ *
+ * @return
+ * 0 on success, a negative value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+ uint32_t *out_of_buffers)
+{
+ uint32_t out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
+ uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+ int rc;
+
+ MLX5_SET(query_q_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, op_mod, 0);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
+ MLX5_SET(query_q_counter_in, in, clear, !!clear);
+ rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+ sizeof(out));
+ if (rc) {
+ DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
+ rte_errno = rc;
+ return -rc;
+ }
+ *out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+ return 0;
+}
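
For reference, the two new commands form an alloc/query/destroy triplet with the existing mlx5_devx_cmd_destroy(). A minimal caller sketch (the function name and surrounding plumbing are hypothetical; ctx is the device context returned by the open_device() glue call):

    /* Hypothetical PMD-side usage of the new queue counter commands. */
    static int
    q_counter_example(void *ctx)
    {
        struct mlx5_devx_obj *dcs;
        uint32_t oob = 0;
        int ret;

        dcs = mlx5_devx_cmd_queue_counter_alloc(ctx);
        if (dcs == NULL)
            return -rte_errno; /* rte_errno was set by the failed alloc. */
        /* ... dcs->id would be attached to the QPs/RQs to monitor ... */
        ret = mlx5_devx_cmd_queue_counter_query(dcs, 1 /* clear */, &oob);
        if (ret == 0)
            DRV_LOG(INFO, "out_of_buffer drops: %u.", oob);
        mlx5_devx_cmd_destroy(dcs);
        return ret;
    }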
@@ -542,4 +542,10 @@ struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
__rte_internal
int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);
+
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx);
+__rte_internal
+int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+ uint32_t *out_of_buffers);
#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
@@ -901,6 +901,8 @@ enum {
MLX5_CMD_OP_SUSPEND_QP = 0x50F,
MLX5_CMD_OP_RESUME_QP = 0x510,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+ MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ACCESS_REGISTER = 0x805,
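
As an aside on how these opcodes reach the device: per the *_in_bits layouts added below, the opcode occupies bits 31:16 of the first big-endian dword of the command mailbox and the uid occupies bits 15:0, so MLX5_SET(..., opcode, ...) effectively performs the following (illustrative standalone sketch, not the real macro):

    #include <endian.h>
    #include <stdint.h>

    /* Encode command dword 0: opcode in bits 31:16, uid in bits 15:0. */
    static inline void
    cmd_set_opcode(uint32_t *in, uint16_t opcode, uint16_t uid)
    {
        in[0] = htobe32(((uint32_t)opcode << 16) | uid);
    }

    /* E.g.: cmd_set_opcode(in, MLX5_CMD_OP_ALLOC_Q_COUNTER, 0); */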
@@ -3213,6 +3215,85 @@ struct mlx5_ifc_query_regexp_register_out_bits {
u8 register_data[0x20];
};
+/* Queue counters. */
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x18];
+ u8 counter_set_id[0x8];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ u8 rx_write_requests[0x20];
+ u8 reserved_at_a0[0x20];
+ u8 rx_read_requests[0x20];
+ u8 reserved_at_e0[0x20];
+ u8 rx_atomic_requests[0x20];
+ u8 reserved_at_120[0x20];
+ u8 rx_dct_connect[0x20];
+ u8 reserved_at_160[0x20];
+ u8 out_of_buffer[0x20];
+ u8 reserved_at_1a0[0x20];
+ u8 out_of_sequence[0x20];
+ u8 reserved_at_1e0[0x20];
+ u8 duplicate_request[0x20];
+ u8 reserved_at_220[0x20];
+ u8 rnr_nak_retry_err[0x20];
+ u8 reserved_at_260[0x20];
+ u8 packet_seq_err[0x20];
+ u8 reserved_at_2a0[0x20];
+ u8 implied_nak_seq_err[0x20];
+ u8 reserved_at_2e0[0x20];
+ u8 local_ack_timeout_err[0x20];
+ u8 reserved_at_320[0xa0];
+ u8 resp_local_length_error[0x20];
+ u8 req_local_length_error[0x20];
+ u8 resp_local_qp_error[0x20];
+ u8 local_operation_error[0x20];
+ u8 resp_local_protection[0x20];
+ u8 req_local_protection[0x20];
+ u8 resp_cqe_error[0x20];
+ u8 req_cqe_error[0x20];
+ u8 req_mw_binding[0x20];
+ u8 req_bad_response[0x20];
+ u8 req_remote_invalid_request[0x20];
+ u8 resp_remote_invalid_request[0x20];
+ u8 req_remote_access_errors[0x20];
+ u8 resp_remote_access_errors[0x20];
+ u8 req_remote_operation_errors[0x20];
+ u8 req_transport_retries_exceeded[0x20];
+ u8 cq_overflow[0x20];
+ u8 resp_cqe_flush_error[0x20];
+ u8 req_cqe_flush_error[0x20];
+ u8 reserved_at_620[0x1e0];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x80];
+ u8 clear[0x1];
+ u8 reserved_at_c1[0x1f];
+ u8 reserved_at_e0[0x18];
+ u8 counter_set_id[0x8];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
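
A note on the *_bits convention used in these layouts: each u8 array element stands for a single bit, so sizeof(struct mlx5_ifc_..._bits) is the structure size in bits and offsetof() yields a field's bit offset; MLX5_ST_SZ_DW() and MLX5_GET() reduce to plain offset arithmetic on top of that. For a 32-bit, dword-aligned field such as out_of_buffer (bit offset 0x180, i.e. dword 12 of the response), the accessor boils down to a big-endian load (illustrative sketch, not the real macro):

    #include <endian.h>
    #include <stdint.h>

    /* Bit offset of out_of_buffer in query_q_counter_out, summed from
     * the layout above: 0x180 bits -> dword 0x180 / 32 = 12. */
    #define OUT_OF_BUFFER_BIT_OFF 0x180

    static inline uint32_t
    q_counter_out_of_buffer(const void *out)
    {
        const uint32_t *dw = (const uint32_t *)out;

        return be32toh(dw[OUT_OF_BUFFER_BIT_OFF / 32]);
    }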
@@ -40,6 +40,8 @@ INTERNAL {
mlx5_devx_cmd_query_parse_samples;
mlx5_devx_cmd_query_virtio_q_counters;
mlx5_devx_cmd_query_virtq;
+ mlx5_devx_cmd_queue_counter_alloc;
+ mlx5_devx_cmd_queue_counter_query;
mlx5_devx_cmd_register_read;
mlx5_devx_cmd_wq_query;
mlx5_devx_get_out_command_status;