[1/3] common/mlx5: add min WQE size for striding RQ

Message ID 20211123183805.2905792-2-michaelba@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: fix MPRQ prepare

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Michael Baum Nov. 23, 2021, 6:38 p.m. UTC
From: Michael Baum <michaelba@nvidia.com>

Some devices have a WQE size limit for striding RQ. On some newer
devices, this limitation is smaller and information on its size is
provided by the firmware.

This patch adds the attribute query from firmware: the minimum required
size of WQE in a strided RQ in granularity of Bytes.

Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 16 ++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h |  1 +
 drivers/common/mlx5/mlx5_prm.h       | 11 +++++++++--
 3 files changed, 26 insertions(+), 2 deletions(-)
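
For reference, the new capability is a log2 value, so the reported minimum
WQE size is a power of two of bytes. Below is a minimal standalone sketch
(not part of this patch; the helper name "mprq_wqe_size_ok" is made up for
illustration) of how a consumer could check that a chosen stride layout
meets the reported minimum:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A striding (Multi-Packet) RQ WQE spans stride_num * stride_size bytes,
 * both powers of two, so the comparison can stay in log2 space.
 */
static bool
mprq_wqe_size_ok(uint32_t log_min_stride_wqe_sz,
		 uint32_t log_stride_num, uint32_t log_stride_size)
{
	return log_stride_num + log_stride_size >= log_min_stride_wqe_sz;
}

int
main(void)
{
	uint32_t log_min = 11; /* assume firmware reports a 2KB minimum */

	printf("64 strides of 512B: %s\n",
	       mprq_wqe_size_ok(log_min, 6, 9) ? "ok" : "too small");
	printf("2 strides of 64B: %s\n",
	       mprq_wqe_size_ok(log_min, 1, 6) ? "ok" : "too small");
	return 0;
}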
  

Comments

Ferruh Yigit Dec. 7, 2021, 1:32 p.m. UTC | #1
On 11/23/2021 6:38 PM, michaelba@nvidia.com wrote:
> From: Michael Baum <michaelba@nvidia.com>
> 
> Some devices have a WQE size limit for striding RQ. On some newer
> devices, this limitation is smaller and information on its size is
> provided by the firmware.
> 
> This patch adds the attribute query from firmware: the minimum required
> size of WQE in a strided RQ in granularity of Bytes.

? s/strided/strode/

> 
> Cc: stable@dpdk.org
> 

This is not a fix, so why request a backport?
The patch is to use a FW-provided capability value, which was not used before.

> Signed-off-by: Michael Baum <michaelba@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
  
Michael Baum Dec. 8, 2021, 12:52 p.m. UTC | #2
On 12/07/2021 3:32 PM, ferruh.yigit@intel.com wrote: 
> 
> On 11/23/2021 6:38 PM, michaelba@nvidia.com wrote:
> > From: Michael Baum <michaelba@nvidia.com>
> >
> > Some devices have a WQE size limit for striding RQ. On some newer
> > devices, this limitation is smaller and information on its size is
> > provided by the firmware.
> >
> > This patch adds the attribute query from firmware: the minimum
> > required size of WQE in a strided RQ in granularity of Bytes.
> 
> ? s/strided/strode/

Thanks for the comment.
Let's replace it with "the minimum required size of WQE buffer for striding RQ in granularity of Bytes".

> 
> >
> > Cc: stable@dpdk.org
> >
> 
> This is not a fix, so why request a backport?
> The patch is to use a FW-provided capability value, which was not used before.

It is requested for the fix coming after this one (net/mlx5: fix missing adjustment MPRQ stride devargs).
That fix uses this capability.

> 
> > Signed-off-by: Michael Baum <michaelba@nvidia.com>
> > Acked-by: Matan Azrad <matan@nvidia.com>
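
As a hedged illustration of how such a follow-up fix could use the new
capability (the helper name and the adjustment policy below are invented
for this sketch, not taken from the actual net/mlx5 patch): when the
configured stride layout falls below the firmware minimum, the stride size
can be grown until the WQE is large enough.

#include <stdint.h>
#include <stdio.h>

static void
mprq_grow_stride_size(uint32_t log_min_stride_wqe_sz,
		      uint32_t log_stride_num, uint32_t *log_stride_size)
{
	/* Grow stride size until stride_num * stride_size meets the minimum. */
	while (log_stride_num + *log_stride_size < log_min_stride_wqe_sz)
		(*log_stride_size)++;
}

int
main(void)
{
	uint32_t log_stride_size = 6; /* 64B strides requested */

	mprq_grow_stride_size(14, 5, &log_stride_size); /* 32 strides, 16KB min */
	printf("adjusted log stride size: %u\n", log_stride_size);
	return 0;
}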
  

Patch

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index e52b995ee3..a8efdbe1ae 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -823,6 +823,7 @@  mlx5_devx_cmd_query_hca_attr(void *ctx,
 {
 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+	bool hca_cap_2_sup;
 	uint64_t general_obj_types_supported = 0;
 	void *hcattr;
 	int rc, i;
@@ -832,6 +833,7 @@  mlx5_devx_cmd_query_hca_attr(void *ctx,
 			MLX5_HCA_CAP_OPMOD_GET_CUR);
 	if (!hcattr)
 		return rc;
+	hca_cap_2_sup = MLX5_GET(cmd_hca_cap, hcattr, hca_cap_2);
 	attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq);
 	attr->flow_counter_bulk_alloc_bitmap =
 			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
@@ -967,6 +969,20 @@  mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
 	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
+	if (hca_cap_2_sup) {
+		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+				MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
+				MLX5_HCA_CAP_OPMOD_GET_CUR);
+		if (!hcattr) {
+			DRV_LOG(DEBUG,
+				"Failed to query DevX HCA capabilities 2.");
+			return rc;
+		}
+		attr->log_min_stride_wqe_sz = MLX5_GET(cmd_hca_cap_2, hcattr,
+						       log_min_stride_wqe_sz);
+	}
+	if (attr->log_min_stride_wqe_sz == 0)
+		attr->log_min_stride_wqe_sz = MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
 	if (attr->qos.sup) {
 		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
 				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index d7f71646a3..37821b493e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -251,6 +251,7 @@  struct mlx5_hca_attr {
 	uint32_t log_max_mmo_decompress:5;
 	uint32_t umr_modify_entity_size_disabled:1;
 	uint32_t umr_indirect_mkey_disabled:1;
+	uint32_t log_min_stride_wqe_sz:5;
 	uint16_t max_wqe_sz_sq;
 };
 
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 2ded67e85e..8a7cb0e673 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -264,6 +264,9 @@ 
 /* The maximum log value of segments per RQ WQE. */
 #define MLX5_MAX_LOG_RQ_SEGS 5u
 
+/* Log 2 of the default size of a WQE for Multi-Packet RQ. */
+#define MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE 14U
+
 /* The alignment needed for WQ buffer. */
 #define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()
 
@@ -1342,7 +1345,9 @@  enum {
 #define MLX5_STEERING_LOGIC_FORMAT_CONNECTX_6DX 0x1
 
 struct mlx5_ifc_cmd_hca_cap_bits {
-	u8 reserved_at_0[0x30];
+	u8 reserved_at_0[0x20];
+	u8 hca_cap_2[0x1];
+	u8 reserved_at_21[0xf];
 	u8 vhca_id[0x10];
 	u8 reserved_at_40[0x20];
 	u8 reserved_at_60[0x3];
@@ -1909,7 +1914,8 @@  struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8 max_reformat_insert_offset[0x8];
 	u8 max_reformat_remove_size[0x8];
 	u8 max_reformat_remove_offset[0x8]; /* End of DW6. */
-	u8 aso_conntrack_reg_id[0x8];
+	u8 reserved_at_c0[0x3];
+	u8 log_min_stride_wqe_sz[0x5];
 	u8 reserved_at_c8[0x3];
 	u8 log_conn_track_granularity[0x5];
 	u8 reserved_at_d0[0x3];
@@ -1922,6 +1928,7 @@  struct mlx5_ifc_cmd_hca_cap_2_bits {
 
 union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+	struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
 	struct mlx5_ifc_per_protocol_networking_offload_caps_bits
 	       per_protocol_networking_offload_caps;
 	struct mlx5_ifc_qos_cap_bits qos_cap;
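
A closing note on the encoding: log_min_stride_wqe_sz is a 5-bit log2
field, and the query code above falls back to
MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE (14) when the firmware leaves the value
at zero (including when HCA capabilities 2 are not supported), i.e. a 16KB
minimum. A trivial standalone sketch of the conversion from the log value
to bytes:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t log_min_stride_wqe_sz = 14; /* default taken from the patch */
	uint32_t min_bytes = UINT32_C(1) << log_min_stride_wqe_sz;

	printf("minimum striding RQ WQE size: %u bytes\n", min_bytes);
	return 0;
}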