[v2,2/3] net/mlx5: support CQE compression on Windows

Message ID 20230516112548.6848-3-talshn@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series Windows performance enhancements

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Tal Shnaiderman May 16, 2023, 11:25 a.m. UTC
  CQE Compression reduces PCI overhead by coalescing and compressing
multiple CQEs into a single merged CQE.

Add support for the CQE compression feature on Windows.
The feature is enabled by default unless it is not supported by the HW
or the rxq_cqe_comp_en PMD argument explicitly disables it (a minimal
sketch of this gating logic follows the diffstat below).

Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Tested-by: Pier Damouny <pdamouny@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c |  2 ++
 drivers/common/mlx5/mlx5_devx_cmds.h |  1 +
 drivers/net/mlx5/windows/mlx5_os.c   | 12 ++++++++++++
 3 files changed, 15 insertions(+)
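
For reference, below is a minimal standalone sketch (not the PMD code itself) of the
gating logic the patch introduces: the HCA capability bit matching the CQE size gates
the feature, and the rxq_cqe_comp_en devarg can only switch it off. The struct and
function names are hypothetical placeholders, not actual mlx5 identifiers.

/*
 * Minimal sketch, not the actual mlx5 PMD code: derive the effective Rx CQE
 * compression setting from the device capability and the rxq_cqe_comp_en
 * devarg. Struct and function names here are hypothetical placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the two HCA capability bits queried in the patch. */
struct hca_caps {
	unsigned int cqe_compression:1;     /* compression with 64B CQEs */
	unsigned int cqe_compression_128:1; /* compression with 128B CQEs */
};

/* Pick the capability bit matching the CQE size, mirroring the
 * RTE_CACHE_LINE_SIZE == 128 conditional in mlx5_os_capabilities_prepare(). */
static bool
cqe_comp_supported(const struct hca_caps *caps, unsigned int cache_line_size)
{
	return cache_line_size == 128 ? caps->cqe_compression_128 :
					caps->cqe_compression;
}

int
main(void)
{
	struct hca_caps caps = { .cqe_compression = 1, .cqe_compression_128 = 0 };
	unsigned int cache_line_size = 64; /* assume a 64B cache-line host */
	int rxq_cqe_comp_en = 1;           /* devarg default: enabled */
	bool enabled = cqe_comp_supported(&caps, cache_line_size) &&
		       rxq_cqe_comp_en;

	printf("Rx CQE compression %s\n", enabled ? "enabled" : "disabled");
	return 0;
}

In practice the feature is disabled explicitly by passing rxq_cqe_comp_en=0 in the
device devargs.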
  

Patch

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 096bd1d520..a31e4995f5 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -1062,6 +1062,8 @@  mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
 	attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
 						mini_cqe_resp_flow_tag);
+	attr->cqe_compression_128 = MLX5_GET(cmd_hca_cap, hcattr,
+						cqe_compression_128);
 	attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
 						 mini_cqe_resp_l3_l4_tag);
 	attr->enhanced_cqe_compression = MLX5_GET(cmd_hca_cap, hcattr,
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 9e7992b1c6..edcd867c4e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -284,6 +284,7 @@  struct mlx5_hca_attr {
 	uint16_t max_wqe_sz_sq;
 	uint32_t striding_rq:1;
 	uint32_t ext_stride_num_range:1;
+	uint32_t cqe_compression_128:1;
 	uint32_t set_reg_c:8;
 	uint32_t nic_flow_table:1;
 	uint32_t modify_outer_ip_ecn:1;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 0caa8931e4..6527269663 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -237,6 +237,18 @@  mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 	} else {
 		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
 	}
+	sh->dev_cap.cqe_comp = 0;
+#if (RTE_CACHE_LINE_SIZE == 128)
+	if (hca_attr->cqe_compression_128)
+		sh->dev_cap.cqe_comp = 1;
+	DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
+		sh->dev_cap.cqe_comp ? "" : "not ");
+#else
+	if (hca_attr->cqe_compression)
+		sh->dev_cap.cqe_comp = 1;
+	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+		sh->dev_cap.cqe_comp ? "" : "not ");
+#endif
 	snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
 		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
 		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),