net/mlx5: fix Rx queue initialization for scattered segment

Message ID 1603732656-24395-1-git-send-email-viacheslavo@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5: fix Rx queue initialization for scattered segment |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Slava Ovsiienko Oct. 26, 2020, 5:17 p.m. UTC
  During integration/rebase the following bugs were introduced:

- double memory allocation for the queue structure, resulting in
  the loss of part of the configuration settings and a subsequent
  crash

- an erroneous fix for the segment count logarithm

Fixes: 919ef3e26cff ("net/mlx5: configure Rx queue to support split")

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h     |  1 +
 drivers/net/mlx5/mlx5_rxq.c | 11 +----------
 2 files changed, 2 insertions(+), 10 deletions(-)
  

Comments

Raslan Darawsheh Oct. 26, 2020, 6:07 p.m. UTC | #1
Hi,

> -----Original Message-----
> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> Sent: Monday, October 26, 2020 7:18 PM
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH] net/mlx5: fix Rx queue initialization for scattered segment
> 
> During integration/rebase the following bugs were introduced:
> 
> - double memory allocation for the queue structure, resulting in
>   the loss of part of the configuration settings and a subsequent
>   crash
> 
> - an erroneous fix for the segment count logarithm
> 
> Fixes: 919ef3e26cff ("net/mlx5: configure Rx queue to support split")
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5.h     |  1 +
>  drivers/net/mlx5/mlx5_rxq.c | 11 +----------
>  2 files changed, 2 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 258be03..8d65828 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -730,6 +730,7 @@ struct mlx5_ind_table_obj {
>  };
> 
>  /* Hash Rx queue. */
> +__extension__
>  struct mlx5_hrxq {
>  	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
>  	rte_atomic32_t refcnt; /* Reference counter. */
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 1cc477a..4e17535 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1486,15 +1486,6 @@ struct mlx5_rxq_ctrl *
>  		rte_errno = ENOSPC;
>  		return NULL;
>  	}
> -	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
> -		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
> -		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf
> *),
> -		0, socket);
> -
> -	if (!tmpl) {
> -		rte_errno = ENOMEM;
> -		return NULL;
> -	}
>  	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
>  	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
>  			       MLX5_MR_BTREE_CACHE_N, socket)) {
> @@ -1560,7 +1551,7 @@ struct mlx5_rxq_ctrl *
>  		 * Determine the number of SGEs needed for a full packet
>  		 * and round it to the next power of two.
>  		 */
> -		sges_n = tmpl->rxq.rxseg_n;
> +		sges_n = log2above(tmpl->rxq.rxseg_n);
>  		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
>  			DRV_LOG(ERR,
>  				"port %u too many SGEs (%u) needed to
> handle"
> --
> 1.8.3.1

Patch squashed into the relevant commit in master-net-mlx.

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 258be03..8d65828 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -730,6 +730,7 @@  struct mlx5_ind_table_obj {
 };
 
 /* Hash Rx queue. */
+__extension__
 struct mlx5_hrxq {
 	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1cc477a..4e17535 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1486,15 +1486,6 @@  struct mlx5_rxq_ctrl *
 		rte_errno = ENOSPC;
 		return NULL;
 	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
-		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
-		0, socket);
-
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
 			       MLX5_MR_BTREE_CACHE_N, socket)) {
@@ -1560,7 +1551,7 @@  struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = tmpl->rxq.rxseg_n;
+		sges_n = log2above(tmpl->rxq.rxseg_n);
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"