[v2,14/20] net/mlx5: remove useless arguments in hrxq API

Message ID 2cb1bee89e0537dd4a8165f5cbce2bb284490b9b.1530111623.git.nelio.laranjeiro@6wind.com (mailing list archive)
State Superseded, archived
Delegated to: Shahaf Shuler
Series: net/mlx5: flow rework

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Nélio Laranjeiro June 27, 2018, 3:07 p.m. UTC
  The RSS level argument is only used to add a bit to hash_fields, which is
already provided through this API. As for the tunnel argument, it is only
used to request that such queues compute checksums on the inner-most headers,
an offload which should always be enabled. Both arguments are therefore
removed.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c |  4 ++--
 drivers/net/mlx5/mlx5_rxq.c  | 39 +++++++++---------------------------
 drivers/net/mlx5/mlx5_rxtx.h |  8 ++------
 3 files changed, 13 insertions(+), 38 deletions(-)
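
For reference, after this change a caller that wants RSS on the inner (tunnel
level) headers simply sets IBV_RX_HASH_INNER in hash_fields before calling
mlx5_hrxq_get()/mlx5_hrxq_new(); the driver derives the tunnel offload flag
from that bit (see the mlx5_rxq.c hunk below). The following is a minimal
caller-side sketch only: the helper name and the IPv4/UDP field selection are
illustrative, not part of the patch, and it assumes an rdma-core built with
tunnel support (HAVE_IBV_DEVICE_TUNNEL_SUPPORT) so that IBV_RX_HASH_INNER is
available.

#include <stdint.h>
#include <infiniband/verbs.h>	/* IBV_RX_HASH_* flags */

/*
 * Build the Verbs hash_fields mask for an IPv4/UDP flow (e.g. the outer or
 * inner part of a VXLAN flow). Requesting RSS on the inner headers is now
 * expressed by setting IBV_RX_HASH_INNER here, instead of passing the
 * removed tunnel/rss_level arguments to mlx5_hrxq_new()/mlx5_hrxq_get().
 */
static uint64_t
example_hash_fields(int inner_rss)
{
	uint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
			       IBV_RX_HASH_SRC_PORT_UDP |
			       IBV_RX_HASH_DST_PORT_UDP;

	if (inner_rss)
		hash_fields |= IBV_RX_HASH_INNER;
	return hash_fields;
}

With such a mask, mlx5_hrxq_new() requests MLX5DV_QP_CREATE_TUNNEL_OFFLOADS
whenever the IBV_RX_HASH_INNER bit is present, as shown in the mlx5_rxq.c
hunk of the patch.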
  

Comments

Yongseok Koh July 6, 2018, 2:18 a.m. UTC | #1
On Wed, Jun 27, 2018 at 05:07:46PM +0200, Nelio Laranjeiro wrote:
> The RSS level argument is only used to add a bit to hash_fields, which is
> already provided through this API. As for the tunnel argument, it is only
> used to request that such queues compute checksums on the inner-most headers,
> an offload which should always be enabled. Both arguments are therefore
> removed.
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>

Thanks
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 08e0a6556..7dda88641 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1935,13 +1935,13 @@  mlx5_flow_fate_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 					     rss_hash_default_key_len,
 					     verbs->hash_fields,
 					     (*flow->queue),
-					     flow->rss.queue_num, 0, 0);
+					     flow->rss.queue_num);
 			if (!hrxq)
 				hrxq = mlx5_hrxq_new(dev, flow->key,
 						     rss_hash_default_key_len,
 						     verbs->hash_fields,
 						     (*flow->queue),
-						     flow->rss.queue_num, 0, 0);
+						     flow->rss.queue_num);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5f17dce50..8f65e4299 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1730,10 +1730,6 @@  mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
- * @param tunnel
- *   Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- *   RSS hash on tunnel level.
  *
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1742,17 +1738,13 @@  struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      uint32_t tunnel, uint32_t rss_level)
+	      const uint16_t *queues, uint32_t queues_n)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
 	struct mlx5_ind_table_ibv *ind_tbl;
 	struct ibv_qp *qp;
 	int err;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	struct mlx5dv_qp_init_attr qp_init_attr = {0};
-#endif
 
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
@@ -1767,11 +1759,6 @@  mlx5_hrxq_new(struct rte_eth_dev *dev,
 		rss_key = rss_hash_default_key;
 	}
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	if (tunnel) {
-		qp_init_attr.comp_mask =
-				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
-	}
 	qp = mlx5_glue->dv_create_qp
 		(priv->ctx,
 		 &(struct ibv_qp_init_attr_ex){
@@ -1787,14 +1774,17 @@  mlx5_hrxq_new(struct rte_eth_dev *dev,
 				.rx_hash_key = rss_key ?
 					       (void *)(uintptr_t)rss_key :
 					       rss_hash_default_key,
-				.rx_hash_fields_mask = hash_fields |
-					(tunnel && rss_level > 1 ?
-					(uint32_t)IBV_RX_HASH_INNER : 0),
+				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
 			.pd = priv->pd,
 		 },
-		 &qp_init_attr);
+		 &(struct mlx5dv_qp_init_attr){
+			.comp_mask = (hash_fields & IBV_RX_HASH_INNER) ?
+				 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS :
+				 0,
+			.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
+		 });
 #else
 	qp = mlx5_glue->create_qp_ex
 		(priv->ctx,
@@ -1828,8 +1818,6 @@  mlx5_hrxq_new(struct rte_eth_dev *dev,
 	hrxq->qp = qp;
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = hash_fields;
-	hrxq->tunnel = tunnel;
-	hrxq->rss_level = rss_level;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
 	rte_atomic32_inc(&hrxq->refcnt);
 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
@@ -1855,10 +1843,6 @@  mlx5_hrxq_new(struct rte_eth_dev *dev,
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
- * @param tunnel
- *   Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- *   RSS hash on tunnel level
  *
  * @return
  *   An hash Rx queue on success.
@@ -1867,8 +1851,7 @@  struct mlx5_hrxq *
 mlx5_hrxq_get(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      uint32_t tunnel, uint32_t rss_level)
+	      const uint16_t *queues, uint32_t queues_n)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -1883,10 +1866,6 @@  mlx5_hrxq_get(struct rte_eth_dev *dev,
 			continue;
 		if (hrxq->hash_fields != hash_fields)
 			continue;
-		if (hrxq->tunnel != tunnel)
-			continue;
-		if (hrxq->rss_level != rss_level)
-			continue;
 		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			continue;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 51f7f678b..bb67c32a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -157,8 +157,6 @@  struct mlx5_hrxq {
 	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
 	struct ibv_qp *qp; /* Verbs queue pair. */
 	uint64_t hash_fields; /* Verbs Hash fields. */
-	uint32_t tunnel; /* Tunnel type. */
-	uint32_t rss_level; /* RSS on tunnel level. */
 	uint32_t rss_key_len; /* Hash key length in bytes. */
 	uint8_t rss_key[]; /* Hash key. */
 };
@@ -274,13 +272,11 @@  void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
-				const uint16_t *queues, uint32_t queues_n,
-				uint32_t tunnel, uint32_t rss_level);
+				const uint16_t *queues, uint32_t queues_n);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
-				const uint16_t *queues, uint32_t queues_n,
-				uint32_t tunnel, uint32_t rss_level);
+				const uint16_t *queues, uint32_t queues_n);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);