[v2,02/20] net/mlx5: handle drop queues are regular queues

Message ID 1368a8720f3ec3c40e47a6b1d9ef1edf0f38a646.1530111623.git.nelio.laranjeiro@6wind.com (mailing list archive)
State Superseded, archived
Delegated to: Shahaf Shuler
Series: net/mlx5: flow rework

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Nélio Laranjeiro June 27, 2018, 3:07 p.m. UTC
  Drop queues are only used by flows, due to the Verbs API; the
information on whether the fate of the flow is to drop or not is already
present in the flow itself.  Due to this, drop queues can be fully
mapped onto regular queues.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5.c      |  24 ++--
 drivers/net/mlx5/mlx5.h      |  14 ++-
 drivers/net/mlx5/mlx5_flow.c |  94 +++++++-------
 drivers/net/mlx5/mlx5_rxq.c  | 232 +++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h |   8 ++
 5 files changed, 310 insertions(+), 62 deletions(-)
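
For context, a minimal usage sketch of the reworked API (assuming the
driver's internal headers; this mirrors how mlx5_verbs_max_prio() in this
patch consumes the drop queue):

#include "mlx5.h"
#include "mlx5_rxtx.h"

/* Acquire the shared drop hash Rx queue and release it again; its QP
 * can back ibv_create_flow() for any rule whose fate is a drop. */
static int
drop_hrxq_smoke_test(struct rte_eth_dev *dev)
{
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);

	if (!drop)
		return -rte_errno; /* rte_errno set by the callee */
	mlx5_hrxq_drop_release(dev, drop);
	return 0;
}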
  

Comments

Yongseok Koh July 3, 2018, 1:07 a.m. UTC | #1
On Wed, Jun 27, 2018 at 05:07:34PM +0200, Nelio Laranjeiro wrote:
> Drop queues are only used by flows, due to the Verbs API; the
> information on whether the fate of the flow is to drop or not is already
> present in the flow itself.  Due to this, drop queues can be fully
> mapped onto regular queues.

The title doesn't look good. Maybe typo?
	net/mlx5: handle drop queues are regular queues
'are' -> 'as'?

> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
>  drivers/net/mlx5/mlx5.c      |  24 ++--
>  drivers/net/mlx5/mlx5.h      |  14 ++-
>  drivers/net/mlx5/mlx5_flow.c |  94 +++++++-------
>  drivers/net/mlx5/mlx5_rxq.c  | 232 +++++++++++++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_rxtx.h |   8 ++
>  5 files changed, 310 insertions(+), 62 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index d01c0f6cf..aba6c1f9f 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -260,7 +260,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
>  		priv->txqs_n = 0;
>  		priv->txqs = NULL;
>  	}
> -	mlx5_flow_delete_drop_queue(dev);
>  	mlx5_mprq_free_mp(dev);
>  	mlx5_mr_release(dev);
>  	if (priv->pd != NULL) {
> @@ -1100,22 +1099,15 @@ mlx5_dev_spawn_one(struct rte_device *dpdk_dev,
>  	mlx5_link_update(eth_dev, 0);
>  	/* Store device configuration on private structure. */
>  	priv->config = config;
> -	/* Create drop queue. */
> -	err = mlx5_flow_create_drop_queue(eth_dev);
> -	if (err) {
> -		DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
> -			eth_dev->data->port_id, strerror(rte_errno));
> -		err = rte_errno;
> -		goto error;
> -	}
>  	/* Supported Verbs flow priority number detection. */
> -	if (verb_priorities == 0)
> -		verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
> -	if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
> -		DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
> -			eth_dev->data->port_id, verb_priorities);
> -		err = ENOTSUP;
> -		goto error;
> +	if (verb_priorities == 0) {
> +		err = mlx5_verbs_max_prio(eth_dev);
> +		if (err < 0) {
> +			DRV_LOG(ERR, "port %u wrong Verbs flow priorities",
> +				eth_dev->data->port_id);
> +			goto error;
> +		}
> +		verb_priorities = err;
>  	}
>  	priv->config.max_verbs_prio = verb_priorities;
>  	/*
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 6674a77d4..11aa0932f 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -131,9 +131,6 @@ enum mlx5_verbs_alloc_type {
>  	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
>  };
>  
> -/* 8 Verbs priorities. */
> -#define MLX5_VERBS_FLOW_PRIO_8 8
> -
>  /**
>   * Verbs allocator needs a context to know in the callback which kind of
>   * resources it is allocating.
> @@ -145,6 +142,12 @@ struct mlx5_verbs_alloc_ctx {
>  
>  LIST_HEAD(mlx5_mr_list, mlx5_mr);
>  
> +/* Flow drop context necessary due to Verbs API. */
> +struct mlx5_drop {
> +	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
> +	struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
> +};
> +
>  struct priv {
>  	LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
>  	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
> @@ -175,7 +178,7 @@ struct priv {
>  	struct rte_intr_handle intr_handle; /* Interrupt handler. */
>  	unsigned int (*reta_idx)[]; /* RETA index table. */
>  	unsigned int reta_idx_n; /* RETA index size. */
> -	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
> +	struct mlx5_drop drop; /* Flow drop queues. */

priv->drop sounds strange. Why did you change the name?
How about priv->flow_drop if you didn't like the full name?

>  	struct mlx5_flows flows; /* RTE Flow rules. */
>  	struct mlx5_flows ctrl_flows; /* Control flow rules. */
>  	struct {
> @@ -306,7 +309,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
>  
>  /* mlx5_flow.c */
>  
> -unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
> +int mlx5_verbs_max_prio(struct rte_eth_dev *dev);
> +void mlx5_flow_print(struct rte_flow *flow);
>  int mlx5_flow_validate(struct rte_eth_dev *dev,
>  		       const struct rte_flow_attr *attr,
>  		       const struct rte_flow_item items[],
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index a45cb06e1..8b5b695d4 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -75,6 +75,58 @@ struct ibv_spec_header {
>  	uint16_t size;
>  };
>  
> + /**
> +  * Get the maximum number of priorities available.
> +  *
> +  * @param dev
> +  *   Pointer to Ethernet device.
> +  *
> +  * @return
> +  *   number of supported Verbs flow priorities on success, a negative errno
> +  *   value otherwise and rte_errno is set.
> +  */
> +int
> +mlx5_verbs_max_prio(struct rte_eth_dev *dev)
> +{
> +	struct {
> +		struct ibv_flow_attr attr;
> +		struct ibv_flow_spec_eth eth;
> +		struct ibv_flow_spec_action_drop drop;
> +	} flow_attr = {
> +		.attr = {
> +			.num_of_specs = 2,
> +		},
> +		.eth = {
> +			.type = IBV_FLOW_SPEC_ETH,
> +			.size = sizeof(struct ibv_flow_spec_eth),
> +		},
> +		.drop = {
> +			.size = sizeof(struct ibv_flow_spec_action_drop),
> +			.type = IBV_FLOW_SPEC_ACTION_DROP,
> +		},
> +	};
> +	struct ibv_flow *flow;
> +	uint32_t verb_priorities;
> +	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
> +
> +	if (!drop) {
> +		rte_errno = ENOTSUP;
> +		return -rte_errno;
> +	}
> +	for (verb_priorities = 0; 1; verb_priorities++) {
> +		flow_attr.attr.priority = verb_priorities;
> +		flow = mlx5_glue->create_flow(drop->qp,
> +					      &flow_attr.attr);
> +		if (!flow)
> +			break;
> +		claim_zero(mlx5_glue->destroy_flow(flow));
> +	}
> +	mlx5_hrxq_drop_release(dev, drop);
> +	DRV_LOG(INFO, "port %u flow maximum priority: %d",
> +		dev->data->port_id, verb_priorities);
> +	return verb_priorities;
> +}
> +
>  /**
>   * Convert a flow.
>   *
> @@ -184,32 +236,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
>  	}
>  }
>  
> -/**
> - * Create drop queue.
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - *
> - * @return
> - *   0 on success, a negative errno value otherwise and rte_errno is set.
> - */
> -int
> -mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)
> -{
> -	return 0;
> -}
> -
> -/**
> - * Delete drop queue.
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - */
> -void
> -mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)
> -{
> -}
> -
>  /**
>   * Remove all flows.
>   *
> @@ -292,6 +318,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
>  	struct priv *priv = dev->data->dev_private;
>  	const struct rte_flow_attr attr = {
>  		.ingress = 1,
> +		.priority = priv->config.max_verbs_prio - 1,
>  	};
>  	struct rte_flow_item items[] = {
>  		{
> @@ -830,18 +857,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
>  	}
>  	return 0;
>  }
> -
> -/**
> - * Detect number of Verbs flow priorities supported.
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - *
> - * @return
> - *   number of supported Verbs flow priority.
> - */
> -unsigned int
> -mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)
> -{
> -	return 8;
> -}
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 17db7c160..5f17dce50 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1949,3 +1949,235 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
>  	}
>  	return ret;
>  }
> +
> +/**
> + * Create a drop Rx queue Verbs object.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + *
> + * @return
> + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> + */
> +struct mlx5_rxq_ibv *
> +mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
> +{
> +	struct priv *priv = dev->data->dev_private;
> +	struct ibv_cq *cq;
> +	struct ibv_wq *wq = NULL;
> +	struct mlx5_rxq_ibv *rxq;
> +
> +	if (priv->drop.rxq)
> +		return priv->drop.rxq;
> +	cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
> +	if (!cq) {
> +		DEBUG("port %u cannot allocate CQ for drop queue",
> +		      dev->data->port_id);
> +		rte_errno = errno;
> +		goto error;
> +	}
> +	wq = mlx5_glue->create_wq(priv->ctx,
> +		 &(struct ibv_wq_init_attr){
> +			.wq_type = IBV_WQT_RQ,
> +			.max_wr = 1,
> +			.max_sge = 1,
> +			.pd = priv->pd,
> +			.cq = cq,
> +		 });
> +	if (!wq) {
> +		DEBUG("port %u cannot allocate WQ for drop queue",
> +		      dev->data->port_id);
> +		rte_errno = errno;
> +		goto error;
> +	}
> +	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
> +	if (!rxq) {
> +		DEBUG("port %u cannot allocate drop Rx queue memory",
> +		      dev->data->port_id);
> +		rte_errno = ENOMEM;
> +		goto error;
> +	}
> +	rxq->cq = cq;
> +	rxq->wq = wq;
> +	priv->drop.rxq = rxq;
> +	return rxq;
> +error:
> +	if (wq)
> +		claim_zero(mlx5_glue->destroy_wq(wq));
> +	if (cq)
> +		claim_zero(mlx5_glue->destroy_cq(cq));
> +	return NULL;
> +}
> +
> +/**
> + * Release a drop Rx queue Verbs object.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + * @param rxq
> + *   Pointer to the drop Verbs Rx queue.
> + *
> + * @return
> + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> + */
> +void
> +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev, struct mlx5_rxq_ibv *rxq)

If rxq for drop is saved in priv->drop.rxq, then why does it have to get rxq
pointer as an argument? Looks redundant.
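
For illustration, a sketch of what I mean (fetching the single instance
from priv instead of taking it as an argument; adapted from your own
function body below):

void
mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ibv *rxq = priv->drop.rxq;

	if (rxq->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
	if (rxq->cq)
		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
	rte_free(rxq);
	priv->drop.rxq = NULL;
}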

> +{
> +	if (rxq->wq)
> +		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
> +	if (rxq->cq)
> +		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
> +	rte_free(rxq);
> +	((struct priv *)dev->data->dev_private)->drop.rxq = NULL;
> +}
> +
> +/**
> + * Create a drop indirection table.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + *
> + * @return
> + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> + */
> +struct mlx5_ind_table_ibv *
> +mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
> +{
> +	struct priv *priv = dev->data->dev_private;
> +	struct mlx5_ind_table_ibv *ind_tbl;
> +	struct mlx5_rxq_ibv *rxq;
> +	struct mlx5_ind_table_ibv tmpl;
> +
> +	rxq = mlx5_rxq_ibv_drop_new(dev);
> +	if (!rxq)
> +		return NULL;
> +	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
> +		(priv->ctx,
> +		 &(struct ibv_rwq_ind_table_init_attr){
> +			.log_ind_tbl_size = 0,
> +			.ind_tbl = &rxq->wq,
> +			.comp_mask = 0,
> +		 });
> +	if (!tmpl.ind_table) {
> +		DEBUG("port %u cannot allocate indirection table for drop"
> +		      " queue",
> +		      dev->data->port_id);
> +		rte_errno = errno;
> +		goto error;
> +	}
> +	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
> +	if (!ind_tbl) {
> +		rte_errno = ENOMEM;
> +		goto error;
> +	}
> +	ind_tbl->ind_table = tmpl.ind_table;
> +	return ind_tbl;
> +error:
> +	mlx5_rxq_ibv_drop_release(dev, rxq);
> +	return NULL;
> +}
> +
> +/**
> + * Release a drop indirection table.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + * @param ind_tbl
> + *   Indirection table to release.
> + */
> +void
> +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
> +				struct mlx5_ind_table_ibv *ind_tbl)

ind_tbl is a redundant argument. Can be referenced by
priv->drop.hrxq->ind_table.

> +{
> +	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
> +	mlx5_rxq_ibv_drop_release
> +		(dev, ((struct priv *)dev->data->dev_private)->drop.rxq);
> +	rte_free(ind_tbl);
> +}
> +
> +/**
> + * Create a drop Rx Hash queue.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + *
> + * @return
> + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> + */
> +struct mlx5_hrxq *
> +mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
> +{
> +	struct priv *priv = dev->data->dev_private;
> +	struct mlx5_ind_table_ibv *ind_tbl;
> +	struct ibv_qp *qp;
> +	struct mlx5_hrxq *hrxq;
> +
> +	if (priv->drop.hrxq) {
> +		rte_atomic32_inc(&priv->drop.hrxq->refcnt);
> +		return priv->drop.hrxq;
> +	}
> +	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
> +	if (!ind_tbl)
> +		return NULL;
> +	qp = mlx5_glue->create_qp_ex(priv->ctx,
> +		 &(struct ibv_qp_init_attr_ex){
> +			.qp_type = IBV_QPT_RAW_PACKET,
> +			.comp_mask =
> +				IBV_QP_INIT_ATTR_PD |
> +				IBV_QP_INIT_ATTR_IND_TABLE |
> +				IBV_QP_INIT_ATTR_RX_HASH,
> +			.rx_hash_conf = (struct ibv_rx_hash_conf){
> +				.rx_hash_function =
> +					IBV_RX_HASH_FUNC_TOEPLITZ,
> +				.rx_hash_key_len = rss_hash_default_key_len,
> +				.rx_hash_key = rss_hash_default_key,
> +				.rx_hash_fields_mask = 0,
> +				},
> +			.rwq_ind_tbl = ind_tbl->ind_table,
> +			.pd = priv->pd
> +		 });
> +	if (!qp) {
> +		DEBUG("port %u cannot allocate QP for drop queue",
> +		      dev->data->port_id);
> +		rte_errno = errno;
> +		goto error;
> +	}
> +	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
> +	if (!hrxq) {
> +		DRV_LOG(WARNING,
> +			"port %u cannot allocate memory for drop queue",
> +			dev->data->port_id);
> +		rte_errno = ENOMEM;
> +		goto error;
> +	}
> +	hrxq->ind_table = ind_tbl;
> +	hrxq->qp = qp;
> +	priv->drop.hrxq = hrxq;
> +	rte_atomic32_set(&hrxq->refcnt, 1);
> +	return hrxq;
> +error:
> +	if (ind_tbl)
> +		mlx5_ind_table_ibv_drop_release(dev, ind_tbl);
> +	return NULL;
> +}
> +
> +/**
> + * Release a drop hash Rx queue.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + * @param hrxq
> + *   Pointer to Hash Rx queue to release.
> + */
> +void
> +mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)

hrxq is a redundant argument. Can be referenced by priv->drop.hrxq.

Thanks,
Yongseok

> +{
> +	struct priv *priv = dev->data->dev_private;
> +
> +	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
> +		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
> +		mlx5_ind_table_ibv_drop_release(dev, hrxq->ind_table);
> +		rte_free(hrxq);
> +		priv->drop.hrxq = NULL;
> +	}
> +}
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index f53bb43c3..51f7f678b 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -245,6 +245,9 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
>  struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
>  int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
>  int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
> +struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
> +void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev,
> +			       struct mlx5_rxq_ibv *rxq);
>  int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
>  struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
>  				   uint16_t desc, unsigned int socket,
> @@ -265,6 +268,9 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
>  int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
>  			       struct mlx5_ind_table_ibv *ind_tbl);
>  int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
> +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
> +void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
> +				     struct mlx5_ind_table_ibv *ind_tbl);
>  struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
>  				const uint8_t *rss_key, uint32_t rss_key_len,
>  				uint64_t hash_fields,
> @@ -277,6 +283,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
>  				uint32_t tunnel, uint32_t rss_level);
>  int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
>  int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
> +struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
> +void mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
>  uint64_t mlx5_get_rx_port_offloads(void);
>  uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
>  
> -- 
> 2.18.0
>
  
Nélio Laranjeiro July 3, 2018, 7:17 a.m. UTC | #2
On Mon, Jul 02, 2018 at 06:07:03PM -0700, Yongseok Koh wrote:
> On Wed, Jun 27, 2018 at 05:07:34PM +0200, Nelio Laranjeiro wrote:
> > Drop queues are only used by flows, due to the Verbs API; the
> > information on whether the fate of the flow is to drop or not is already
> > present in the flow itself.  Due to this, drop queues can be fully
> > mapped onto regular queues.
> 
> The title doesn't look good. Maybe typo?
> 	net/mlx5: handle drop queues are regular queues
> 'are' -> 'as'?
[...]

Right,

> >  struct priv {
> >  	LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
> >  	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
> > @@ -175,7 +178,7 @@ struct priv {
> >  	struct rte_intr_handle intr_handle; /* Interrupt handler. */
> >  	unsigned int (*reta_idx)[]; /* RETA index table. */
> >  	unsigned int reta_idx_n; /* RETA index size. */
> > -	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
> > +	struct mlx5_drop drop; /* Flow drop queues. */
> 
> priv->drop sounds strange. Why did you change the name?
> How about priv->flow_drop if you didn't like the full name?
>[...]

flow_drop_queue is also confusing as it is a drop hash Rx queue; it can
be used without a flow, as a regular queue.
Renaming it to drop_hrxq.
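i.e. (sketch against this patch):

-	struct mlx5_drop drop; /* Flow drop queues. */
+	struct mlx5_drop drop_hrxq; /* Flow drop queues. */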

> > +/**
> > + * Release a drop Rx queue Verbs object.
> > + *
> > + * @param dev
> > + *   Pointer to Ethernet device.
> > + * @param rxq
> > + *   Pointer to the drop Verbs Rx queue.
> > + *
> > + * @return
> > + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> > + */
> > +void
> > +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev, struct mlx5_rxq_ibv *rxq)
> 
> If rxq for drop is saved in priv->drop.rxq, then why does it have to get rxq
> pointer as an argument? Looks redundant.
>[...]

Like all hrxqs, indirection tables, and rxqs, which are stored in priv
inside a list or an array.  Priv is used as a storage place which is
only accessed through *_{new,get,release} functions.
This is also to keep consistency between the regular hrxqs and the drop hrxq.
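All of them follow the same contract; a generic sketch of the pattern
(hypothetical names, mirroring the logic of mlx5_hrxq_drop_new/release
in this patch):

#include <rte_atomic.h>
#include <rte_malloc.h>

struct res {
	rte_atomic32_t refcnt;
};

static struct res *singleton; /* lives in priv in the real driver */

/* Acquire: create on first use, otherwise take a reference. */
static struct res *
res_new(void)
{
	if (singleton) {
		rte_atomic32_inc(&singleton->refcnt);
		return singleton;
	}
	singleton = rte_calloc(__func__, 1, sizeof(*singleton), 0);
	if (!singleton)
		return NULL;
	rte_atomic32_set(&singleton->refcnt, 1);
	return singleton;
}

/* Release: destroy when the last reference goes away. */
static void
res_release(struct res *r)
{
	if (rte_atomic32_dec_and_test(&r->refcnt)) {
		rte_free(r);
		singleton = NULL;
	}
}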

> > +void
> > +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
> > +				struct mlx5_ind_table_ibv *ind_tbl)
> 
> ind_tbl is a redundant argument. Can be referenced by
> priv->drop.hrxq->ind_table.
>[...]

Ditto.

> > +/**
> > + * Release a drop hash Rx queue.
> > + *
> > + * @param dev
> > + *   Pointer to Ethernet device.
> > + * @param hrxq
> > + *   Pointer to Hash Rx queue to release.
> > + */
> > +void
> > +mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
>[...]
> 
> hrxq is a redundant argument. Can be referenced by priv->drop.hrxq.
>[...]
 
Ditto.

Thanks,
  
Yongseok Koh July 3, 2018, 5:05 p.m. UTC | #3
On Tue, Jul 03, 2018 at 09:17:56AM +0200, Nélio Laranjeiro wrote:
> On Mon, Jul 02, 2018 at 06:07:03PM -0700, Yongseok Koh wrote:
> > On Wed, Jun 27, 2018 at 05:07:34PM +0200, Nelio Laranjeiro wrote:
[...]
> flow_drop_queue is also confusing as it is a drop hash Rx queue; it can
> be used without a flow, as a regular queue.
> Renaming it to drop_hrxq.

Not so critical to me, but the entry would have fields with repeating names:
	priv->drop_hrxq.hrxq and priv->drop_hrxq.rxq
Still sounds confusing...

> > > +/**
> > > + * Release a drop Rx queue Verbs object.
> > > + *
> > > + * @param dev
> > > + *   Pointer to Ethernet device.
> > > + * @param rxq
> > > + *   Pointer to the drop Verbs Rx queue.
> > > + *
> > > + * @return
> > > + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> > > + */
> > > +void
> > > +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev, struct mlx5_rxq_ibv *rxq)
> > 
> > If rxq for drop is saved in priv->drop.rxq, then why does it have to get rxq
> > pointer as an argument? Looks redundant.
> >[...]
> 
> Like all hrxqs, indirection tables, and rxqs, which are stored in priv
> inside a list or an array.

However, the assumption is that there's only one drop queue, while the regular
ones have multiple instances.

> Priv is used as a storage place which is only accessed through
> *_{new,get,release} functions.

Yes, that's what I'm telling you. *_{new,get,release}() accesses priv, so why
is the pointer (which is saved in priv) needed as an argument?

> This is also to keep consistency between the regular hrxqs and the drop hrxq.
Not sure why that consistency has to be kept.

int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);

mlx5_rxq_release() takes an index as the instances are stored in an array and
mlx5_hrxq_release() takes a pointer as the instances are stored in a list.

Then, what if there's only one instance and no need to search?
Not taking such an argument sounds more consistent...
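
Something like this (sketch; the drop objects are single instances stored
in priv, so no lookup argument is needed):

	void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
	void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
	void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);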

Thanks,
Yongseok

> 
> > > +void
> > > +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
> > > +				struct mlx5_ind_table_ibv *ind_tbl)
> > 
> > ind_tbl is a redundant argument. Can be referenced by
> > priv->drop.hrxq->ind_table.
> >[...]
> 
> Ditto.
> 
> > > +/**
> > > + * Release a drop hash Rx queue.
> > > + *
> > > + * @param dev
> > > + *   Pointer to Ethernet device.
> > > + * @param hrxq
> > > + *   Pointer to Hash Rx queue to release.
> > > + */
> > > +void
> > > +mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
> >[...]
> > 
> > hrxq is a redundant argument. Can be referenced by priv->drop.hrxq.
> >[...]
>  
> Ditto.
> 
> Thanks,
> 
> -- 
> Nélio Laranjeiro
> 6WIND
  
Nélio Laranjeiro July 4, 2018, 6:44 a.m. UTC | #4
On Tue, Jul 03, 2018 at 10:05:05AM -0700, Yongseok Koh wrote:
> On Tue, Jul 03, 2018 at 09:17:56AM +0200, Nélio Laranjeiro wrote:
> > On Mon, Jul 02, 2018 at 06:07:03PM -0700, Yongseok Koh wrote:
> > > On Wed, Jun 27, 2018 at 05:07:34PM +0200, Nelio Laranjeiro wrote:
> [...]
> > flow_drop_queue is also confusing as it is a drop hash Rx queue; it can
> > be used without a flow, as a regular queue.
> > Renaming it to drop_hrxq.
> 
> Not so critical to me, but the entry would have fields with repeating names:
> 	priv->drop_hrxq.hrxq and priv->drop_hrxq.rxq
> Still sounds confusing...
> 
> > > > +/**
> > > > + * Release a drop Rx queue Verbs object.
> > > > + *
> > > > + * @param dev
> > > > + *   Pointer to Ethernet device.
> > > > + * @param rxq
> > > > + *   Pointer to the drop Verbs Rx queue.
> > > > + *
> > > > + * @return
> > > > + *   The Verbs object initialised, NULL otherwise and rte_errno is set.
> > > > + */
> > > > +void
> > > > +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev, struct mlx5_rxq_ibv *rxq)
> > > 
> > > If rxq for drop is saved in priv->drop.rxq, then why does it have to get rxq
> > > pointer as an argument? Looks redundant.
> > >[...]
> > 
> > Like all hrxqs, indirection tables, and rxqs, which are stored in priv
> > inside a list or an array.
> 
> However, the assumption is that there's only one drop queue, while the regular
> ones have multiple instances.
> 
> > Priv is used as a storage place which is only accessed through
> > *_{new,get,release} functions.
> 
> Yes, that's what I'm telling you. *_{new,get,release}() accesses priv, so why
> is the pointer (which is saved in priv) needed as an argument?
> 
> > This is also to keep consistency between the regular hrxqs and the drop hrxq.
> Not sure why that consistency has to be kept.
> 
> int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
> int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
> 
> mlx5_rxq_release() takes an index as the instances are stored in an array and
> mlx5_hrxq_release() takes a pointer as the instances are stored in a list.
> 
> Then, what if there's only one instance and no need to search?
> Not taking such an argument sounds more consistent...
>[...]

Even though I prefer to keep consistency across identical functionality (in
this case creating a hash Rx queue), I'll make the changes to please
you.

Regards,
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d01c0f6cf..aba6c1f9f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -260,7 +260,6 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 		priv->txqs_n = 0;
 		priv->txqs = NULL;
 	}
-	mlx5_flow_delete_drop_queue(dev);
 	mlx5_mprq_free_mp(dev);
 	mlx5_mr_release(dev);
 	if (priv->pd != NULL) {
@@ -1100,22 +1099,15 @@  mlx5_dev_spawn_one(struct rte_device *dpdk_dev,
 	mlx5_link_update(eth_dev, 0);
 	/* Store device configuration on private structure. */
 	priv->config = config;
-	/* Create drop queue. */
-	err = mlx5_flow_create_drop_queue(eth_dev);
-	if (err) {
-		DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
-			eth_dev->data->port_id, strerror(rte_errno));
-		err = rte_errno;
-		goto error;
-	}
 	/* Supported Verbs flow priority number detection. */
-	if (verb_priorities == 0)
-		verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
-	if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
-		DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
-			eth_dev->data->port_id, verb_priorities);
-		err = ENOTSUP;
-		goto error;
+	if (verb_priorities == 0) {
+		err = mlx5_verbs_max_prio(eth_dev);
+		if (err < 0) {
+			DRV_LOG(ERR, "port %u wrong Verbs flow priorities",
+				eth_dev->data->port_id);
+			goto error;
+		}
+		verb_priorities = err;
 	}
 	priv->config.max_verbs_prio = verb_priorities;
 	/*
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6674a77d4..11aa0932f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -131,9 +131,6 @@  enum mlx5_verbs_alloc_type {
 	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
 };
 
-/* 8 Verbs priorities. */
-#define MLX5_VERBS_FLOW_PRIO_8 8
-
 /**
  * Verbs allocator needs a context to know in the callback which kind of
  * resources it is allocating.
@@ -145,6 +142,12 @@  struct mlx5_verbs_alloc_ctx {
 
 LIST_HEAD(mlx5_mr_list, mlx5_mr);
 
+/* Flow drop context necessary due to Verbs API. */
+struct mlx5_drop {
+	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
+	struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
+};
+
 struct priv {
 	LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
@@ -175,7 +178,7 @@  struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+	struct mlx5_drop drop; /* Flow drop queues. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	struct {
@@ -306,7 +309,8 @@  int mlx5_traffic_restart(struct rte_eth_dev *dev);
 
 /* mlx5_flow.c */
 
-unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
+int mlx5_verbs_max_prio(struct rte_eth_dev *dev);
+void mlx5_flow_print(struct rte_flow *flow);
 int mlx5_flow_validate(struct rte_eth_dev *dev,
 		       const struct rte_flow_attr *attr,
 		       const struct rte_flow_item items[],
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a45cb06e1..8b5b695d4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,6 +75,58 @@  struct ibv_spec_header {
 	uint16_t size;
 };
 
+ /**
+  * Get the maximum number of priorities available.
+  *
+  * @param dev
+  *   Pointer to Ethernet device.
+  *
+  * @return
+  *   number of supported Verbs flow priorities on success, a negative errno
+  *   value otherwise and rte_errno is set.
+  */
+int
+mlx5_verbs_max_prio(struct rte_eth_dev *dev)
+{
+	struct {
+		struct ibv_flow_attr attr;
+		struct ibv_flow_spec_eth eth;
+		struct ibv_flow_spec_action_drop drop;
+	} flow_attr = {
+		.attr = {
+			.num_of_specs = 2,
+		},
+		.eth = {
+			.type = IBV_FLOW_SPEC_ETH,
+			.size = sizeof(struct ibv_flow_spec_eth),
+		},
+		.drop = {
+			.size = sizeof(struct ibv_flow_spec_action_drop),
+			.type = IBV_FLOW_SPEC_ACTION_DROP,
+		},
+	};
+	struct ibv_flow *flow;
+	uint32_t verb_priorities;
+	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
+
+	if (!drop) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	for (verb_priorities = 0; 1; verb_priorities++) {
+		flow_attr.attr.priority = verb_priorities;
+		flow = mlx5_glue->create_flow(drop->qp,
+					      &flow_attr.attr);
+		if (!flow)
+			break;
+		claim_zero(mlx5_glue->destroy_flow(flow));
+	}
+	mlx5_hrxq_drop_release(dev, drop);
+	DRV_LOG(INFO, "port %u flow maximum priority: %d",
+		dev->data->port_id, verb_priorities);
+	return verb_priorities;
+}
+
 /**
  * Convert a flow.
  *
@@ -184,32 +236,6 @@  mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
 	}
 }
 
-/**
- * Create drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-	return 0;
-}
-
-/**
- * Delete drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- */
-void
-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-}
-
 /**
  * Remove all flows.
  *
@@ -292,6 +318,7 @@  mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 	const struct rte_flow_attr attr = {
 		.ingress = 1,
+		.priority = priv->config.max_verbs_prio - 1,
 	};
 	struct rte_flow_item items[] = {
 		{
@@ -830,18 +857,3 @@  mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	}
 	return 0;
 }
-
-/**
- * Detect number of Verbs flow priorities supported.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   number of supported Verbs flow priority.
- */
-unsigned int
-mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)
-{
-	return 8;
-}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 17db7c160..5f17dce50 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1949,3 +1949,235 @@  mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 	}
 	return ret;
 }
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct ibv_cq *cq;
+	struct ibv_wq *wq = NULL;
+	struct mlx5_rxq_ibv *rxq;
+
+	if (priv->drop.rxq)
+		return priv->drop.rxq;
+	cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+	if (!cq) {
+		DEBUG("port %u cannot allocate CQ for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	wq = mlx5_glue->create_wq(priv->ctx,
+		 &(struct ibv_wq_init_attr){
+			.wq_type = IBV_WQT_RQ,
+			.max_wr = 1,
+			.max_sge = 1,
+			.pd = priv->pd,
+			.cq = cq,
+		 });
+	if (!wq) {
+		DEBUG("port %u cannot allocate WQ for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+	if (!rxq) {
+		DEBUG("port %u cannot allocate drop Rx queue memory",
+		      dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq->cq = cq;
+	rxq->wq = wq;
+	priv->drop.rxq = rxq;
+	return rxq;
+error:
+	if (wq)
+		claim_zero(mlx5_glue->destroy_wq(wq));
+	if (cq)
+		claim_zero(mlx5_glue->destroy_cq(cq));
+	return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rxq
+ *   Pointer to the drop Verbs Rx queue.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev, struct mlx5_rxq_ibv *rxq)
+{
+	if (rxq->wq)
+		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+	if (rxq->cq)
+		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+	rte_free(rxq);
+	((struct priv *)dev->data->dev_private)->drop.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_rxq_ibv *rxq;
+	struct mlx5_ind_table_ibv tmpl;
+
+	rxq = mlx5_rxq_ibv_drop_new(dev);
+	if (!rxq)
+		return NULL;
+	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+		(priv->ctx,
+		 &(struct ibv_rwq_ind_table_init_attr){
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rxq->wq,
+			.comp_mask = 0,
+		 });
+	if (!tmpl.ind_table) {
+		DEBUG("port %u cannot allocate indirection table for drop"
+		      " queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	ind_tbl->ind_table = tmpl.ind_table;
+	return ind_tbl;
+error:
+	mlx5_rxq_ibv_drop_release(dev, rxq);
+	return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to release.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
+				struct mlx5_ind_table_ibv *ind_tbl)
+{
+	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+	mlx5_rxq_ibv_drop_release
+		(dev, ((struct priv *)dev->data->dev_private)->drop.rxq);
+	rte_free(ind_tbl);
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct ibv_qp *qp;
+	struct mlx5_hrxq *hrxq;
+
+	if (priv->drop.hrxq) {
+		rte_atomic32_inc(&priv->drop.hrxq->refcnt);
+		return priv->drop.hrxq;
+	}
+	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+	if (!ind_tbl)
+		return NULL;
+	qp = mlx5_glue->create_qp_ex(priv->ctx,
+		 &(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function =
+					IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->pd
+		 });
+	if (!qp) {
+		DEBUG("port %u cannot allocate QP for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+	if (!hrxq) {
+		DRV_LOG(WARNING,
+			"port %u cannot allocate memory for drop queue",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+	priv->drop.hrxq = hrxq;
+	rte_atomic32_set(&hrxq->refcnt, 1);
+	return hrxq;
+error:
+	if (ind_tbl)
+		mlx5_ind_table_ibv_drop_release(dev, ind_tbl);
+	return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Pointer to Hash Rx queue to release.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+	struct priv *priv = dev->data->dev_private;
+
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		mlx5_ind_table_ibv_drop_release(dev, hrxq->ind_table);
+		rte_free(hrxq);
+		priv->drop.hrxq = NULL;
+	}
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f53bb43c3..51f7f678b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -245,6 +245,9 @@  struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
 int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev,
+			       struct mlx5_rxq_ibv *rxq);
 int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
@@ -265,6 +268,9 @@  struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_ibv *ind_tbl);
 int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev,
+				     struct mlx5_ind_table_ibv *ind_tbl);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
@@ -277,6 +283,8 @@  struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				uint32_t tunnel, uint32_t rss_level);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);