[v4,1/2] net/mlx5: add support for Rx queue delay drop

Message ID 20211104175904.60696-2-bingz@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers
Series Add delay drop support for Rx queue |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-testing warning apply patch failure

Commit Message

Bing Zhao Nov. 4, 2021, 5:59 p.m. UTC
  For the Ethernet RQs, if all the receiving descriptors are
exhausted, the packets being received will be dropped. This behavior
prevents slow or malicious software entities at the host from
affecting the network. While for hairpin cases, even if there is no
software involved during the packet forwarding from Rx to Tx side,
some hiccup in the hardware or back pressure from Tx side may still
cause the descriptors to be exhausted. In certain scenarios it may be
preferred to configure the device to avoid such packet drops,
assuming the posting of descriptors will resume shortly.

To support this, a new devarg "delay_drop_en" is introduced, by
default, the delay drop is enabled for hairpin Rx queues and
disabled for standard Rx queues. This value is used as a bit mask:
  - bit 0: enablement of standard Rx queue
  - bit 1: enablement of hairpin Rx queue
And this attribute will be applied to all Rx queues of a device.

The "rq_delay_drop" capability in the HCA_CAP is checked before
creating any queue. If the hardware capabilities do not support
this delay drop, all the Rx queues will still be created without
this attribute, and the devarg setting will be ignored even if it
is specified explicitly.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c |  1 +
 drivers/common/mlx5/mlx5_devx_cmds.h |  1 +
 drivers/net/mlx5/linux/mlx5_os.c     | 11 +++++++++++
 drivers/net/mlx5/mlx5.c              |  7 +++++++
 drivers/net/mlx5/mlx5.h              |  9 +++++++++
 drivers/net/mlx5/mlx5_devx.c         |  5 +++++
 drivers/net/mlx5/mlx5_rx.h           |  1 +
 7 files changed, 35 insertions(+)
  

Comments

Slava Ovsiienko Nov. 4, 2021, 6:22 p.m. UTC | #1
> -----Original Message-----
> From: Bing Zhao <bingz@nvidia.com>
> Sent: Thursday, November 4, 2021 19:59
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>; NBU-Contact-
> Thomas Monjalon <thomas@monjalon.net>; Ori Kam <orika@nvidia.com>
> Subject: [PATCH v4 1/2] net/mlx5: add support for Rx queue delay drop
> 
> For the Ethernet RQs, if all the receiving descriptors are exhausted, the
> packets being received will be dropped. This behavior prevents slow or
> malicious software entities at the host from affecting the network. While for
> hairpin cases, even if there is no software involved during the packet
> forwarding from Rx to Tx side, some hiccup in the hardware or back pressure
> from Tx side may still cause the descriptors to be exhausted. In certain
> scenarios it may be preferred to configure the device to avoid such packet
> drops, assuming the posting of descriptors will resume shortly.
> 
> To support this, a new devarg "delay_drop_en" is introduced, by default, the
> delay drop is enabled for hairpin Rx queues and disabled for standard Rx
> queues. This value is used as a bit mask:
>   - bit 0: enablement of standard Rx queue
>   - bit 1: enablement of hairpin Rx queue
> And this attribute will be applied to all Rx queues of a device.
> 
> The "rq_delay_drop" capability in the HCA_CAP is checked before creating
> any queue. If the hardware capabilities do not support this delay drop, all the
> Rx queues will still be created without this attribute, and the devarg setting
> will be ignored even if it is specified explicitly.
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
  

Patch

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 4ab3070da0..3748e54b22 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -964,6 +964,7 @@  mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
 					 general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
+	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
 	if (attr->qos.sup) {
 		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
 				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 86ee4f7b78..50d3264b46 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -178,6 +178,7 @@  struct mlx5_hca_attr {
 	uint32_t swp_csum:1;
 	uint32_t swp_lso:1;
 	uint32_t lro_max_msg_sz_mode:2;
+	uint32_t rq_delay_drop:1;
 	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
 	uint16_t lro_min_mss_size;
 	uint32_t flex_parser_protocols;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index e0304b685e..de880ee4c9 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1508,6 +1508,15 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 #endif
 	}
+	if (config->std_delay_drop || config->hp_delay_drop) {
+		if (!config->hca_attr.rq_delay_drop) {
+			config->std_delay_drop = 0;
+			config->hp_delay_drop = 0;
+			DRV_LOG(WARNING,
+				"dev_port-%u: Rxq delay drop is not supported",
+				priv->dev_port);
+		}
+	}
 	if (sh->devx) {
 		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
@@ -2077,6 +2086,8 @@  mlx5_os_config_default(struct mlx5_dev_config *config)
 	config->decap_en = 1;
 	config->log_hp_size = MLX5_ARG_UNSET;
 	config->allow_duplicate_pattern = 1;
+	config->std_delay_drop = 0;
+	config->hp_delay_drop = 0;
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8614b8ffdd..a961cce430 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -183,6 +183,9 @@ 
 /* Device parameter to configure implicit registration of mempool memory. */
 #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
 
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP_EN "delay_drop_en"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
@@ -2091,6 +2094,9 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->decap_en = !!tmp;
 	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
 		config->allow_duplicate_pattern = !!tmp;
+	} else if (strcmp(MLX5_DELAY_DROP_EN, key) == 0) {
+		config->std_delay_drop = tmp & MLX5_DELAY_DROP_STANDARD;
+		config->hp_delay_drop = tmp & MLX5_DELAY_DROP_HAIRPIN;
 	} else {
 		DRV_LOG(WARNING, "%s: unknown parameter", key);
 		rte_errno = EINVAL;
@@ -2153,6 +2159,7 @@  mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 		MLX5_DECAP_EN,
 		MLX5_ALLOW_DUPLICATE_PATTERN,
 		MLX5_MR_MEMPOOL_REG_EN,
+		MLX5_DELAY_DROP_EN,
 		NULL,
 	};
 	struct rte_kvargs *kvlist;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 51f4578838..b2022f3300 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -99,6 +99,13 @@  enum mlx5_flow_type {
 	MLX5_FLOW_TYPE_MAXI,
 };
 
+/* The mode of delay drop for Rx queues. */
+enum mlx5_delay_drop_mode {
+	MLX5_DELAY_DROP_NONE = 0, /* All disabled. */
+	MLX5_DELAY_DROP_STANDARD = RTE_BIT32(0), /* Standard queues enable. */
+	MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Hairpin queues enable. */
+};
+
 /* Hlist and list callback context. */
 struct mlx5_flow_cb_ctx {
 	struct rte_eth_dev *dev;
@@ -264,6 +271,8 @@  struct mlx5_dev_config {
 	unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
 	unsigned int allow_duplicate_pattern:1;
 	/* Allow/Prevent the duplicate rules pattern. */
+	unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
+	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
 	struct {
 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
 		unsigned int stride_num_n; /* Number of strides. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index a9f9f4af70..e46f79124d 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -277,6 +277,7 @@  mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 						MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = cdev->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
+	rq_attr.delay_drop_en = rxq_data->delay_drop;
 	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
 	if (rxq_data->shared) /* Create RMP based RQ. */
 		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
@@ -439,6 +440,8 @@  mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.counter_set_id = priv->counter_set_id;
+	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
+	attr.delay_drop_en = priv->config.hp_delay_drop;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
@@ -496,6 +499,7 @@  mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
+	rxq_data->delay_drop = priv->config.std_delay_drop;
 	/* Create RQ using DevX API. */
 	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret) {
@@ -941,6 +945,7 @@  mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 			dev->data->port_id);
 		goto error;
 	}
+	rxq_ctrl->rxq.delay_drop = 0;
 	/* Create RQ using DevX API. */
 	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret != 0) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index eda6eca8de..3b797e577a 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -97,6 +97,7 @@  struct mlx5_rxq_data {
 	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
 	unsigned int mcqe_format:3; /* CQE compression format. */
 	unsigned int shared:1; /* Shared RXQ. */
+	unsigned int delay_drop:1; /* Enable delay drop. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;