[2/2] net/mlx5: control rules with identical pattern behavior
Checks
Commit Message
In order to allow/disallow configuring rules with identical
patterns, the new device argument 'allow_duplicate_pattern'
is introduced.
If allowed, these rules will be inserted successfully and only the
first rule takes effect.
If disallowed, the first rule will be inserted and the other rules
will be rejected.
The default is to allow.
Set it to 0 to disallow, for example:
-a <PCI_BDF>,allow_duplicate_pattern=0
Signed-off-by: Jiawei Wang <jiaweiw@nvidia.com>
---
doc/guides/nics/mlx5.rst | 10 ++++++++++
doc/guides/rel_notes/release_21_08.rst | 6 ++++++
drivers/net/mlx5/linux/mlx5_os.c | 7 +++++++
drivers/net/mlx5/mlx5.c | 6 ++++++
drivers/net/mlx5/mlx5.h | 2 ++
drivers/net/mlx5/mlx5_flow_dv.c | 3 +++
6 files changed, 34 insertions(+)
@@ -1058,6 +1058,16 @@ Driver options
By default, the PMD will set this value to 1.
+- ``allow_duplicate_pattern`` parameter [int]
+
+ There are two options to choose:
+
+ - 0. Prevent insertion of rules with the same pattern items on non-root table.
+
+ - 1. Allow insertion of rules with the same pattern items.
+
+ By default, the PMD will set this value to 1.
+
.. _mlx5_firmware_config:
Firmware configuration
@@ -55,6 +55,12 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Mellanox mlx5 net driver and common layer.**
+
+ Updated Mellanox mlx5 driver with new features and improvements, including:
+
+  * Added devargs option ``allow_duplicate_pattern``.
+
Removed Items
-------------
@@ -355,6 +355,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
}
sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
+ if (!priv->config.allow_duplicate_pattern) {
+ mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
+ mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
+ if (sh->fdb_domain)
+ mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
+ }
#endif /* HAVE_MLX5DV_DR */
sh->default_miss_action =
mlx5_glue->dr_create_flow_action_default_miss();
@@ -2359,6 +2365,7 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
dev_config.dv_flow_en = 1;
dev_config.decap_en = 1;
dev_config.log_hp_size = MLX5_ARG_UNSET;
+ dev_config.allow_duplicate_pattern = 1;
list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
&list[i],
&dev_config,
@@ -175,6 +175,9 @@
/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"
+/* Device parameter to configure allow or prevent duplicate rules pattern. */
+#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
@@ -1948,6 +1951,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->sys_mem_en = !!tmp;
} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
config->decap_en = !!tmp;
+ } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
+ config->allow_duplicate_pattern = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
@@ -2007,6 +2012,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_RECLAIM_MEM,
MLX5_SYS_MEM_EN,
MLX5_DECAP_EN,
+ MLX5_ALLOW_DUPLICATE_PATTERN,
NULL,
};
struct rte_kvargs *kvlist;
@@ -244,6 +244,8 @@ struct mlx5_dev_config {
unsigned int sys_mem_en:1; /* The default memory allocator. */
unsigned int decap_en:1; /* Whether decap will be used or not. */
unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
+ unsigned int allow_duplicate_pattern:1;
+ /* Allow/Prevent the duplicate rules pattern. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int stride_num_n; /* Number of strides. */
@@ -13299,6 +13299,9 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"hardware refuses to create flow");
+ if (!priv->config.allow_duplicate_pattern &&
+ errno == EEXIST)
+ DRV_LOG(INFO, "duplicate rules not supported");
goto error;
}
if (priv->vmwa_context &&