[v2,6/9] net/mlx5: add flex parser DevX object management

Message ID 20211102085347.20568-7-getelson@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5: add flex item support |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Gregory Etelson Nov. 2, 2021, 8:53 a.m. UTC
  The DevX flex parsers can be shared between representors
within the same IB context. We should put the flex parser
objects into the shared list and engage the standard
mlx5_list_xxx API to manage them.

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |  10 +++
 drivers/net/mlx5/mlx5.c           |   4 +
 drivers/net/mlx5/mlx5.h           |  20 +++++
 drivers/net/mlx5/mlx5_flow_flex.c | 121 +++++++++++++++++++++++++++++-
 4 files changed, 154 insertions(+), 1 deletion(-)
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 3f7c34b687..1c6f50b72a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -337,6 +337,16 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a4a0e258a9..dc15688f21 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1429,6 +1429,10 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_os_release_workspace();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	if (sh->flex_parsers_dv) {
+		mlx5_list_destroy(sh->flex_parsers_dv);
+		sh->flex_parsers_dv = NULL;
+	}
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f0c1775f8c..63de6523e8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1099,6 +1099,15 @@  struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+	struct mlx5_list_entry entry;  /* List element at the beginning. */
+	uint32_t num_samples;
+	void *devx_obj;
+	struct mlx5_devx_graph_node_attr devx_conf;
+	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
@@ -1159,6 +1168,7 @@  struct mlx5_dev_ctx_shared {
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
 	struct mlx5_list *dest_array_list;
+	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
 	/* List of destination array actions. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1828,4 +1838,14 @@  int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+			      struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx,	struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
+				    struct mlx5_list_entry *entry);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b7bc4af6fb..2f87073e97 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -45,7 +45,13 @@  mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
 
 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
 		if (priv->flex_item_map & (1 << i)) {
-			/* DevX object dereferencing should be provided here. */
+			struct mlx5_flex_item *flex = &priv->flex_item[i];
+
+			claim_zero(mlx5_list_unregister
+					(priv->sh->flex_parsers_dv,
+					 &flex->devx_fp->entry));
+			flex->devx_fp = NULL;
+			flex->refcnt = 0;
 			priv->flex_item_map &= ~(1 << i);
 		}
 	}
@@ -127,7 +133,9 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
 	struct mlx5_flex_item *flex;
+	struct mlx5_list_entry *ent;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	flex = mlx5_flex_alloc(priv);
@@ -137,10 +145,22 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+	if (!ent) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "flex item creation failure");
+		goto error;
+	}
+	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+	mlx5_flex_free(priv, flex);
+	return NULL;
 }
 
 /**
@@ -166,6 +186,7 @@  flow_dv_item_release(struct rte_eth_dev *dev,
 	struct mlx5_flex_item *flex =
 		(struct mlx5_flex_item *)(uintptr_t)handle;
 	uint32_t old_refcnt = 1;
+	int rc;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_spinlock_lock(&priv->flex_item_sl);
@@ -184,6 +205,104 @@  flow_dv_item_release(struct rte_eth_dev *dev,
 	}
 	/* Flex item is marked as invalid, we can leave locked section. */
 	rte_spinlock_unlock(&priv->flex_item_sl);
+	MLX5_ASSERT(flex->devx_fp);
+	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+				  &flex->devx_fp->entry);
+	flex->devx_fp = NULL;
 	mlx5_flex_free(priv, flex);
+	if (rc < 0)
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item release failure");
 	return 0;
 }
+
+/* DevX flex parser list callbacks. */
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list_ctx;
+	struct mlx5_flex_parser_devx *fp, *conf = ctx;
+	int ret;
+
+	fp = mlx5_malloc(MLX5_MEM_ZERO,	sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	/* Copy the requested configurations. */
+	fp->num_samples = conf->num_samples;
+	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+	/* Create DevX flex parser. */
+	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
+							&fp->devx_conf);
+	if (!fp->devx_obj)
+		goto error;
+	/* Query the firmware assigned sample ids. */
+	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+						fp->sample_ids,
+						fp->num_samples);
+	if (ret)
+		goto error;
+	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
+		(const void *)fp, fp->num_samples);
+	return &fp->entry;
+error:
+	if (fp->devx_obj)
+		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
+	if (fp)
+		mlx5_free(fp);
+	return NULL;
+}
+
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+			  struct mlx5_list_entry *iter, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(iter, struct mlx5_flex_parser_devx, entry);
+	struct mlx5_flex_parser_devx *org =
+		container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	return !iter || !ctx || memcmp(&fp->devx_conf,
+				       &org->devx_conf,
+				       sizeof(fp->devx_conf));
+}
+
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	MLX5_ASSERT(fp->devx_obj);
+	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
+	mlx5_free(entry);
+}
+
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+			  struct mlx5_list_entry *entry, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp;
+
+	RTE_SET_USED(list_ctx);
+	RTE_SET_USED(entry);
+	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+	return &fp->entry;
+}
+
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+	RTE_SET_USED(list_ctx);
+	mlx5_free(fp);
+}