[v5,06/34] ml/cnxk: rename cnxk ops function pointers struct

Message ID 20231018064806.24145-7-syalavarthi@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series: Implementation of revised ml/cnxk driver

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Srikanth Yalavarthi Oct. 18, 2023, 6:47 a.m. UTC
  Renamed the cn10k ML ops structure to use the cnxk prefix.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_dev.c |  2 +-
 drivers/ml/cnxk/cn10k_ml_ops.c | 73 +++++++++-------------------------
 drivers/ml/cnxk/cn10k_ml_ops.h | 34 +++++++++++++++-
 drivers/ml/cnxk/cnxk_ml_ops.c  | 36 +++++++++++++++++
 drivers/ml/cnxk/cnxk_ml_ops.h  |  2 +
 5 files changed, 91 insertions(+), 56 deletions(-)
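
What the patch amounts to, in condensed form: the cn10k device-ops callbacks drop their static qualifier and are declared in cn10k_ml_ops.h, while the rte_ml_dev_ops table is renamed from cn10k_ml_ops to cnxk_ml_ops and moved into the common cnxk layer (cnxk_ml_ops.c), which the cn10k probe path now assigns to dev->dev_ops. A minimal sketch of the resulting layout, trimmed to two callbacks (the full table is in the diff below):

/* cn10k_ml_ops.h: callbacks exported to the common cnxk layer */
int cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info);
int cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *conf);

/* cnxk_ml_ops.c: generic ops table, wired to the cn10k implementation */
#include <rte_mldev.h>
#include <rte_mldev_pmd.h>
#include "cnxk_ml_ops.h"

struct rte_ml_dev_ops cnxk_ml_ops = {
	.dev_info_get = cn10k_ml_dev_info_get,
	.dev_configure = cn10k_ml_dev_configure,
	/* ... remaining control, queue-pair, stats, model and I/O ops ... */
};

/* cn10k_ml_dev.c, primary-process probe path */
dev->dev_ops = &cnxk_ml_ops;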
  

Patch

diff --git a/drivers/ml/cnxk/cn10k_ml_dev.c b/drivers/ml/cnxk/cn10k_ml_dev.c
index fc6f78d414..91813e9d0a 100644
--- a/drivers/ml/cnxk/cn10k_ml_dev.c
+++ b/drivers/ml/cnxk/cn10k_ml_dev.c
@@ -345,7 +345,7 @@  cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de
 			goto pmd_destroy;
 		}
 
-		dev->dev_ops = &cn10k_ml_ops;
+		dev->dev_ops = &cnxk_ml_ops;
 	} else {
 		plt_err("CN10K ML Ops are not supported on secondary process");
 		dev->dev_ops = &ml_dev_dummy_ops;
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index 42a4389bbe..66b38fc1eb 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -119,7 +119,7 @@  cnxk_ml_qp_destroy(const struct rte_ml_dev *dev, struct cnxk_ml_qp *qp)
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_queue_pair_release(struct rte_ml_dev *dev, uint16_t queue_pair_id)
 {
 	struct cnxk_ml_qp *qp;
@@ -860,7 +860,7 @@  cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)
 	return ret;
 }
 
-static int
+int
 cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -888,7 +888,7 @@  cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *conf)
 {
 	struct rte_ml_dev_info dev_info;
@@ -1087,7 +1087,7 @@  cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 	return ret;
 }
 
-static int
+int
 cn10k_ml_dev_close(struct rte_ml_dev *dev)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -1160,7 +1160,7 @@  cn10k_ml_dev_close(struct rte_ml_dev *dev)
 	return rte_dev_remove(dev->device);
 }
 
-static int
+int
 cn10k_ml_dev_start(struct rte_ml_dev *dev)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -1180,7 +1180,7 @@  cn10k_ml_dev_start(struct rte_ml_dev *dev)
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_stop(struct rte_ml_dev *dev)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -1200,7 +1200,7 @@  cn10k_ml_dev_stop(struct rte_ml_dev *dev)
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,
 			      const struct rte_ml_dev_qp_conf *qp_conf, int socket_id)
 {
@@ -1241,7 +1241,7 @@  cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_stats_get(struct rte_ml_dev *dev, struct rte_ml_dev_stats *stats)
 {
 	struct cnxk_ml_qp *qp;
@@ -1258,7 +1258,7 @@  cn10k_ml_dev_stats_get(struct rte_ml_dev *dev, struct rte_ml_dev_stats *stats)
 	return 0;
 }
 
-static void
+void
 cn10k_ml_dev_stats_reset(struct rte_ml_dev *dev)
 {
 	struct cnxk_ml_qp *qp;
@@ -1273,7 +1273,7 @@  cn10k_ml_dev_stats_reset(struct rte_ml_dev *dev)
 	}
 }
 
-static int
+int
 cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,
 			      int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,
 			      uint32_t size)
@@ -1321,7 +1321,7 @@  cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod
 	return idx;
 }
 
-static int
+int
 cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,
 				uint64_t *value)
 {
@@ -1363,7 +1363,7 @@  cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16
 	return -EINVAL;
 }
 
-static int
+int
 cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,
 			const uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)
 {
@@ -1427,7 +1427,7 @@  cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode
 	return idx;
 }
 
-static int
+int
 cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,
 			  int32_t model_id, const uint16_t stat_ids[], uint16_t nb_ids)
 {
@@ -1441,7 +1441,7 @@  cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mo
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -1528,7 +1528,7 @@  cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 	return 0;
 }
 
-static int
+int
 cn10k_ml_dev_selftest(struct rte_ml_dev *dev)
 {
 	struct cn10k_ml_dev *cn10k_mldev;
@@ -2051,7 +2051,7 @@  cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 	return ret;
 }
 
-static int
+int
 cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,
 			struct rte_ml_model_info *model_info)
 {
@@ -2071,7 +2071,7 @@  cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,
 	return 0;
 }
 
-static int
+int
 cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *buffer)
 {
 	struct cnxk_ml_model *model;
@@ -2105,7 +2105,7 @@  cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *bu
 	return 0;
 }
 
-static int
+int
 cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **dbuffer,
 		     struct rte_ml_buff_seg **qbuffer)
 {
@@ -2186,7 +2186,7 @@  cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_bu
 	return 0;
 }
 
-static int
+int
 cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **qbuffer,
 		       struct rte_ml_buff_seg **dbuffer)
 {
@@ -2574,38 +2574,3 @@  cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)
 error_enqueue:
 	return ret;
 }
-
-struct rte_ml_dev_ops cn10k_ml_ops = {
-	/* Device control ops */
-	.dev_info_get = cn10k_ml_dev_info_get,
-	.dev_configure = cn10k_ml_dev_configure,
-	.dev_close = cn10k_ml_dev_close,
-	.dev_start = cn10k_ml_dev_start,
-	.dev_stop = cn10k_ml_dev_stop,
-	.dev_dump = cn10k_ml_dev_dump,
-	.dev_selftest = cn10k_ml_dev_selftest,
-
-	/* Queue-pair handling ops */
-	.dev_queue_pair_setup = cn10k_ml_dev_queue_pair_setup,
-	.dev_queue_pair_release = cn10k_ml_dev_queue_pair_release,
-
-	/* Stats ops */
-	.dev_stats_get = cn10k_ml_dev_stats_get,
-	.dev_stats_reset = cn10k_ml_dev_stats_reset,
-	.dev_xstats_names_get = cn10k_ml_dev_xstats_names_get,
-	.dev_xstats_by_name_get = cn10k_ml_dev_xstats_by_name_get,
-	.dev_xstats_get = cn10k_ml_dev_xstats_get,
-	.dev_xstats_reset = cn10k_ml_dev_xstats_reset,
-
-	/* Model ops */
-	.model_load = cn10k_ml_model_load,
-	.model_unload = cn10k_ml_model_unload,
-	.model_start = cn10k_ml_model_start,
-	.model_stop = cn10k_ml_model_stop,
-	.model_info_get = cn10k_ml_model_info_get,
-	.model_params_update = cn10k_ml_model_params_update,
-
-	/* I/O ops */
-	.io_quantize = cn10k_ml_io_quantize,
-	.io_dequantize = cn10k_ml_io_dequantize,
-};
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.h b/drivers/ml/cnxk/cn10k_ml_ops.h
index fd5992e192..16480b9ad8 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.h
+++ b/drivers/ml/cnxk/cn10k_ml_ops.h
@@ -286,7 +286,29 @@  struct cn10k_ml_req {
 };
 
 /* Device ops */
-extern struct rte_ml_dev_ops cn10k_ml_ops;
+int cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info);
+int cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *conf);
+int cn10k_ml_dev_close(struct rte_ml_dev *dev);
+int cn10k_ml_dev_start(struct rte_ml_dev *dev);
+int cn10k_ml_dev_stop(struct rte_ml_dev *dev);
+int cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp);
+int cn10k_ml_dev_selftest(struct rte_ml_dev *dev);
+int cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,
+				  const struct rte_ml_dev_qp_conf *qp_conf, int socket_id);
+int cn10k_ml_dev_queue_pair_release(struct rte_ml_dev *dev, uint16_t queue_pair_id);
+
+int cn10k_ml_dev_stats_get(struct rte_ml_dev *dev, struct rte_ml_dev_stats *stats);
+void cn10k_ml_dev_stats_reset(struct rte_ml_dev *dev);
+int cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,
+				  int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,
+				  uint32_t size);
+int cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,
+				    uint64_t *value);
+int cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,
+			    int32_t model_id, const uint16_t stat_ids[], uint64_t values[],
+			    uint16_t nb_ids);
+int cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,
+			      int32_t model_id, const uint16_t stat_ids[], uint16_t nb_ids);
 
 /* Slow-path ops */
 int cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
@@ -294,6 +316,16 @@  int cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *para
 int cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id);
 int cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id);
 int cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id);
+int cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,
+			    struct rte_ml_model_info *model_info);
+int cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *buffer);
+
+/* I/O ops */
+int cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id,
+			 struct rte_ml_buff_seg **dbuffer, struct rte_ml_buff_seg **qbuffer);
+
+int cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id,
+			   struct rte_ml_buff_seg **qbuffer, struct rte_ml_buff_seg **dbuffer);
 
 /* Fast-path ops */
 __rte_hot uint16_t cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id,
diff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c
index f1872dcf7c..03402681c5 100644
--- a/drivers/ml/cnxk/cnxk_ml_ops.c
+++ b/drivers/ml/cnxk/cnxk_ml_ops.c
@@ -3,5 +3,41 @@ 
  */
 
 #include <rte_mldev.h>
+#include <rte_mldev_pmd.h>
 
 #include "cnxk_ml_ops.h"
+
+struct rte_ml_dev_ops cnxk_ml_ops = {
+	/* Device control ops */
+	.dev_info_get = cn10k_ml_dev_info_get,
+	.dev_configure = cn10k_ml_dev_configure,
+	.dev_close = cn10k_ml_dev_close,
+	.dev_start = cn10k_ml_dev_start,
+	.dev_stop = cn10k_ml_dev_stop,
+	.dev_dump = cn10k_ml_dev_dump,
+	.dev_selftest = cn10k_ml_dev_selftest,
+
+	/* Queue-pair handling ops */
+	.dev_queue_pair_setup = cn10k_ml_dev_queue_pair_setup,
+	.dev_queue_pair_release = cn10k_ml_dev_queue_pair_release,
+
+	/* Stats ops */
+	.dev_stats_get = cn10k_ml_dev_stats_get,
+	.dev_stats_reset = cn10k_ml_dev_stats_reset,
+	.dev_xstats_names_get = cn10k_ml_dev_xstats_names_get,
+	.dev_xstats_by_name_get = cn10k_ml_dev_xstats_by_name_get,
+	.dev_xstats_get = cn10k_ml_dev_xstats_get,
+	.dev_xstats_reset = cn10k_ml_dev_xstats_reset,
+
+	/* Model ops */
+	.model_load = cn10k_ml_model_load,
+	.model_unload = cn10k_ml_model_unload,
+	.model_start = cn10k_ml_model_start,
+	.model_stop = cn10k_ml_model_stop,
+	.model_info_get = cn10k_ml_model_info_get,
+	.model_params_update = cn10k_ml_model_params_update,
+
+	/* I/O ops */
+	.io_quantize = cn10k_ml_io_quantize,
+	.io_dequantize = cn10k_ml_io_dequantize,
+};
diff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h
index b953fb0f5f..a925c07580 100644
--- a/drivers/ml/cnxk/cnxk_ml_ops.h
+++ b/drivers/ml/cnxk/cnxk_ml_ops.h
@@ -60,4 +60,6 @@  struct cnxk_ml_qp {
 	struct rte_ml_dev_stats stats;
 };
 
+extern struct rte_ml_dev_ops cnxk_ml_ops;
+
 #endif /* _CNXK_ML_OPS_H_ */