[3/3] ml/cnxk: add support for additional integer types

Message ID 20240107152813.2668-4-syalavarthi@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Headers
Series add support for additional data types |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS

Commit Message

Srikanth Yalavarthi Jan. 7, 2024, 3:28 p.m. UTC
  Added support for quantization and dequantization of 32-bit
and 64-bit integer types.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cnxk_ml_io.c     | 24 ++++++++++++++++++++++++
 drivers/ml/cnxk/mvtvm_ml_model.c |  4 ++++
 2 files changed, 28 insertions(+)
  

Patch

diff --git a/drivers/ml/cnxk/cnxk_ml_io.c b/drivers/ml/cnxk/cnxk_ml_io.c
index c78009ab0cd..4b0adc2ae47 100644
--- a/drivers/ml/cnxk/cnxk_ml_io.c
+++ b/drivers/ml/cnxk/cnxk_ml_io.c
@@ -40,6 +40,18 @@  cnxk_ml_io_quantize_single(struct cnxk_ml_io *input, uint8_t *dbuffer, uint8_t *
 		case RTE_ML_IO_TYPE_UINT16:
 			ret = rte_ml_io_float32_to_uint16(qscale, nb_elements, dbuffer, qbuffer);
 			break;
+		case RTE_ML_IO_TYPE_INT32:
+			ret = rte_ml_io_float32_to_int32(qscale, nb_elements, dbuffer, qbuffer);
+			break;
+		case RTE_ML_IO_TYPE_UINT32:
+			ret = rte_ml_io_float32_to_uint32(qscale, nb_elements, dbuffer, qbuffer);
+			break;
+		case RTE_ML_IO_TYPE_INT64:
+			ret = rte_ml_io_float32_to_int64(qscale, nb_elements, dbuffer, qbuffer);
+			break;
+		case RTE_ML_IO_TYPE_UINT64:
+			ret = rte_ml_io_float32_to_uint64(qscale, nb_elements, dbuffer, qbuffer);
+			break;
 		case RTE_ML_IO_TYPE_FP16:
 			ret = rte_ml_io_float32_to_float16(nb_elements, dbuffer, qbuffer);
 			break;
@@ -82,6 +94,18 @@  cnxk_ml_io_dequantize_single(struct cnxk_ml_io *output, uint8_t *qbuffer, uint8_
 		case RTE_ML_IO_TYPE_UINT16:
 			ret = rte_ml_io_uint16_to_float32(dscale, nb_elements, qbuffer, dbuffer);
 			break;
+		case RTE_ML_IO_TYPE_INT32:
+			ret = rte_ml_io_int32_to_float32(dscale, nb_elements, qbuffer, dbuffer);
+			break;
+		case RTE_ML_IO_TYPE_UINT32:
+			ret = rte_ml_io_uint32_to_float32(dscale, nb_elements, qbuffer, dbuffer);
+			break;
+		case RTE_ML_IO_TYPE_INT64:
+			ret = rte_ml_io_int64_to_float32(dscale, nb_elements, qbuffer, dbuffer);
+			break;
+		case RTE_ML_IO_TYPE_UINT64:
+			ret = rte_ml_io_uint64_to_float32(dscale, nb_elements, qbuffer, dbuffer);
+			break;
 		case RTE_ML_IO_TYPE_FP16:
 			ret = rte_ml_io_float16_to_float32(nb_elements, qbuffer, dbuffer);
 			break;
diff --git a/drivers/ml/cnxk/mvtvm_ml_model.c b/drivers/ml/cnxk/mvtvm_ml_model.c
index 0dbe08e9889..e3234ae4422 100644
--- a/drivers/ml/cnxk/mvtvm_ml_model.c
+++ b/drivers/ml/cnxk/mvtvm_ml_model.c
@@ -150,6 +150,8 @@  mvtvm_ml_io_type_map(DLDataType dltype)
 			return RTE_ML_IO_TYPE_INT16;
 		else if (dltype.bits == 32)
 			return RTE_ML_IO_TYPE_INT32;
+		else if (dltype.bits == 64)
+			return RTE_ML_IO_TYPE_INT64;
 		break;
 	case kDLUInt:
 		if (dltype.bits == 8)
@@ -158,6 +160,8 @@  mvtvm_ml_io_type_map(DLDataType dltype)
 			return RTE_ML_IO_TYPE_UINT16;
 		else if (dltype.bits == 32)
 			return RTE_ML_IO_TYPE_UINT32;
+		else if (dltype.bits == 64)
+			return RTE_ML_IO_TYPE_UINT64;
 		break;
 	case kDLFloat:
 		if (dltype.bits == 8)