[v1,26/27] mempool/octeontx2: add devargs for max pool selection

Message ID 20190523081339.56348-27-jerinj@marvell.com (mailing list archive)
State Superseded, archived
Series OCTEON TX2 common and mempool driver

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     Compilation issues

Commit Message

Jerin Jacob Kollanukkaran May 23, 2019, 8:13 a.m. UTC
  From: Jerin Jacob <jerinj@marvell.com>

The maximum number of mempools per application needs to be configured
in HW during mempool driver initialization. The HW can support up to 1M
mempools, but since each mempool consumes a set of HW resources, the
max_pools devargs parameter is introduced to configure the number of
mempools required by the application.
For example:

-w 0002:02:00.0,max_pools=512

With the above configuration, the driver will set up only 512 mempools
for the given application to save HW resources.
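
As a worked illustration (not part of the patch), the standalone sketch
below mirrors what parse_max_pools() in this patch does: the requested
value is clamped to the supported [128, 1M] range and rounded up to the
next power of two, because the NPA aura size field encodes the pool
count as nr_pools = 1 << (aura_sz + 6). The log2_roundup_u32() helper
is a local stand-in for DPDK's rte_log2_u32().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for rte_log2_u32(): log2 rounded up to the next power of two. */
static uint32_t
log2_roundup_u32(uint32_t v)
{
	uint32_t r = 0;

	v--;
	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int
main(void)
{
	/* Requested max_pools values, including one that is not a power of two. */
	uint32_t req[] = { 100, 300, 512, 1u << 20 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		uint32_t val = req[i];
		uint32_t nr_pools;
		uint8_t aura_sz;

		/* Clamp to the supported range, as parse_max_pools() does. */
		if (val < 128)
			val = 128;
		if (val > (1u << 20))
			val = 1u << 20;

		/* The aura size encodes nr_pools = 1 << (aura_sz + 6). */
		aura_sz = log2_roundup_u32(val) - 6;
		nr_pools = 1u << ((uint32_t)aura_sz + 6);

		printf("max_pools=%u -> aura_sz=%u -> %u pools\n",
		       req[i], aura_sz, nr_pools);
	}
	return 0;
}

For instance, max_pools=300 is rounded up to 512 pools (aura_sz=3),
since only power-of-two pool counts are representable in this encoding.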

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/mempool/octeontx2/otx2_mempool.c | 41 +++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)
  

Patch

diff --git a/drivers/mempool/octeontx2/otx2_mempool.c b/drivers/mempool/octeontx2/otx2_mempool.c
index 1bcb86cf4..ff7fcac85 100644
--- a/drivers/mempool/octeontx2/otx2_mempool.c
+++ b/drivers/mempool/octeontx2/otx2_mempool.c
@@ -7,6 +7,7 @@ 
 #include <rte_common.h>
 #include <rte_eal.h>
 #include <rte_io.h>
+#include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
@@ -142,6 +143,42 @@  otx2_aura_size_to_u32(uint8_t val)
 	return 1 << (val + 6);
 }
 
+static int
+parse_max_pools(const char *key, const char *value, void *extra_args)
+{
+	uint32_t val;
+	RTE_SET_USED(key);
+
+	val = atoi(value);
+	if (val < otx2_aura_size_to_u32(NPA_AURA_SZ_128))
+		val = 128;
+	if (val > otx2_aura_size_to_u32(NPA_AURA_SZ_1M))
+		val = BIT_ULL(20);
+
+	*(uint8_t *)extra_args = rte_log2_u32(val) - 6;
+	return 0;
+}
+
+#define OTX2_MAX_POOLS "max_pools"
+
+static uint8_t
+otx2_parse_aura_size(struct rte_devargs *devargs)
+{
+	uint8_t aura_sz = NPA_AURA_SZ_128;
+	struct rte_kvargs *kvlist;
+
+	if (devargs == NULL)
+		goto exit;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		goto exit;
+
+	rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
+	rte_kvargs_free(kvlist);
+exit:
+	return aura_sz;
+}
+
 static inline int
 npa_lf_attach(struct otx2_mbox *mbox)
 {
@@ -234,7 +271,7 @@  otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev)
 		if (rc)
 			goto npa_detach;
 
-		aura_sz = NPA_AURA_SZ_128;
+		aura_sz = otx2_parse_aura_size(pci_dev->device.devargs);
 		nr_pools = otx2_aura_size_to_u32(aura_sz);
 
 		lf = &dev->npalf;
@@ -397,3 +434,5 @@  static struct rte_pci_driver pci_npa = {
 RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
 RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
 RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
+			      OTX2_MAX_POOLS "=<128-1048576>");
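
For completeness, a hedged usage sketch (hypothetical application code,
not part of this patch) showing how the registered max_pools devarg
reaches the driver: it is carried with the PCI whitelist option (-w in
this DPDK release) and parsed from the device's devargs by
otx2_parse_aura_size() during probe.

#include <rte_eal.h>

int
main(void)
{
	/* The devarg travels with the PCI whitelist entry; the driver
	 * reads max_pools from devargs at probe time. */
	char *argv[] = { "app", "-w", "0002:02:00.0,max_pools=512" };
	int argc = 3;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* ... create up to 512 mempools as usual ... */
	return 0;
}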