[v2,05/11] mempool/cnxk: add cn9k mempool ops

Message ID 20210403141751.215926-5-asekhar@marvell.com (mailing list archive)
State Changes Requested, archived
Delegated to: Jerin Jacob
Series [v2,01/11] mempool/cnxk: add build infra and doc

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Ashwin Sekhar T K April 3, 2021, 2:17 p.m. UTC
  Add Marvell CN9k mempool ops and implement the CN9k mempool
alloc, which ensures that the element size always occupies an
odd number of cachelines for even distribution of elements
among the L1D cache sets.
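
As a rough illustration of the padding rule applied in cn9k_mempool_alloc()
below (a minimal sketch, assuming ROC_ALIGN is the 128-byte L1D line size;
the block sizes are made-up example values):

	/*
	 * Example: a 2176 B block is 17 cachelines (odd), so no extra pad
	 * is needed; a 2304 B block is 18 cachelines (even), so one more
	 * 128 B trailer pad makes it 19 (odd). With an odd line count,
	 * consecutive elements start in different VA<9:7> sets instead of
	 * aliasing into the same L1D set.
	 */
	lines = block_size / ROC_ALIGN;
	pad = (lines % 2) ? 0 : ROC_ALIGN;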

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/mempool/cnxk/cn9k_mempool_ops.c | 54 +++++++++++++++++++++++++
 drivers/mempool/cnxk/cnxk_mempool_ops.c |  4 +-
 drivers/mempool/cnxk/meson.build        |  3 +-
 3 files changed, 59 insertions(+), 2 deletions(-)
 create mode 100644 drivers/mempool/cnxk/cn9k_mempool_ops.c
  

Patch

diff --git a/drivers/mempool/cnxk/cn9k_mempool_ops.c b/drivers/mempool/cnxk/cn9k_mempool_ops.c
new file mode 100644
index 0000000000..f5ac163af9
--- /dev/null
+++ b/drivers/mempool/cnxk/cn9k_mempool_ops.c
@@ -0,0 +1,54 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_mempool.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+static int
+cn9k_mempool_alloc(struct rte_mempool *mp)
+{
+	size_t block_size, padding;
+
+	block_size = mp->elt_size + mp->header_size + mp->trailer_size;
+	/* Align header size to ROC_ALIGN */
+	if (mp->header_size % ROC_ALIGN != 0) {
+		padding = RTE_ALIGN_CEIL(mp->header_size, ROC_ALIGN) -
+			  mp->header_size;
+		mp->header_size += padding;
+		block_size += padding;
+	}
+
+	/* Align block size to ROC_ALIGN */
+	if (block_size % ROC_ALIGN != 0) {
+		padding = RTE_ALIGN_CEIL(block_size, ROC_ALIGN) - block_size;
+		mp->trailer_size += padding;
+		block_size += padding;
+	}
+
+	/*
+	 * Marvell CN9k has 8 sets, 41 ways L1D cache, VA<9:7> bits dictate the
+	 * set selection. Add additional padding to ensure that the element size
+	 * always occupies odd number of cachelines to ensure even distribution
+	 * of elements among L1D cache sets.
+	 */
+	padding = ((block_size / ROC_ALIGN) % 2) ? 0 : ROC_ALIGN;
+	mp->trailer_size += padding;
+
+	return cnxk_mempool_alloc(mp);
+}
+
+static struct rte_mempool_ops cn9k_mempool_ops = {
+	.name = "cn9k_mempool_ops",
+	.alloc = cn9k_mempool_alloc,
+	.free = cnxk_mempool_free,
+	.enqueue = cnxk_mempool_enq,
+	.dequeue = cnxk_mempool_deq,
+	.get_count = cnxk_mempool_get_count,
+	.calc_mem_size = cnxk_mempool_calc_mem_size,
+	.populate = cnxk_mempool_populate,
+};
+
+MEMPOOL_REGISTER_OPS(cn9k_mempool_ops);
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 18c307288c..45c45e9943 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,9 @@  cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
 static int
 cnxk_mempool_lf_init(void)
 {
-	if (roc_model_is_cn10k() || roc_model_is_cn9k())
+	if (roc_model_is_cn9k())
+		rte_mbuf_set_platform_mempool_ops("cn9k_mempool_ops");
+	else if (roc_model_is_cn10k())
 		rte_mbuf_set_platform_mempool_ops("cnxk_mempool_ops");
 
 	return 0;
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index 52244e728b..ff31893ff4 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -9,6 +9,7 @@  if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
 endif
 
 sources = files('cnxk_mempool.c',
-		'cnxk_mempool_ops.c')
+		'cnxk_mempool_ops.c',
+		'cn9k_mempool_ops.c')
 
 deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
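
For context, once cn9k_mempool_ops is registered it becomes the platform
default on CN9K via rte_mbuf_set_platform_mempool_ops(), and a pool can also
be bound to it explicitly through the standard rte_mempool API (a hedged
sketch; the pool name and sizes below are made-up example values):

	#include <rte_mempool.h>

	struct rte_mempool *mp;

	/* Create an empty pool, then attach the CN9K ops before populating */
	mp = rte_mempool_create_empty("example_pool", 8192, 2048, 256, 0,
				      SOCKET_ID_ANY, 0);
	if (mp != NULL &&
	    rte_mempool_set_ops_byname(mp, "cn9k_mempool_ops", NULL) == 0)
		rte_mempool_populate_default(mp);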