
[06/44] event/octeontx2: allocate event inflight buffers

Message ID 20190601185355.370-7-pbhagavatula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series OCTEON TX2 event device driver

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      Compilation issues

Commit Message

Pavan Nikhilesh Bhagavatula June 1, 2019, 6:53 p.m. UTC
From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Allocate buffers in DRAM that hold inflight events.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/octeontx2/Makefile     |   2 +-
 drivers/event/octeontx2/otx2_evdev.c | 111 ++++++++++++++++++++++++++-
 drivers/event/octeontx2/otx2_evdev.h |   8 ++
 3 files changed, 119 insertions(+), 2 deletions(-)
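For orientation, the XAQ buffer count programmed by this patch follows the HRM formula quoted in sso_xaq_allocate(); a worked sketch with purely illustrative numbers (the 4 queues, 4096 in-flight entries and 16 WAEs per XAQ buffer below are assumptions, not values taken from the patch):

	/*
	 * xaq_cnt = nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT    (per-GGRP cache)
	 *         + iue / xae_waes                               (in-flight events / WAEs per XAQ)
	 *         + OTX2_SSO_XAQ_SLACK * nb_event_queues         (slack)
	 *
	 * e.g. with 4 queues, iue = 4096 and xae_waes = 16:
	 * xaq_cnt = 4 * 7 + 4096 / 16 + 8 * 4 = 28 + 256 + 32 = 316 buffers
	 */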

Comments

Jerin Jacob Kollanukkaran June 17, 2019, 7:56 a.m. UTC | #1
> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Sunday, June 2, 2019 12:23 AM
> To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> Bhagavatula <pbhagavatula@marvell.com>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 06/44] event/octeontx2: allocate event inflight
> buffers
> 
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> 
> Allocate buffers in DRAM that hold inflight events.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> 
> +static int
> +sso_xaq_allocate(struct otx2_sso_evdev *dev)
> +{
> +	const struct rte_memzone *mz;
> +	struct npa_aura_s *aura;
> +	static int reconfig_cnt;
> +	char pool_name[30];

Use RTE_MEMZONE_NAMESIZE
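A minimal sketch of the suggested change (RTE_MEMZONE_NAMESIZE comes from rte_memzone.h; whether the mempool-specific RTE_MEMPOOL_NAMESIZE is the better fit for a mempool name is the author's call):

	/* Size the name buffer with the DPDK constant instead of a magic 30. */
	char pool_name[RTE_MEMZONE_NAMESIZE];

	snprintf(pool_name, RTE_MEMZONE_NAMESIZE, "otx2_xaq_buf_pool_%d",
		 reconfig_cnt);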

> +	if (mz == NULL) {
> +		otx2_err("failed to allocate mem for fcmem");
> +		return -ENOMEM;
> +	}
> +
> +	dev->fc_iova = mz->iova;
> +	dev->fc_mem = mz->addr;
> +
> +	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
> +	memset(aura, 0, sizeof(struct npa_aura_s));
> +
> +	aura->fc_ena = 1;
> +	aura->fc_addr = dev->fc_iova;
> +	aura->fc_hyst_bits = 0; /* Store count on all updates */
> +
> +	/* Taken from HRM 14.3.3(4) */
> +	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
> +	xaq_cnt += (dev->iue / dev->xae_waes) +
> +			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
> +
> +	otx2_sso_dbg("configuring %d xaq buffers", xaq_cnt);
> +	/* Setup XAQ based on number of queues. */
> +	snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
> +	dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
> +			xaq_cnt, dev->xaq_buf_size, 0, 0,
> +			rte_socket_id(), 0);
> +
> +	if (dev->xaq_pool == NULL) {
> +		otx2_err("unable to create empty mempool.");
> +		rte_memzone_free(mz);
> +		return -ENOMEM;
> +	}
> +
> +	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
> +					rte_mbuf_platform_mempool_ops(), aura);
> +	if (rc != 0) {
> +		otx2_err("unable to set xaqpool ops.");
> +		goto alloc_fail;
> +	}
> +
> +	rc = rte_mempool_populate_default(dev->xaq_pool);
> +	if (rc < 0) {
> +		otx2_err("unable to populate xaqpool.");
> +		goto alloc_fail;
> +	}
> +	reconfig_cnt++;
> +	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
> +				  dev->nb_event_queues);

Add comment on why divide by 2.
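One form the requested comment could take, sketched here; the rationale (half of the per-queue slack kept back as headroom once software enqueue reaches the threshold) is an assumption for the author to confirm:

	/* Reserve OTX2_SSO_XAQ_SLACK / 2 buffers per queue below the
	 * add_work threshold so the aura cannot be fully drained by
	 * software enqueue once the count in fc_mem reaches xaq_lmt.
	 * (Assumption: half the slack is headroom for HW, half for SW.)
	 */
	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
				  dev->nb_event_queues);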

Patch

diff --git a/drivers/event/octeontx2/Makefile b/drivers/event/octeontx2/Makefile
index 36f0b2b12..b3c3beccb 100644
--- a/drivers/event/octeontx2/Makefile
+++ b/drivers/event/octeontx2/Makefile
@@ -33,7 +33,7 @@  LIBABIVER := 1
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev.c
 
 LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci
-LDLIBS += -lrte_eventdev
+LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf
 LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index e09c6b6bc..325e61426 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -8,6 +8,7 @@ 
 #include <rte_common.h>
 #include <rte_eal.h>
 #include <rte_eventdev_pmd_pci.h>
+#include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
 
 #include "otx2_evdev.h"
@@ -203,6 +204,102 @@  sso_configure_queues(const struct rte_eventdev *event_dev)
 	return rc;
 }
 
+static int
+sso_xaq_allocate(struct otx2_sso_evdev *dev)
+{
+	const struct rte_memzone *mz;
+	struct npa_aura_s *aura;
+	static int reconfig_cnt;
+	char pool_name[30];
+	uint32_t xaq_cnt;
+	int rc;
+
+	if (dev->xaq_pool)
+		rte_mempool_free(dev->xaq_pool);
+
+	/*
+	 * Allocate memory for Add work backpressure.
+	 */
+	mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
+	if (mz == NULL)
+		mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
+						 OTX2_ALIGN +
+						 sizeof(struct npa_aura_s),
+						 rte_socket_id(),
+						 RTE_MEMZONE_IOVA_CONTIG,
+						 OTX2_ALIGN);
+	if (mz == NULL) {
+		otx2_err("failed to allocate mem for fcmem");
+		return -ENOMEM;
+	}
+
+	dev->fc_iova = mz->iova;
+	dev->fc_mem = mz->addr;
+
+	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
+	memset(aura, 0, sizeof(struct npa_aura_s));
+
+	aura->fc_ena = 1;
+	aura->fc_addr = dev->fc_iova;
+	aura->fc_hyst_bits = 0; /* Store count on all updates */
+
+	/* Taken from HRM 14.3.3(4) */
+	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
+	xaq_cnt += (dev->iue / dev->xae_waes) +
+			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
+
+	otx2_sso_dbg("configuring %d xaq buffers", xaq_cnt);
+	/* Setup XAQ based on number of queues. */
+	snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
+	dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
+			xaq_cnt, dev->xaq_buf_size, 0, 0,
+			rte_socket_id(), 0);
+
+	if (dev->xaq_pool == NULL) {
+		otx2_err("unable to create empty mempool.");
+		rte_memzone_free(mz);
+		return -ENOMEM;
+	}
+
+	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
+					rte_mbuf_platform_mempool_ops(), aura);
+	if (rc != 0) {
+		otx2_err("unable to set xaqpool ops.");
+		goto alloc_fail;
+	}
+
+	rc = rte_mempool_populate_default(dev->xaq_pool);
+	if (rc < 0) {
+		otx2_err("unable to populate xaqpool.");
+		goto alloc_fail;
+	}
+	reconfig_cnt++;
+	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
+				  dev->nb_event_queues);
+	dev->nb_xaq_cfg = xaq_cnt;
+
+	return 0;
+alloc_fail:
+	rte_mempool_free(dev->xaq_pool);
+	rte_memzone_free(mz);
+	return rc;
+}
+
+static int
+sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct sso_hw_setconfig *req;
+
+	otx2_sso_dbg("configuring XAQ for GGRPs");
+	req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
+	req->npa_pf_func = otx2_npa_pf_func_get();
+	req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
+	req->hwgrps = dev->nb_event_queues;
+
+	return otx2_mbox_process(mbox);
+}
+
 static void
 sso_lf_teardown(struct otx2_sso_evdev *dev,
 		enum otx2_sso_lf_type lf_type)
@@ -288,11 +385,23 @@  otx2_sso_configure(const struct rte_eventdev *event_dev)
 		goto teardown_hws;
 	}
 
+	if (sso_xaq_allocate(dev) < 0) {
+		rc = -ENOMEM;
+		goto teardown_hwggrp;
+	}
+
+	rc = sso_ggrp_alloc_xaq(dev);
+	if (rc < 0) {
+		otx2_err("failed to alloc xaq to ggrp %d", rc);
+		goto teardown_hwggrp;
+	}
+
 	dev->configured = 1;
 	rte_mb();
 
 	return 0;
-
+teardown_hwggrp:
+	sso_lf_teardown(dev, SSO_LF_GGRP);
 teardown_hws:
 	sso_lf_teardown(dev, SSO_LF_GWS);
 	dev->nb_event_queues = 0;
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index b46402771..375640bca 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -17,6 +17,9 @@ 
 
 #define OTX2_SSO_MAX_VHGRP                  RTE_EVENT_MAX_QUEUES_PER_DEV
 #define OTX2_SSO_MAX_VHWS                   (UINT8_MAX)
+#define OTX2_SSO_FC_NAME                    "otx2_evdev_xaq_fc"
+#define OTX2_SSO_XAQ_SLACK                  (8)
+#define OTX2_SSO_XAQ_CACHE_CNT              (0x7)
 
 /* SSO LF register offsets (BAR2) */
 #define SSO_LF_GGRP_OP_ADD_WORK0            (0x0ull)
@@ -54,6 +57,11 @@  struct otx2_sso_evdev {
 	uint32_t min_dequeue_timeout_ns;
 	uint32_t max_dequeue_timeout_ns;
 	int32_t max_num_events;
+	uint64_t *fc_mem;
+	uint64_t xaq_lmt;
+	uint64_t nb_xaq_cfg;
+	rte_iova_t fc_iova;
+	struct rte_mempool *xaq_pool;
 	/* HW const */
 	uint32_t xae_waes;
 	uint32_t xaq_buf_size;