[2/2] event/octeontx2: configure aura backpressure

Message ID 20210614192426.2978-2-pbhagavatula@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series [1/2] mempool/octeontx2: fix shift calculation

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-intel-Functional success Functional Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/github-robot success github build: passed
ci/iol-testing fail Testing issues
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Functional fail Functional Testing issues

Commit Message

Pavan Nikhilesh Bhagavatula June 14, 2021, 7:24 p.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

In the octeontx2 poll mode driver, the RQ is connected to a CQ, which is
responsible for asserting backpressure on the CGX channel.
When the event eth Rx adapter is configured, the RQ is instead connected
to an event queue; to enable backpressure we need to configure the AURA
assigned to a given RQ to backpressure the CGX channel.
The event device expects a unique AURA to be configured per ethernet
device. If RQs from different ethernet devices use the same AURA,
backpressure is disabled; the application can override this using
the devargs:

	-a 0002:0e:00.0,force_rx_bp=1
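
For example, an application can pass this devarg through the EAL
arguments. A minimal sketch (the PCI address and the surrounding setup
are placeholders, not part of this patch):

	#include <rte_eal.h>

	int
	main(int argc, char **argv)
	{
		char *eal_args[] = {
			argv[0],
			"-a", "0002:0e:00.0,force_rx_bp=1",
		};

		(void)argc;
		if (rte_eal_init(3, eal_args) < 0)
			return -1;
		/* ... eventdev, ethdev and Rx adapter setup as usual ... */
		return 0;
	}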

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/eventdevs/octeontx2.rst         |  24 +++++
 drivers/event/octeontx2/otx2_evdev.c       |   4 +
 drivers/event/octeontx2/otx2_evdev.h       |   1 +
 drivers/event/octeontx2/otx2_evdev_adptr.c | 105 +++++++++++++++++++--
 4 files changed, 127 insertions(+), 7 deletions(-)
  
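A note on the backpressure threshold used below: the new
NIX_RQ_AURA_THRESH() helper programs the aura backpressure level at 95%
of the aura limit, with the limit first clamped by the
limit > 128 ? 256 : limit rule. A small sketch of the arithmetic
(aura_bp_level() is a hypothetical helper for illustration only, not
part of the patch):

	#define NIX_RQ_AURA_THRESH(x) (((x) * 95) / 100)

	static unsigned int
	aura_bp_level(unsigned int limit)
	{
		/* Same computation as sso_cfg_nix_mp_bpid() below. */
		return NIX_RQ_AURA_THRESH(limit > 128 ? 256 : limit);
	}

	/* limit = 1024 -> level = 243; limit = 100 -> level = 95 */
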

Patch

diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index ce733198c2..11fbebfcd2 100644
--- a/doc/guides/eventdevs/octeontx2.rst
+++ b/doc/guides/eventdevs/octeontx2.rst
@@ -138,6 +138,15 @@  Runtime Config Options
 
       -a 0002:0e:00.0,npa_lock_mask=0xf
 
+- ``Force Rx Back pressure``
+
+   Force Rx back pressure when the same mempool is used across the
+   ethernet devices connected to the event device.
+
+   For example::
+
+      -a 0002:0e:00.0,force_rx_bp=1
+
 Debugging Options
 -----------------
 
@@ -152,3 +161,18 @@  Debugging Options
    +---+------------+-------------------------------------------------------+
    | 2 | TIM        | --log-level='pmd\.event\.octeontx2\.timer,8'          |
    +---+------------+-------------------------------------------------------+
+
+Limitations
+-----------
+
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+Using the same mempool for all the ethernet device ports connected to the
+event device would cause back pressure to be asserted only on the first
+ethernet device.
+Back pressure is therefore automatically disabled when the same mempool is
+used across the ethernet devices connected to the event device; applications
+can override this with the ``force_rx_bp=1`` device argument.
+Using a unique mempool per ethernet device is recommended when they are
+connected to the event device.
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index ee7a6ad514..38a6b651d9 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -1639,6 +1639,7 @@  static struct rte_eventdev_ops otx2_sso_ops = {
 #define OTX2_SSO_XAE_CNT	"xae_cnt"
 #define OTX2_SSO_SINGLE_WS	"single_ws"
 #define OTX2_SSO_GGRP_QOS	"qos"
+#define OTX2_SSO_FORCE_BP	"force_rx_bp"
 
 static void
 parse_queue_param(char *value, void *opaque)
@@ -1734,6 +1735,8 @@  sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
 			   &single_ws);
 	rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
 			   dev);
+	rte_kvargs_process(kvlist, OTX2_SSO_FORCE_BP, &parse_kvargs_flag,
+			   &dev->force_rx_bp);
 	otx2_parse_common_devargs(kvlist);
 	dev->dual_ws = !single_ws;
 	rte_kvargs_free(kvlist);
@@ -1892,4 +1895,5 @@  RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
 			      OTX2_SSO_SINGLE_WS "=1"
 			      OTX2_SSO_GGRP_QOS "=<string>"
+			      OTX2_SSO_FORCE_BP "=1"
 			      OTX2_NPA_LOCK_MASK "=<1-65535>");
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 96e5799be1..a5d34b7df7 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -151,6 +151,7 @@  struct otx2_sso_evdev {
 	uint8_t dual_ws;
 	uint32_t xae_cnt;
 	uint8_t qos_queue_cnt;
+	uint8_t force_rx_bp;
 	struct otx2_sso_qos *qos_parse_data;
 	/* HW const */
 	uint32_t xae_waes;
diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index d85c3665ca..a91f784b1e 100644
--- a/drivers/event/octeontx2/otx2_evdev_adptr.c
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -4,6 +4,8 @@ 
 
 #include "otx2_evdev.h"
 
+#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)
+
 int
 otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
@@ -306,6 +308,87 @@  sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 	}
 }
 
+static inline void
+sso_cfg_nix_mp_bpid(struct otx2_sso_evdev *dev,
+		    struct otx2_eth_dev *otx2_eth_dev, struct otx2_eth_rxq *rxq,
+		    uint8_t ena)
+{
+	struct otx2_fc_info *fc = &otx2_eth_dev->fc_info;
+	struct npa_aq_enq_req *req;
+	struct npa_aq_enq_rsp *rsp;
+	struct otx2_npa_lf *lf;
+	struct otx2_mbox *mbox;
+	uint32_t limit;
+	int rc;
+
+	if (otx2_dev_is_sdp(otx2_eth_dev))
+		return;
+
+	lf = otx2_npa_lf_obj_get();
+	if (!lf)
+		return;
+	mbox = lf->mbox;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL)
+		return;
+
+	req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_READ;
+
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return;
+
+	limit = rsp->aura.limit;
+	/* BP is already enabled. */
+	if (rsp->aura.bp_ena) {
+		/* If BP IDs don't match, disable BP. */
+		if ((rsp->aura.nix0_bpid != fc->bpid[0]) && !dev->force_rx_bp) {
+			req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+			if (req == NULL)
+				return;
+
+			req->aura_id =
+				npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+			req->ctype = NPA_AQ_CTYPE_AURA;
+			req->op = NPA_AQ_INSTOP_WRITE;
+
+			req->aura.bp_ena = 0;
+			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+			otx2_mbox_process(mbox);
+		}
+		return;
+	}
+
+	/* BP was previously enabled but is now disabled; skip. */
+	if (rsp->aura.bp)
+		return;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL)
+		return;
+
+	req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_WRITE;
+
+	if (ena) {
+		req->aura.nix0_bpid = fc->bpid[0];
+		req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+		req->aura.bp = NIX_RQ_AURA_THRESH(
+			limit > 128 ? 256 : limit); /* 95% of size */
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	}
+
+	req->aura.bp_ena = !!ena;
+	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+	otx2_mbox_process(mbox);
+}
+
 int
 otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 			      const struct rte_eth_dev *eth_dev,
@@ -326,8 +409,9 @@  otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
 			rxq = eth_dev->data->rx_queues[i];
 			sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
-			rc = sso_xae_reconfigure((struct rte_eventdev *)
-						 (uintptr_t)event_dev);
+			sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
+			rc = sso_xae_reconfigure(
+				(struct rte_eventdev *)(uintptr_t)event_dev);
 			rc |= sso_rxq_enable(otx2_eth_dev, i,
 					     queue_conf->ev.sched_type,
 					     queue_conf->ev.queue_id, port);
@@ -337,6 +421,7 @@  otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 	} else {
 		rxq = eth_dev->data->rx_queues[rx_queue_id];
 		sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+		sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
 		rc = sso_xae_reconfigure((struct rte_eventdev *)
 					 (uintptr_t)event_dev);
 		rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
@@ -363,19 +448,25 @@  otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 			      const struct rte_eth_dev *eth_dev,
 			      int32_t rx_queue_id)
 {
-	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
 	int i, rc;
 
-	RTE_SET_USED(event_dev);
 	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
 	if (rc)
 		return -EINVAL;
 
 	if (rx_queue_id < 0) {
-		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
-			rc = sso_rxq_disable(dev, i);
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			rc = sso_rxq_disable(otx2_eth_dev, i);
+			sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
+					    eth_dev->data->rx_queues[i], false);
+		}
 	} else {
-		rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+		rc = sso_rxq_disable(otx2_eth_dev, (uint16_t)rx_queue_id);
+		sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
+				    eth_dev->data->rx_queues[rx_queue_id],
+				    false);
 	}
 
 	if (rc < 0)