[v9,09/12] net/nfp: add flower ctrl VNIC rxtx logic

Message ID 1663238669-12244-10-git-send-email-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: preparation for the rte_flow offload of nfp PMD

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He Sept. 15, 2022, 10:44 a.m. UTC
Add the Rx and Tx functions for the ctrl vNIC. The logic is mostly
identical to the normal Rx and Tx functionality of the NFP PMD.

Make use of the ctrl vNIC service infrastructure to service the ctrl
vNIC Rx path.
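
The service callback registered in this patch only wraps
nfp_flower_ctrl_vnic_poll(); the poll only runs once the service is mapped to
a service lcore by nfp_map_service(), which came with the flower service
infrastructure. As a rough, illustrative sketch (not the code of that helper),
the mapping with the standard rte_service API looks like this; the function
name is hypothetical:

#include <errno.h>

#include <rte_service.h>
#include <rte_service_component.h>

/*
 * Illustrative sketch only: the kind of mapping nfp_map_service() is
 * assumed to perform for a registered service id.
 */
static int
flower_map_service_to_core(uint32_t service_id, uint32_t lcore_id)
{
	int ret;

	/* Turn the chosen lcore into a service lcore */
	ret = rte_service_lcore_add(lcore_id);
	if (ret != 0 && ret != -EALREADY)
		return ret;

	/* Allow this lcore to run the given service */
	ret = rte_service_map_lcore_set(service_id, lcore_id, 1);
	if (ret != 0)
		return ret;

	/* Mark both the component and the service as runnable */
	rte_service_component_runstate_set(service_id, 1);
	rte_service_runstate_set(service_id, 1);

	/* Start the service lcore loop */
	ret = rte_service_lcore_start(lcore_id);
	if (ret != 0 && ret != -EALREADY)
		return ret;

	return 0;
}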

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Signed-off-by: Heinrich Kuhn <heinrich.kuhn@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
 doc/guides/rel_notes/release_22_11.rst   |   1 +
 drivers/net/nfp/flower/nfp_flower.c      |  59 ++++++++
 drivers/net/nfp/flower/nfp_flower.h      |  21 +++
 drivers/net/nfp/flower/nfp_flower_ctrl.c | 250 +++++++++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_flower_ctrl.h |  13 ++
 drivers/net/nfp/meson.build              |   1 +
 6 files changed, 345 insertions(+)
 create mode 100644 drivers/net/nfp/flower/nfp_flower_ctrl.c
 create mode 100644 drivers/net/nfp/flower/nfp_flower_ctrl.h
  

Patch

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 6a666aa..c5a8c08 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -61,6 +61,7 @@  New Features
 
     * Added the support of flower firmware.
     * Added the flower service infrastructure.
+    * Added the control message interaction channels between the PMD and the firmware.
 
 
 Removed Items
diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 18ffa5c..e935821 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -20,6 +20,7 @@ 
 #include "../nfpcore/nfp_rtsym.h"
 #include "../nfpcore/nfp_nsp.h"
 #include "nfp_flower.h"
+#include "nfp_flower_ctrl.h"
 
 #define MAX_PKT_BURST 32
 #define MBUF_PRIV_SIZE 128
@@ -681,6 +682,56 @@  struct dp_packet {
 	return 0;
 }
 
+static int
+nfp_flower_ctrl_vnic_service(void *arg)
+{
+	struct nfp_app_fw_flower *app_fw_flower = arg;
+
+	nfp_flower_ctrl_vnic_poll(app_fw_flower);
+
+	return 0;
+}
+
+static struct rte_service_spec flower_services[NFP_FLOWER_SERVICE_MAX] = {
+	[NFP_FLOWER_SERVICE_CTRL] = {
+		.name         = "flower_ctrl_vnic_service",
+		.callback     = nfp_flower_ctrl_vnic_service,
+	},
+};
+
+static int
+nfp_flower_enable_services(struct nfp_app_fw_flower *app_fw_flower)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < NFP_FLOWER_SERVICE_MAX; i++) {
+		/* Pass a pointer to the flower app to the service */
+		flower_services[i].callback_userdata = (void *)app_fw_flower;
+
+		/* Register the flower services */
+		ret = rte_service_component_register(&flower_services[i],
+				&app_fw_flower->service_ids[i]);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Could not register %s",
+					flower_services[i].name);
+			return -EINVAL;
+		}
+
+		PMD_INIT_LOG(INFO, "%s registered", flower_services[i].name);
+
+		/* Map them to available service cores */
+		ret = nfp_map_service(app_fw_flower->service_ids[i]);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Could not map %s",
+					flower_services[i].name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 int
 nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
 {
@@ -777,6 +828,14 @@  struct dp_packet {
 		goto ctrl_vnic_cleanup;
 	}
 
+	/* Start up flower services */
+	ret = nfp_flower_enable_services(app_fw_flower);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Could not enable flower services");
+		ret = -ESRCH;
+		goto ctrl_vnic_cleanup;
+	}
+
 	return 0;
 
 ctrl_vnic_cleanup:
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index e18703e..b9379e5 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -6,8 +6,23 @@ 
 #ifndef _NFP_FLOWER_H_
 #define _NFP_FLOWER_H_
 
+enum nfp_flower_service {
+	NFP_FLOWER_SERVICE_CTRL,
+	NFP_FLOWER_SERVICE_MAX,
+};
+/*
+ * The flower fallback and ctrl paths always add and remove
+ * 8 bytes of prepended data. Tx descriptors must point to
+ * the correct packet data offset after metadata has been
+ * added.
+ */
+#define FLOWER_PKT_DATA_OFFSET 8
+
 /* The flower application's private structure */
 struct nfp_app_fw_flower {
+	/* List of rte_service ID's */
+	uint32_t service_ids[NFP_FLOWER_SERVICE_MAX];
+
 	/* Pointer to a mempool for the PF vNIC */
 	struct rte_mempool *pf_pktmbuf_pool;
 
@@ -22,6 +37,12 @@  struct nfp_app_fw_flower {
 
 	/* the eth table as reported by firmware */
 	struct nfp_eth_table *nfp_eth_table;
+
+	/* Ctrl vNIC Rx counter */
+	uint64_t ctrl_vnic_rx_count;
+
+	/* Ctrl vNIC Tx counter */
+	uint64_t ctrl_vnic_tx_count;
 };
 
 int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
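
The FLOWER_PKT_DATA_OFFSET definition above drives the ctrl vNIC mbuf layout:
every control message carries 8 bytes of prepended metadata, and the Tx
descriptor offset has to match it. The real cmsg helpers come later in the
series; the following is only a hypothetical sketch of how a builder could
reserve that prepend ahead of the payload (flower_build_cmsg() and the zeroed
metadata are assumptions, not part of the driver):

#include <string.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "nfp_flower.h"

/*
 * Hypothetical helper, for illustration only: reserve the 8-byte prepend
 * (FLOWER_PKT_DATA_OFFSET) before the cmsg payload so the Tx descriptor
 * offset matches what is actually placed in the mbuf.
 */
static struct rte_mbuf *
flower_build_cmsg(struct rte_mempool *pool, const void *payload, uint16_t len)
{
	char *meta;
	char *body;
	struct rte_mbuf *mbuf;

	mbuf = rte_pktmbuf_alloc(pool);
	if (mbuf == NULL)
		return NULL;

	/* 8 bytes of prepended metadata, zeroed here for illustration */
	meta = rte_pktmbuf_append(mbuf, FLOWER_PKT_DATA_OFFSET);
	if (meta == NULL) {
		rte_pktmbuf_free(mbuf);
		return NULL;
	}
	memset(meta, 0, FLOWER_PKT_DATA_OFFSET);

	/* Control message payload follows the metadata */
	body = rte_pktmbuf_append(mbuf, len);
	if (body == NULL) {
		rte_pktmbuf_free(mbuf);
		return NULL;
	}
	memcpy(body, payload, len);

	return mbuf;
}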
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
new file mode 100644
index 0000000..df908ef
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -0,0 +1,250 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+
+#include "../nfp_common.h"
+#include "../nfp_logs.h"
+#include "../nfp_ctrl.h"
+#include "../nfp_rxtx.h"
+#include "nfp_flower.h"
+#include "nfp_flower_ctrl.h"
+
+#define MAX_PKT_BURST 32
+
+static uint16_t
+nfp_flower_ctrl_vnic_recv(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	uint64_t dma_addr;
+	uint16_t avail = 0;
+	struct rte_mbuf *mb;
+	uint16_t nb_hold = 0;
+	struct nfp_net_hw *hw;
+	struct nfp_net_rxq *rxq;
+	struct rte_mbuf *new_mb;
+	struct nfp_net_rx_buff *rxb;
+	struct nfp_net_rx_desc *rxds;
+
+	rxq = rx_queue;
+	if (unlikely(rxq == NULL)) {
+		/*
+		 * DPDK just checks the queue is lower than max queues
+		 * enabled. But the queue needs to be configured
+		 */
+		PMD_RX_LOG(ERR, "RX Bad queue");
+		return 0;
+	}
+
+	hw = rxq->hw;
+	while (avail < nb_pkts) {
+		rxb = &rxq->rxbufs[rxq->rd_p];
+		if (unlikely(rxb == NULL)) {
+			PMD_RX_LOG(ERR, "rxb does not exist!");
+			break;
+		}
+
+		rxds = &rxq->rxds[rxq->rd_p];
+		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+			break;
+
+		/*
+		 * Memory barrier to ensure that we won't do other
+		 * reads before the DD bit.
+		 */
+		rte_rmb();
+
+		/*
+		 * We got a packet. Let's alloc a new mbuf for refilling the
+		 * free descriptor ring as soon as possible
+		 */
+		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
+		if (unlikely(new_mb == NULL)) {
+			PMD_RX_LOG(ERR,
+				"RX mbuf alloc failed port_id=%u queue_id=%u",
+				rxq->port_id, (unsigned int)rxq->qidx);
+			nfp_net_mbuf_alloc_failed(rxq);
+			break;
+		}
+
+		nb_hold++;
+
+		/*
+		 * Grab the mbuf and refill the descriptor with the
+		 * previously allocated mbuf
+		 */
+		mb = rxb->mbuf;
+		rxb->mbuf = new_mb;
+
+		/* Size of this segment */
+		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+		/* Size of the whole packet. We just support 1 segment */
+		mb->pkt_len = mb->data_len;
+
+		if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
+			/*
+			 * This should not happen and the user has the
+			 * responsibility of avoiding it. But we have
+			 * to give some info about the error
+			 */
+			RTE_LOG_DP(ERR, PMD,
+				"mbuf overflow likely due to the RX offset.\n"
+				"\t\tYour mbuf size should have extra space for"
+				" RX offset=%u bytes.\n"
+				"\t\tCurrently you just have %u bytes available"
+				" but the received packet is %u bytes long",
+				hw->rx_offset,
+				rxq->mbuf_size - hw->rx_offset,
+				mb->data_len);
+			rte_pktmbuf_free(mb);
+			break;
+		}
+
+		/* Filling the received mbuf with packet info */
+		if (hw->rx_offset)
+			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
+		else
+			mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
+
+		/* No scatter mode supported */
+		mb->nb_segs = 1;
+		mb->next = NULL;
+		mb->port = rxq->port_id;
+
+		rx_pkts[avail++] = mb;
+
+		/* Now resetting and updating the descriptor */
+		rxds->vals[0] = 0;
+		rxds->vals[1] = 0;
+		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
+		rxds->fld.dd = 0;
+		rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+		rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
+		rxq->rd_p++;
+		if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
+			rxq->rd_p = 0;
+	}
+
+	if (nb_hold == 0)
+		return 0;
+
+	nb_hold += rxq->nb_rx_hold;
+
+	/*
+	 * FL descriptors needs to be written before incrementing the
+	 * FL queue WR pointer
+	 */
+	rte_wmb();
+	if (nb_hold >= rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port=%hu queue=%d nb_hold=%hu avail=%hu",
+			rxq->port_id, rxq->qidx, nb_hold, avail);
+		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
+		nb_hold = 0;
+	}
+
+	rxq->nb_rx_hold = nb_hold;
+
+	return avail;
+}
+
+uint16_t
+nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_mbuf *mbuf)
+{
+	uint16_t cnt = 0;
+	uint64_t dma_addr;
+	uint32_t free_descs;
+	struct rte_mbuf **lmbuf;
+	struct nfp_net_txq *txq;
+	struct nfp_net_hw *ctrl_hw;
+	struct rte_eth_dev *ctrl_dev;
+	struct nfp_net_nfd3_tx_desc *txds;
+
+	ctrl_hw = app_fw_flower->ctrl_hw;
+	ctrl_dev = ctrl_hw->eth_dev;
+
+	/* Flower ctrl vNIC only has a single tx queue */
+	txq = ctrl_dev->data->tx_queues[0];
+	if (unlikely(txq == NULL)) {
+		/*
+		 * DPDK just checks the queue is lower than max queues
+		 * enabled. But the queue needs to be configured
+		 */
+		PMD_TX_LOG(ERR, "ctrl dev TX Bad queue");
+		return 0;
+	}
+
+	txds = &txq->txds[txq->wr_p];
+	txds->vals[0] = 0;
+	txds->vals[1] = 0;
+	txds->vals[2] = 0;
+	txds->vals[3] = 0;
+
+	if (nfp_net_nfd3_txq_full(txq))
+		nfp_net_tx_free_bufs(txq);
+
+	free_descs = nfp_net_nfd3_free_tx_desc(txq);
+	if (unlikely(free_descs == 0)) {
+		PMD_TX_LOG(ERR, "ctrl dev no free descs");
+		goto xmit_end;
+	}
+
+	lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+	RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+	if (*lmbuf)
+		rte_pktmbuf_free_seg(*lmbuf);
+
+	*lmbuf = mbuf;
+	dma_addr = rte_mbuf_data_iova(mbuf);
+
+	txds->data_len = mbuf->pkt_len;
+	txds->dma_len = txds->data_len;
+	txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+	txds->dma_addr_lo = (dma_addr & 0xffffffff);
+	txds->offset_eop = FLOWER_PKT_DATA_OFFSET | PCIE_DESC_TX_EOP;
+
+	txq->wr_p++;
+	if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
+		txq->wr_p = 0;
+
+	cnt++;
+	app_fw_flower->ctrl_vnic_tx_count++;
+
+xmit_end:
+	rte_wmb();
+	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, 1);
+
+	return cnt;
+}
+
+void
+nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower)
+{
+	uint16_t i;
+	uint16_t count;
+	struct nfp_net_rxq *rxq;
+	struct nfp_net_hw *ctrl_hw;
+	struct rte_eth_dev *ctrl_eth_dev;
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+
+	ctrl_hw = app_fw_flower->ctrl_hw;
+	ctrl_eth_dev = ctrl_hw->eth_dev;
+
+	/* ctrl vNIC only has a single Rx queue */
+	rxq = ctrl_eth_dev->data->rx_queues[0];
+
+	while (true) {
+		count = nfp_flower_ctrl_vnic_recv(rxq, pkts_burst, MAX_PKT_BURST);
+		if (count != 0) {
+			app_fw_flower->ctrl_vnic_rx_count += count;
+			/* Process cmsgs here, only free for now */
+			for (i = 0; i < count; i++)
+				rte_pktmbuf_free(pkts_burst[i]);
+		}
+	}
+}
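
nfp_flower_ctrl_vnic_xmit() returns the number of packets queued (0 or 1) and
does not take ownership of the mbuf when it bails out, so the caller is
responsible for freeing it on failure. A minimal, hypothetical caller could
look like the sketch below (flower_send_cmsg() is illustrative and not part
of this patch):

#include <errno.h>

#include <rte_mbuf.h>

#include "nfp_flower.h"
#include "nfp_flower_ctrl.h"

/*
 * Hypothetical caller, for illustration only: queue one control message
 * and drop it when the ctrl vNIC Tx ring cannot take it.
 */
static int
flower_send_cmsg(struct nfp_app_fw_flower *app_fw_flower, struct rte_mbuf *mbuf)
{
	uint16_t sent;

	sent = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
	if (sent == 0) {
		/* The Tx path did not take ownership of the mbuf, free it here */
		rte_pktmbuf_free(mbuf);
		return -EIO;
	}

	return 0;
}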
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.h b/drivers/net/nfp/flower/nfp_flower_ctrl.h
new file mode 100644
index 0000000..1e38578
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_FLOWER_CTRL_H_
+#define _NFP_FLOWER_CTRL_H_
+
+void nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower);
+uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_mbuf *mbuf);
+
+#endif /* _NFP_FLOWER_CTRL_H_ */
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index 7ae3115..8710213 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -7,6 +7,7 @@  if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
 endif
 sources = files(
         'flower/nfp_flower.c',
+        'flower/nfp_flower_ctrl.c',
         'nfpcore/nfp_cpp_pcie_ops.c',
         'nfpcore/nfp_nsp.c',
         'nfpcore/nfp_cppcore.c',