[12/17] net/mana: add function to start/stop TX queues

Message ID 1656666167-26035-13-git-send-email-longli@linuxonhyperv.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series Introduce Microsoft Azure Network Adapter (MANA) PMD

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Long Li July 1, 2022, 9:02 a.m. UTC
  From: Long Li <longli@microsoft.com>

MANA allocates device queues through the IB layer when starting TX queues. When
the device is stopped, all the queues are unmapped and freed. A short sketch of
how these helpers are called from the ethdev start/stop path follows the
diffstat below.

Signed-off-by: Long Li <longli@microsoft.com>
---
 doc/guides/nics/features/mana.ini |   1 +
 drivers/net/mana/mana.h           |   4 +
 drivers/net/mana/meson.build      |   1 +
 drivers/net/mana/tx.c             | 180 ++++++++++++++++++++++++++++++
 4 files changed, 186 insertions(+)
 create mode 100644 drivers/net/mana/tx.c
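
As referenced in the commit message, here is a minimal sketch of how
start_tx_queues()/stop_tx_queues() would typically be wired into the ethdev
start/stop ops. The op names mana_dev_start()/mana_dev_stop() and the RX-side
helpers are assumptions for illustration only; the actual wiring is added by
other patches in this series.

/*
 * Illustrative sketch only (not part of this patch): wiring the new TX
 * helpers into the ethdev start/stop ops. mana_dev_start()/mana_dev_stop()
 * and start_rx_queues()/stop_rx_queues() are assumed names.
 */
#include <ethdev_driver.h>
#include "mana.h"

static int
mana_dev_start(struct rte_eth_dev *dev)
{
	int ret;

	/* Create the IB/GDMA CQ and QP for every configured TX queue */
	ret = start_tx_queues(dev);
	if (ret)
		return ret;

	/* Bring up the RX side the same way (hypothetical helper) */
	ret = start_rx_queues(dev);
	if (ret) {
		stop_tx_queues(dev);
		return ret;
	}

	return 0;
}

static int
mana_dev_stop(struct rte_eth_dev *dev)
{
	/* Destroy QPs/CQs and free any mbufs still posted on the rings */
	stop_rx_queues(dev);
	return stop_tx_queues(dev);
}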
  

Comments

Stephen Hemminger July 1, 2022, 4:39 p.m. UTC | #1
On Fri,  1 Jul 2022 02:02:42 -0700
longli@linuxonhyperv.com wrote:

> +#include <stddef.h>
> +#include <unistd.h>
> +#include <string.h>
> +#include <stdint.h>
> +#include <stdlib.h>
> +#include <errno.h>
> +#include <stdio.h>
> +#include <sys/types.h>
> +#include <dirent.h>
> +
> +#include <rte_malloc.h>
> +#include <ethdev_driver.h>
> +#include <ethdev_pci.h>
> +#include <rte_pci.h>
> +#include <rte_bus_pci.h>
> +#include <rte_common.h>
> +#include <rte_kvargs.h>
> +#include <rte_rwlock.h>
> +#include <rte_spinlock.h>
> +#include <rte_string_fns.h>
> +#include <rte_alarm.h>
> +#include <rte_log.h>
> +#include <rte_eal_paging.h>
> +#include <rte_io.h>
> +
> +#include <infiniband/verbs.h>
> +#include <infiniband/manadv.h>
> +
> +#include "mana.h"
> +

Lots of extra includes here as well.
Try the IWYU tool to find what is needed?
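
For reference, a plausible trimmed include set for tx.c, based on what the file
actually uses (memset, errno, rte_pktmbuf_free, the ethdev structures, and the
verbs/manadv calls). This is an editorial guess rather than an IWYU result, and
it assumes mana.h does not already pull these headers in:

#include <string.h>             /* memset() */
#include <errno.h>              /* errno */

#include <ethdev_driver.h>      /* struct rte_eth_dev */
#include <rte_mbuf.h>           /* rte_pktmbuf_free() */

#include <infiniband/verbs.h>   /* ibv_create_cq(), ibv_create_qp(), ibv_destroy_*() */
#include <infiniband/manadv.h>  /* manadv_set_context_attr(), manadv_init_obj() */

#include "mana.h"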
  

Patch

diff --git a/doc/guides/nics/features/mana.ini b/doc/guides/nics/features/mana.ini
index 9ba4767a06..7546c99ea3 100644
--- a/doc/guides/nics/features/mana.ini
+++ b/doc/guides/nics/features/mana.ini
@@ -7,6 +7,7 @@ 
 Link status          = P
 Linux                = Y
 Multiprocess aware   = Y
+Queue start/stop     = Y
 Removal event        = Y
 RSS hash             = Y
 Speed capabilities   = P
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 1847902054..fef646a9a7 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -379,6 +379,10 @@  uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 int gdma_poll_completion_queue(struct mana_gdma_queue *cq,
 			       struct gdma_comp *comp);
 
+int start_tx_queues(struct rte_eth_dev *dev);
+
+int stop_tx_queues(struct rte_eth_dev *dev);
+
 struct mana_mr_cache *find_pmd_mr(struct mana_mr_btree *local_tree,
 				  struct mana_priv *priv,
 				  struct rte_mbuf *mbuf);
diff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build
index 4a80189428..34bb9c6b2f 100644
--- a/drivers/net/mana/meson.build
+++ b/drivers/net/mana/meson.build
@@ -11,6 +11,7 @@  deps += ['pci', 'bus_pci', 'net', 'eal', 'kvargs']
 
 sources += files(
 	'mana.c',
+	'tx.c',
 	'mr.c',
 	'gdma.c',
 	'mp.c',
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
new file mode 100644
index 0000000000..dde911e548
--- /dev/null
+++ b/drivers/net/mana/tx.c
@@ -0,0 +1,180 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2022 Microsoft Corporation
+ */
+
+#include <stddef.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <dirent.h>
+
+#include <rte_malloc.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_alarm.h>
+#include <rte_log.h>
+#include <rte_eal_paging.h>
+#include <rte_io.h>
+
+#include <infiniband/verbs.h>
+#include <infiniband/manadv.h>
+
+#include "mana.h"
+
+int stop_tx_queues(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int i;
+
+	for (i = 0; i < priv->num_queues; i++) {
+		struct mana_txq *txq = dev->data->tx_queues[i];
+
+		if (txq->qp) {
+			ibv_destroy_qp(txq->qp);
+			txq->qp = NULL;
+		}
+
+		if (txq->cq) {
+			ibv_destroy_cq(txq->cq);
+			txq->cq = NULL;
+		}
+
+		/* Drain and free posted WQEs */
+		while (txq->desc_ring_tail != txq->desc_ring_head) {
+			struct mana_txq_desc *desc =
+				&txq->desc_ring[txq->desc_ring_tail];
+
+			rte_pktmbuf_free(desc->pkt);
+
+			txq->desc_ring_tail =
+				(txq->desc_ring_tail + 1) % txq->num_desc;
+		}
+		txq->desc_ring_head = 0;
+		txq->desc_ring_tail = 0;
+
+		memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
+		memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
+	}
+
+	return 0;
+}
+
+int start_tx_queues(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int ret, i;
+
+	/* start TX queues */
+	for (i = 0; i < priv->num_queues; i++) {
+		struct mana_txq *txq;
+		struct ibv_qp_init_attr qp_attr = { 0 };
+		struct manadv_obj obj = {};
+		struct manadv_qp dv_qp;
+		struct manadv_cq dv_cq;
+
+		txq = dev->data->tx_queues[i];
+
+		manadv_set_context_attr(priv->ib_ctx,
+			MANADV_CTX_ATTR_BUF_ALLOCATORS,
+			(void *)((uintptr_t)&(struct manadv_ctx_allocators){
+				.alloc = &mana_alloc_verbs_buf,
+				.free = &mana_free_verbs_buf,
+				.data = (void *)(uintptr_t)txq->socket,
+			}));
+
+		txq->cq = ibv_create_cq(priv->ib_ctx, txq->num_desc,
+					NULL, NULL, 0);
+		if (!txq->cq) {
+			DRV_LOG(ERR, "failed to create cq queue index %d", i);
+			ret = -errno;
+			goto fail;
+		}
+
+		qp_attr.send_cq = txq->cq;
+		qp_attr.recv_cq = txq->cq;
+		qp_attr.cap.max_send_wr = txq->num_desc;
+		qp_attr.cap.max_send_sge = priv->max_send_sge;
+
+		/* Skip setting qp_attr.cap.max_inline_data */
+
+		qp_attr.qp_type = IBV_QPT_RAW_PACKET;
+		qp_attr.sq_sig_all = 0;
+
+		txq->qp = ibv_create_qp(priv->ib_parent_pd, &qp_attr);
+		if (!txq->qp) {
+			DRV_LOG(ERR, "Failed to create qp queue index %d", i);
+			ret = -errno;
+			goto fail;
+		}
+
+		/* Get the addresses of CQ, QP and DB */
+		obj.qp.in = txq->qp;
+		obj.qp.out = &dv_qp;
+		obj.cq.in = txq->cq;
+		obj.cq.out = &dv_cq;
+		ret = manadv_init_obj(&obj, MANADV_OBJ_QP | MANADV_OBJ_CQ);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to get manadv objects");
+			goto fail;
+		}
+
+		txq->gdma_sq.buffer = obj.qp.out->sq_buf;
+		txq->gdma_sq.count = obj.qp.out->sq_count;
+		txq->gdma_sq.size = obj.qp.out->sq_size;
+		txq->gdma_sq.id = obj.qp.out->sq_id;
+
+		txq->tx_vp_offset = obj.qp.out->tx_vp_offset;
+		priv->db_page = obj.qp.out->db_page;
+		DRV_LOG(INFO, "txq sq id %u vp_offset %u db_page %p "
+				" buf %p count %u size %u",
+				txq->gdma_sq.id, txq->tx_vp_offset,
+				priv->db_page,
+				txq->gdma_sq.buffer, txq->gdma_sq.count,
+				txq->gdma_sq.size);
+
+		txq->gdma_cq.buffer = obj.cq.out->buf;
+		txq->gdma_cq.count = obj.cq.out->count;
+		txq->gdma_cq.size = txq->gdma_cq.count * COMP_ENTRY_SIZE;
+		txq->gdma_cq.id = obj.cq.out->cq_id;
+
+		/* CQ head starts with count (not 0) */
+		txq->gdma_cq.head = txq->gdma_cq.count;
+
+		DRV_LOG(INFO, "txq cq id %u buf %p count %u size %u head %u",
+			txq->gdma_cq.id, txq->gdma_cq.buffer,
+			txq->gdma_cq.count, txq->gdma_cq.size,
+			txq->gdma_cq.head);
+	}
+
+	return 0;
+
+fail:
+	stop_tx_queues(dev);
+	return ret;
+}
+
+static inline uint16_t get_vsq_frame_num(uint32_t vsq)
+{
+	union {
+		uint32_t gdma_txq_id;
+		struct {
+			uint32_t reserved1	: 10;
+			uint32_t vsq_frame	: 14;
+			uint32_t reserved2	: 8;
+		};
+	} v;
+
+	v.gdma_txq_id = vsq;
+	return v.vsq_frame;
+}
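
For clarity, get_vsq_frame_num() decodes the virtual send queue frame number
from a GDMA TX queue id through an anonymous bit-field union: bits 0-9 are
reserved and bits 10-23 hold the frame number. An equivalent shift-and-mask
form is shown below purely as an illustration; it assumes the LSB-first
bit-field layout that GCC and Clang use on little-endian targets.

/* Layout-explicit equivalent of get_vsq_frame_num() (illustration only) */
static inline uint16_t get_vsq_frame_num_masked(uint32_t vsq)
{
	return (vsq >> 10) & 0x3FFF;	/* skip 10 reserved bits, keep 14 */
}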