[v3,2/2] net/mlx5: add mlx5 APIs for single flow dump feature

Message ID 1618485564-128533-3-git-send-email-haifeil@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: support single flow dump on MLX5 PMD

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS

Commit Message

Haifei Luo April 15, 2021, 11:19 a.m. UTC
Modify the mlx5_flow_dev_dump API to support the feature.
Modify mlx5_socket accordingly, since an extra argument, flow_ptr, is added.

The data structure sent to the DPDK application from the utility triggering
the flow dumps should be packed, and its endianness must be specified.
The native host endianness can be used, since all exchange happens within
the same host (we use sendmsg ancillary data to share the file handle;
a remote approach is not applicable, as no inter-host communication happens).

The message structure used to dump one flow or all flows:
struct mlx5_flow_dump_req {
	uint32_t port_id;
	uint64_t flow_ptr;
} __rte_packed;

If flow_ptr is 0, all flows for the specified port will be dumped.
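
For illustration, here is a minimal client-side sketch (not part of the
patch) of how the dump utility might marshal this request and pass the
output file descriptor as SCM_RIGHTS ancillary data, mirroring the service
handler changed below. The helper name send_dump_req and the error handling
are assumptions, not PMD API:

/*
 * Hypothetical client-side sketch, not part of the patch: send the
 * packed request plus the dump output file descriptor to the PMD's
 * Unix socket.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

struct mlx5_flow_dump_req {
	uint32_t port_id;
	uint64_t flow_ptr; /* 0 = dump all flows on the port. */
} __attribute__((packed));

static int
send_dump_req(int sock, int out_fd, uint32_t port_id, uint64_t flow_ptr)
{
	struct mlx5_flow_dump_req req = {
		.port_id = port_id,
		.flow_ptr = flow_ptr,
	};
	struct iovec io = { .iov_base = &req, .iov_len = sizeof(req) };
	char buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msg = {
		.msg_iov = &io,
		.msg_iovlen = 1,
		.msg_control = buf,
		.msg_controllen = sizeof(buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* The file to dump into travels as SCM_RIGHTS ancillary data. */
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &out_fd, sizeof(int));
	return sendmsg(sock, &msg, 0) == (ssize_t)sizeof(req) ? 0 : -1;
}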

Signed-off-by: Haifei Luo <haifeil@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.h     |  3 +++
 drivers/net/mlx5/linux/mlx5_socket.c | 47 ++++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5.h              | 10 ++++++++
 drivers/net/mlx5/mlx5_flow.c         | 30 +++++++++++++++++++++--
 4 files changed, 78 insertions(+), 12 deletions(-)
  

Comments

Slava Ovsiienko April 15, 2021, 11:28 a.m. UTC | #1
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Thursday, April 15, 2021 14:19
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li
> <xuemingl@nvidia.com>; Haifei Luo <haifeil@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Subject: [PATCH v3 2/2] net/mlx5: add mlx5 APIs for single flow dump
> feature
> 
> Modify the mlx5_flow_dev_dump API to support the feature.
> Modify mlx5_socket accordingly, since an extra argument, flow_ptr, is added.
> 
> The data structure sent to the DPDK application from the utility triggering
> the flow dumps should be packed, and its endianness must be specified.
> The native host endianness can be used, since all exchange happens within
> the same host (we use sendmsg ancillary data to share the file handle;
> a remote approach is not applicable, as no inter-host communication happens).
> 
> The message structure used to dump one flow or all flows:
> struct mlx5_flow_dump_req {
> 	uint32_t port_id;
> 	uint64_t flow_ptr;
> } __rte_packed;
> 
> If flow_ptr is 0, all flows for the specified port will be dumped.
> 
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.h b/drivers/net/mlx5/linux/mlx5_os.h
index 6100a13..4ae7d0e 100644
--- a/drivers/net/mlx5/linux/mlx5_os.h
+++ b/drivers/net/mlx5/linux/mlx5_os.h
@@ -14,6 +14,9 @@  enum {
 	MLX5_FS_PATH_MAX = IBV_SYSFS_PATH_MAX + 1
 };
 
+/* Maximal data of sendmsg message(in bytes). */
+#define MLX5_SENDMSG_MAX 64
+
 #define MLX5_NAMESIZE IF_NAMESIZE
 
 #define PCI_DRV_FLAGS  (RTE_PCI_DRV_INTR_LSC | \
diff --git a/drivers/net/mlx5/linux/mlx5_socket.c b/drivers/net/mlx5/linux/mlx5_socket.c
index 6e354f4..6356b66 100644
--- a/drivers/net/mlx5/linux/mlx5_socket.c
+++ b/drivers/net/mlx5/linux/mlx5_socket.c
@@ -2,6 +2,10 @@ 
  * Copyright 2019 Mellanox Technologies, Ltd
  */
 
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
@@ -30,10 +34,11 @@ 
 	int conn_sock;
 	int ret;
 	struct cmsghdr *cmsg = NULL;
-	int data;
-	char buf[CMSG_SPACE(sizeof(int))] = { 0 };
+	uint32_t data[MLX5_SENDMSG_MAX / sizeof(uint32_t)];
+	uint64_t flow_ptr = 0;
+	uint8_t  buf[CMSG_SPACE(sizeof(int))] = { 0 };
 	struct iovec io = {
-		.iov_base = &data,
+		.iov_base = data,
 		.iov_len = sizeof(data),
 	};
 	struct msghdr msg = {
@@ -42,11 +47,16 @@ 
 		.msg_control = buf,
 		.msg_controllen = sizeof(buf),
 	};
-	uint16_t port_id;
+
+	uint32_t port_id;
 	int fd;
 	FILE *file = NULL;
 	struct rte_eth_dev *dev;
+	struct rte_flow_error err;
+	struct mlx5_flow_dump_req  *dump_req;
+	struct mlx5_flow_dump_ack  *dump_ack;
 
+	memset(data, 0, sizeof(data));
 	/* Accept the connection from the client. */
 	conn_sock = accept(server_socket, NULL, NULL);
 	if (conn_sock < 0) {
@@ -54,11 +64,12 @@ 
 		return;
 	}
 	ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
-	if (ret < 0) {
+	if (ret != sizeof(struct mlx5_flow_dump_req)) {
 		DRV_LOG(WARNING, "wrong message received: %s",
 			strerror(errno));
 		goto error;
 	}
+
 	/* Receive file descriptor. */
 	cmsg = CMSG_FIRSTHDR(&msg);
 	if (cmsg == NULL || cmsg->cmsg_type != SCM_RIGHTS ||
@@ -77,22 +88,38 @@ 
 		DRV_LOG(WARNING, "wrong port number message");
 		goto error;
 	}
-	memcpy(&port_id, msg.msg_iov->iov_base, sizeof(port_id));
+
+	dump_req = (struct mlx5_flow_dump_req *)msg.msg_iov->iov_base;
+	if (dump_req) {
+		port_id = dump_req->port_id;
+		flow_ptr = dump_req->flow_id;
+	} else {
+		DRV_LOG(WARNING, "Invalid message");
+		goto error;
+	}
+
 	if (!rte_eth_dev_is_valid_port(port_id)) {
 		DRV_LOG(WARNING, "Invalid port %u", port_id);
 		goto error;
 	}
+
 	/* Dump flow. */
 	dev = &rte_eth_devices[port_id];
-	ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
+	if (flow_ptr == 0)
+		ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
+	else
+		ret = mlx5_flow_dev_dump(dev,
+			(struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
+
 	/* Set-up the ancillary data and reply. */
 	msg.msg_controllen = 0;
 	msg.msg_control = NULL;
 	msg.msg_iovlen = 1;
 	msg.msg_iov = &io;
-	data = -ret;
-	io.iov_len = sizeof(data);
-	io.iov_base = &data;
+	dump_ack = (struct mlx5_flow_dump_ack *)data;
+	dump_ack->rc = -ret;
+	io.iov_len = sizeof(struct mlx5_flow_dump_ack);
+	io.iov_base = dump_ack;
 	do {
 		ret = sendmsg(conn_sock, &msg, 0);
 	} while (ret < 0 && errno == EINTR);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e0f7101..452c5de 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -118,6 +118,16 @@  struct mlx5_dev_spawn_data {
 	struct mlx5_bond_info *bond_info;
 };
 
+/** Data associated with socket messages. */
+struct mlx5_flow_dump_req  {
+	uint32_t port_id; /**< There are plans in DPDK to extend port_id. */
+	uint64_t flow_id;
+} __rte_packed;
+
+struct mlx5_flow_dump_ack {
+	int rc; /**< Return code. */
+};
+
 /** Key string for IPC. */
 #define MLX5_MP_NAME "net_mlx5_mp"
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8446307..ff40406 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7161,6 +7161,10 @@  struct mlx5_meter_domains_infos *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	uint32_t handle_idx;
+	int ret;
+	struct mlx5_flow_handle *dh;
+	struct rte_flow *flow;
 
 	if (!priv->config.dv_flow_en) {
 		if (fputs("device dv flow disabled\n", file) <= 0)
@@ -7168,10 +7172,32 @@  struct mlx5_meter_domains_infos *
 		return -ENOTSUP;
 	}
 
+	/* dump all */
 	if (!flow_idx)
 		return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
-				sh->rx_domain, sh->tx_domain, file);
-	return -ENOTSUP;
+					sh->rx_domain,
+					sh->tx_domain, file);
+	/* dump one */
+	flow = mlx5_ipool_get(priv->sh->ipool
+			[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+	if (!flow)
+		return -ENOENT;
+
+	handle_idx = flow->dev_handles;
+	while (handle_idx) {
+		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+				handle_idx);
+		if (!dh)
+			return -ENOENT;
+		if (dh->drv_flow) {
+			ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
+					file);
+			if (ret)
+				return -ENOENT;
+		}
+		handle_idx = dh->next.next;
+	}
+	return 0;
 }
 
 /**
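
To close the loop on the socket protocol, here is a hypothetical companion
sketch of receiving the mlx5_flow_dump_ack reply that the patched service
handler sends back on the same socket. The helper name recv_dump_ack is
illustrative only; the struct layout follows the definition added to
mlx5.h above.

/*
 * Hypothetical companion sketch, not part of the patch: receive the
 * acknowledgment the service handler sends after dumping.
 */
#include <sys/socket.h>
#include <sys/uio.h>

struct mlx5_flow_dump_ack {
	int rc; /* Handler sends -ret, so 0 means success. */
};

static int
recv_dump_ack(int sock)
{
	struct mlx5_flow_dump_ack ack;
	struct iovec io = { .iov_base = &ack, .iov_len = sizeof(ack) };
	struct msghdr msg = { .msg_iov = &io, .msg_iovlen = 1 };

	if (recvmsg(sock, &msg, MSG_WAITALL) != (ssize_t)sizeof(ack))
		return -1;
	return ack.rc == 0 ? 0 : -ack.rc; /* Map back to a negative errno. */
}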