[v2,4/5] net/mlx5: add mlx5 APIs for single flow dump feature
Checks
Commit Message
Modify API mlx5_flow_dev_dump to support the feature.
Modify mlx5_socket since one extra arg flow_ptr is added.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_socket.c | 30 ++++++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow.c | 29 +++++++++++++++++++++++++++--
2 files changed, 51 insertions(+), 8 deletions(-)
Comments
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Wednesday, April 7, 2021 9:09
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li
> <xuemingl@nvidia.com>; Haifei Luo <haifeil@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Subject: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> Modify API mlx5_flow_dev_dump to support the feature.
> Modify mlx5_socket since one extra arg flow_ptr is added.
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Wednesday, April 7, 2021 9:09
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li
> <xuemingl@nvidia.com>; Haifei Luo <haifeil@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Subject: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> Modify API mlx5_flow_dev_dump to support the feature.
> Modify mlx5_socket since one extra arg flow_ptr is added.
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Sorry, this patch was erroneously acked instead of
"common/mlx5: add mlx5 APIs for single flow dump feature".
I have a comment for this one.
> +#ifndef _GNU_SOURCE
> +#define _GNU_SOURCE
> +#endif
> +
> #include <sys/types.h>
> #include <sys/socket.h>
> #include <sys/un.h>
> @@ -29,11 +33,15 @@
> {
> int conn_sock;
> int ret;
> + int j;
> struct cmsghdr *cmsg = NULL;
> - int data;
> + #define LENGTH 9
> + /* The first byte for port_id and the rest for flowptr. */
> + int data[LENGTH];
So, we define a 36/72-byte array? And then use each int as a byte to save the flow_idx value?
I suppose the correct way would be to define the structure of the message instead of using an array of ints,
something like this:
struct mlx5_ipc_msg {
int status;
void* flow_idx;
}
> + /* The first byte in data for port_id and the following 8 for flowptr */
> + for (j = 1; j < LENGTH; j++)
> + flow_ptr = (flow_ptr << 8) + data[j];
If the structure is defined, this should be:
flow_ptr = msg->flow_idx
> + if (flow_ptr == 0)
> + ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
> + else
> + ret = mlx5_flow_dev_dump(dev,
> + (struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
> +
> + /*dump one*/
> + uint32_t handle_idx;
> + int ret;
> + struct mlx5_flow_handle *dh;
> + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
> + [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
> +
Please, move variable declarations to the routine beginning, to others
With best regards, Slava
Hi Slava,
For #1, the steering tool sends messages to DPDK to request a dump. Server/client use the data structure "struct msghdr"
to communicate. It has "msg_iov", "msg_iovlen", etc.
On the tool side, msg_iov is constructed as 1 byte for port_id and 8 bytes for flowptr. In DPDK, we then parse the message this way.
For #2, I will move them to the beginning.
-----Original Message-----
From: Slava Ovsiienko <viacheslavo@nvidia.com>
Sent: Monday, April 12, 2021 3:38 PM
To: Haifei Luo <haifeil@nvidia.com>; dev@dpdk.org
Cc: Ori Kam <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
Subject: RE: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump feature
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Wednesday, April 7, 2021 9:09
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo
> <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler
> <shahafs@nvidia.com>
> Subject: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> Modify API mlx5_flow_dev_dump to support the feature.
> Modify mlx5_socket since one extra arg flow_ptr is added.
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Sorry, this patch was erroneously acked instead of
"common/mlx5: add mlx5 APIs for single flow dump feature".
I have a comment for this one.
> +#ifndef _GNU_SOURCE
> +#define _GNU_SOURCE
> +#endif
> +
> #include <sys/types.h>
> #include <sys/socket.h>
> #include <sys/un.h>
> @@ -29,11 +33,15 @@
> {
> int conn_sock;
> int ret;
> + int j;
> struct cmsghdr *cmsg = NULL;
> - int data;
> + #define LENGTH 9
> + /* The first byte for port_id and the rest for flowptr. */
> + int data[LENGTH];
So, we define a 36/72-byte array? And then use each int as a byte to save the flow_idx value?
I suppose the correct way would be to define the structure of the message instead of using an array of ints, something like this:
struct mlx5_ipc_msg {
int status;
void* flow_idx;
}
> + /* The first byte in data for port_id and the following 8 for flowptr */
> + for (j = 1; j < LENGTH; j++)
> + flow_ptr = (flow_ptr << 8) + data[j];
If the structure is defined, this should be:
flow_ptr = msg->flow_idx
> + if (flow_ptr == 0)
> + ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
> + else
> + ret = mlx5_flow_dev_dump(dev,
> + (struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
> +
> + /*dump one*/
> + uint32_t handle_idx;
> + int ret;
> + struct mlx5_flow_handle *dh;
> + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
> + [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
> +
Please, move variable declarations to the routine beginning, to others
With best regards, Slava
Hi Slava,
Yes for "we define a 36/72-byte array?".
A correction to my last comment: not bytes — it is one "int" for port_id and 8 "int"s for flowptr.
Sorry for the possible confusion.
-----Original Message-----
From: Haifei Luo
Sent: Tuesday, April 13, 2021 9:29 AM
To: Slava Ovsiienko <viacheslavo@nvidia.com>; dev@dpdk.org
Cc: Ori Kam <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li <xuemingl@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
Subject: RE: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump feature
Hi Slava,
For #1, the steering tool sends messages to DPDK to request a dump. Server/client use the data structure "struct msghdr"
to communicate. It has "msg_iov", "msg_iovlen", etc.
On the tool side, msg_iov is constructed as 1 byte for port_id and 8 bytes for flowptr. In DPDK, we then parse the message this way.
For #2, I will move them to the beginning.
-----Original Message-----
From: Slava Ovsiienko <viacheslavo@nvidia.com>
Sent: Monday, April 12, 2021 3:38 PM
To: Haifei Luo <haifeil@nvidia.com>; dev@dpdk.org
Cc: Ori Kam <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
Subject: RE: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump feature
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Wednesday, April 7, 2021 9:09
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo
> <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler
> <shahafs@nvidia.com>
> Subject: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> Modify API mlx5_flow_dev_dump to support the feature.
> Modify mlx5_socket since one extra arg flow_ptr is added.
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Sorry, this patch was erroneously acked instead of
"common/mlx5: add mlx5 APIs for single flow dump feature".
I have a comment for this one.
> +#ifndef _GNU_SOURCE
> +#define _GNU_SOURCE
> +#endif
> +
> #include <sys/types.h>
> #include <sys/socket.h>
> #include <sys/un.h>
> @@ -29,11 +33,15 @@
> {
> int conn_sock;
> int ret;
> + int j;
> struct cmsghdr *cmsg = NULL;
> - int data;
> + #define LENGTH 9
> + /* The first byte for port_id and the rest for flowptr. */
> + int data[LENGTH];
So, we define a 36/72-byte array? And then use each int as a byte to save the flow_idx value?
I suppose the correct way would be to define the structure of the message instead of using an array of ints, something like this:
struct mlx5_ipc_msg {
int status;
void* flow_idx;
}
> + /* The first byte in data for port_id and the following 8 for flowptr */
> + for (j = 1; j < LENGTH; j++)
> + flow_ptr = (flow_ptr << 8) + data[j];
If the structure is defined, this should be:
flow_ptr = msg->flow_idx
> + if (flow_ptr == 0)
> + ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
> + else
> + ret = mlx5_flow_dev_dump(dev,
> + (struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
> +
> + /*dump one*/
> + uint32_t handle_idx;
> + int ret;
> + struct mlx5_flow_handle *dh;
> + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
> + [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
> +
Please, move variable declarations to the routine beginning, to others
With best regards, Slava
> -----Original Message-----
> From: Haifei Luo
> Sent: Tuesday, April 13, 2021 9:29 AM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Subject: RE: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> Hi Slava,
> For #1, the steering tool sends messages to DPDK to request a dump.
> Server/client use the data structure "struct msghdr"
> to communicate. It has "msg_iov", "msg_iovlen", etc.
> On the tool side, msg_iov is constructed as 1 byte for port_id and 8 bytes for
> flowptr. In DPDK, we then parse the message this way.
Yes, it is clear. In my opinion we should not use a byte array and parse it;
we should present some structure instead:
struct mlx5_flow_dump_req {
uint8_t port_id;
void *flow_id;
} __rte_packed;
BTW, why is port_id 1 byte? port_id in DPDK is a 16-bit value.
If the request dump tool uses some structure - it should be defined in some
common file. IMO, it is not good practice to rely on a raw byte array layout.
With best regards, Slava
> For #2, I will move them to the beginning.
>
> -----Original Message-----
> From: Slava Ovsiienko <viacheslavo@nvidia.com>
> Sent: Monday, April 12, 2021 3:38 PM
> To: Haifei Luo <haifeil@nvidia.com>; dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo
> <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler
> <shahafs@nvidia.com>
> Subject: RE: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> feature
>
> > -----Original Message-----
> > From: Haifei Luo <haifeil@nvidia.com>
> > Sent: Wednesday, April 7, 2021 9:09
> > To: dev@dpdk.org
> > Cc: Ori Kam <orika@nvidia.com>; Slava Ovsiienko
> > <viacheslavo@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> > Xueming(Steven) Li <xuemingl@nvidia.com>; Haifei Luo
> > <haifeil@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler
> > <shahafs@nvidia.com>
> > Subject: [PATCH v2 4/5] net/mlx5: add mlx5 APIs for single flow dump
> > feature
> >
> > Modify API mlx5_flow_dev_dump to support the feature.
> > Modify mlx5_socket since one extra arg flow_ptr is added.
> >
> > Signed-off-by: Haifei Luo <haifeil@nvidia.com>
>
> Sorry, this patch was erroneously acked instead of
> "common/mlx5: add mlx5 APIs for single flow dump feature".
>
> I have a comment for this one.
>
> > +#ifndef _GNU_SOURCE
> > +#define _GNU_SOURCE
> > +#endif
> > +
> > #include <sys/types.h>
> > #include <sys/socket.h>
> > #include <sys/un.h>
> > @@ -29,11 +33,15 @@
> > {
> > int conn_sock;
> > int ret;
> > + int j;
> > struct cmsghdr *cmsg = NULL;
> > - int data;
> > + #define LENGTH 9
> > + /* The first byte for port_id and the rest for flowptr. */
> > + int data[LENGTH];
>
> So, we define a 36/72-byte array? And then use each int as a byte to save
> the flow_idx value?
> I suppose the correct way would be to define the structure of the message
> instead of using an array of ints, something like this:
>
> struct mlx5_ipc_msg {
> int status;
> void* flow_idx;
> }
>
> > + /* The first byte in data for port_id and the following 8 for flowptr */
> > + for (j = 1; j < LENGTH; j++)
> > + flow_ptr = (flow_ptr << 8) + data[j];
> If the structure is defined, this should be:
> flow_ptr = msg->flow_idx
>
> > + if (flow_ptr == 0)
> > + ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
> > + else
> > + ret = mlx5_flow_dev_dump(dev,
> > + (struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
> > +
>
> > + /*dump one*/
> > + uint32_t handle_idx;
> > + int ret;
> > + struct mlx5_flow_handle *dh;
> > + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
> > + [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
> > +
> Please, move variable declarations to the routine beginning, to others
>
> With best regards, Slava
@@ -2,6 +2,10 @@
* Copyright 2019 Mellanox Technologies, Ltd
*/
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
@@ -29,11 +33,15 @@
{
int conn_sock;
int ret;
+ int j;
struct cmsghdr *cmsg = NULL;
- int data;
+ #define LENGTH 9
+ /* The first byte for port_id and the rest for flowptr. */
+ int data[LENGTH];
+ uint64_t flow_ptr = 0;
char buf[CMSG_SPACE(sizeof(int))] = { 0 };
struct iovec io = {
- .iov_base = &data,
+ .iov_base = &data[0],
.iov_len = sizeof(data),
};
struct msghdr msg = {
@@ -46,7 +54,9 @@
int fd;
FILE *file = NULL;
struct rte_eth_dev *dev;
+ struct rte_flow_error err;
+ memset(data, 0, sizeof(data));
/* Accept the connection from the client. */
conn_sock = accept(server_socket, NULL, NULL);
if (conn_sock < 0) {
@@ -84,15 +94,23 @@
}
/* Dump flow. */
dev = &rte_eth_devices[port_id];
- ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
+ /* The first byte in data for port_id and the following 8 for flowptr */
+ for (j = 1; j < LENGTH; j++)
+ flow_ptr = (flow_ptr << 8) + data[j];
+ if (flow_ptr == 0)
+ ret = mlx5_flow_dev_dump(dev, NULL, file, NULL);
+ else
+ ret = mlx5_flow_dev_dump(dev,
+ (struct rte_flow *)((uintptr_t)flow_ptr), file, &err);
+
/* Set-up the ancillary data and reply. */
msg.msg_controllen = 0;
msg.msg_control = NULL;
msg.msg_iovlen = 1;
msg.msg_iov = &io;
- data = -ret;
- io.iov_len = sizeof(data);
- io.iov_base = &data;
+ data[0] = -ret;
+ io.iov_len = sizeof(data[0]);
+ io.iov_base = &data[0];
do {
ret = sendmsg(conn_sock, &msg, 0);
} while (ret < 0 && errno == EINTR);
@@ -7183,10 +7183,35 @@ struct mlx5_meter_domains_infos *
return -ENOTSUP;
}
+ /*dump all*/
if (!flow_idx)
return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
- sh->rx_domain, sh->tx_domain, file);
- return -ENOTSUP;
+ sh->rx_domain,
+ sh->tx_domain, file);
+ /*dump one*/
+ uint32_t handle_idx;
+ int ret;
+ struct mlx5_flow_handle *dh;
+ struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+
+ if (!flow)
+ return -ENOENT;
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ handle_idx);
+ if (!dh)
+ return -ENOENT;
+ if (dh->drv_flow) {
+ ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
+ file);
+ if (ret)
+ return -ENOENT;
+ }
+ handle_idx = dh->next.next;
+ }
+ return 0;
}
/**