From: Ophir Munk <ophirmu@nvidia.com>
This commit implements the mlx5_flow_os_create_flow_matcher() API as the
Windows equivalent of the Linux rdma-core implementation. Missing
rdma-core definitions (e.g. struct mlx5dv_flow_match_parameters) are
added to file mlx5_win_defs.h. The API allocates space to hold the PRM
bits in PRM fte_match_param format and copies the DV-translated PRM bits
into the matcher struct. This matcher struct will be used later by the
flow creation API.
Signed-off-by: Ophir Munk <ophirmu@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/common/mlx5/windows/mlx5_win_defs.h | 57 +++++++++++++++++++++++++++++
drivers/net/mlx5/windows/mlx5_flow_os.c | 32 ++++++++++++----
drivers/net/mlx5/windows/mlx5_flow_os.h | 1 +
3 files changed, 83 insertions(+), 7 deletions(-)
@@ -132,6 +132,63 @@ enum {
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL 0x3
#endif
+enum ibv_flow_flags {
+ IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
+ IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
+ IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2,
+};
+
+enum ibv_flow_attr_type {
+ /* Steering according to rule specifications. */
+ IBV_FLOW_ATTR_NORMAL = 0x0,
+ /*
+ * Default unicast and multicast rule -
+ * receive all Eth traffic which isn't steered to any QP.
+ */
+ IBV_FLOW_ATTR_ALL_DEFAULT = 0x1,
+ /*
+ * Default multicast rule -
+ * receive all Eth multicast traffic which isn't steered to any QP.
+ */
+ IBV_FLOW_ATTR_MC_DEFAULT = 0x2,
+ /* Sniffer rule - receive all port traffic. */
+ IBV_FLOW_ATTR_SNIFFER = 0x3,
+};
+
+enum mlx5dv_flow_table_type {
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
+};
+
+#define MLX5DV_FLOW_TABLE_TYPE_NIC_RX MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX
+#define MLX5DV_FLOW_TABLE_TYPE_NIC_TX MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX
+#define MLX5DV_FLOW_TABLE_TYPE_FDB MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB
+#define MLX5DV_FLOW_TABLE_TYPE_RDMA_RX MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX
+
+struct mlx5dv_flow_match_parameters {
+ size_t match_sz;
+ uint64_t match_buf[]; /* Device spec format */
+};
+
+struct mlx5dv_flow_matcher_attr {
+ enum ibv_flow_attr_type type;
+ uint32_t flags; /* From enum ibv_flow_flags. */
+ uint16_t priority;
+ uint8_t match_criteria_enable; /* Device spec format. */
+ struct mlx5dv_flow_match_parameters *match_mask;
+ uint64_t comp_mask; /* Use mlx5dv_flow_matcher_attr_mask. */
+ enum mlx5dv_flow_table_type ft_type;
+};
+
+/* Windows specific mlx5_matcher. */
+struct mlx5_matcher {
+ void *ctx;
+ struct mlx5dv_flow_matcher_attr attr;
+ uint64_t match_buf[];
+};
+
struct mlx5_err_cqe {
uint8_t rsvd0[32];
uint32_t srqn;
@@ -76,12 +76,31 @@ mlx5_flow_os_create_flow_matcher(void *ctx,
void *table,
void **matcher)
{
- RTE_SET_USED(ctx);
- RTE_SET_USED(attr);
+ struct mlx5dv_flow_matcher_attr *mattr;
+
RTE_SET_USED(table);
*matcher = NULL;
- rte_errno = ENOTSUP;
- return -rte_errno;
+ mattr = attr;
+ if (mattr->type != IBV_FLOW_ATTR_NORMAL) {
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ struct mlx5_matcher *mlx5_matcher =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_matcher) +
+ MLX5_ST_SZ_BYTES(fte_match_param),
+ 0, SOCKET_ID_ANY);
+ if (!mlx5_matcher) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ mlx5_matcher->ctx = ctx;
+ memcpy(&mlx5_matcher->attr, attr, sizeof(mlx5_matcher->attr));
+ memcpy(&mlx5_matcher->match_buf,
+ mattr->match_mask->match_buf,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+ *matcher = mlx5_matcher;
+ return 0;
}
/**
@@ -96,9 +115,8 @@ mlx5_flow_os_create_flow_matcher(void *ctx,
int
mlx5_flow_os_destroy_flow_matcher(void *matcher)
{
- RTE_SET_USED(matcher);
- rte_errno = ENOTSUP;
- return -rte_errno;
+ mlx5_free(matcher);
+ return 0;
}
/**
@@ -6,6 +6,7 @@
#define RTE_PMD_MLX5_FLOW_OS_H_
#include "mlx5_flow.h"
+#include "mlx5_malloc.h"
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;