[v1,2/2] net/mlx5/hws: add support for infiniband BTH match

Message ID 20230606110710.4116732-3-dongzhou@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: mlx5 supports InfiniBand BTH item match

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation fail Compilation issues
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/github-robot: build fail github build: failed
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-aarch64-compile-testing fail Testing issues
ci/iol-x86_64-unit-testing fail Testing issues
ci/iol-testing warning Testing issues
ci/iol-unit-testing fail Testing issues
ci/iol-aarch-unit-testing warning Testing issues
ci/iol-x86_64-compile-testing fail Testing issues
ci/iol-abi-testing warning Testing issues
ci/iol-intel-Performance success Performance Testing PASS

Commit Message

Dong Zhou June 6, 2023, 11:07 a.m. UTC
  This patch adds support for matching the opcode and dst_qp fields of the
InfiniBand BTH (base transport header). Currently only RoCEv2 packets are
supported; the BTH match item defaults to matching RoCEv2 traffic (UDP
destination port 4791).

Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 76 ++++++++++++++++++++++++++-
 drivers/net/mlx5/hws/mlx5dr_definer.h |  2 +
 drivers/net/mlx5/mlx5_flow_hw.c       |  1 +
 3 files changed, 78 insertions(+), 1 deletion(-)
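
For context (not part of the patch), below is a minimal sketch of how an application could use this item once the support is merged. It assumes the RTE_FLOW_ITEM_TYPE_IB_BTH item and struct rte_flow_item_ib_bth introduced in patch 1/2 of this series; the helper name and the opcode/QPN values are purely illustrative.

#include <string.h>
#include <rte_flow.h>

/*
 * Hypothetical helper: build a pattern matching RoCEv2 packets with
 * BTH opcode 0x04 (RC SEND only) and destination QP number 0xd2.
 * dst_qp is a 3-byte (24-bit) field in network byte order.
 */
static void
build_ib_bth_pattern(struct rte_flow_item pattern[5],
		     struct rte_flow_item_ib_bth *spec,
		     struct rte_flow_item_ib_bth *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	spec->hdr.opcode = 0x04;		/* RC SEND only */
	mask->hdr.opcode = 0xff;
	spec->hdr.dst_qp[0] = 0x00;		/* QPN 0x0000d2, most significant byte first */
	spec->hdr.dst_qp[1] = 0x00;
	spec->hdr.dst_qp[2] = 0xd2;
	memset(mask->hdr.dst_qp, 0xff, sizeof(mask->hdr.dst_qp));

	/* In non-relaxed mode the PMD implicitly adds the IP protocol (UDP)
	 * and UDP destination port 4791 matches, so only L2/L3/L4 items are
	 * needed before the BTH item. */
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP };
	pattern[3] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IB_BTH,
		.spec = spec,
		.mask = mask,
	};
	pattern[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}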
  

Comments

Ori Kam June 7, 2023, 6:29 a.m. UTC | #1
> -----Original Message-----
> From: Bill Zhou <dongzhou@nvidia.com>
> Sent: Tuesday, June 6, 2023 2:07 PM
> To: Ori Kam <orika@nvidia.com>; Alex Vesker <valex@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; NBU-Contact-Thomas Monjalon
> (EXTERNAL) <thomas@monjalon.net>; Matan Azrad <matan@nvidia.com>;
> Suanming Mou <suanmingm@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v1 2/2] net/mlx5/hws: add support for infiniband BTH
> match
> 
> This patch adds support to match opcode and dst_qp fields in
> infiniband BTH. Currently, only the RoCEv2 packet is supported,
> the input BTH match item is defaulted to match one RoCEv2 packet.
> 
> Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
> Acked-by: Alex Vesker <valex@nvidia.com>
> ---

Acked-by: Ori Kam <orika@nvidia.com>
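
For reference before reading the definer changes below: the conversion routine accepts a BTH mask only when every field other than opcode and dst_qp is zero. A stand-alone restatement of that check (hypothetical helper name; field names follow the patch, where dst_qp and psn are 3-byte, 24-bit fields):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <rte_flow.h>

/* Return true when the BTH mask only asks for fields the definer can
 * match (opcode, dst_qp); mirrors the ENOTSUP check in the patch. */
static bool
ib_bth_mask_is_supported(const struct rte_flow_item_ib_bth *m)
{
	static const uint8_t zero24[3];

	if (m == NULL)
		return true;	/* no mask: only the implicit RoCEv2 match remains */
	return !(m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
		 m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
		 m->hdr.a || m->hdr.rsvd1 ||
		 memcmp(m->hdr.psn, zero24, sizeof(zero24)) != 0);
}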
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index f92d3e8e1f..1a427c9b64 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -10,6 +10,7 @@ 
 #define ETH_TYPE_IPV6_VXLAN	0x86DD
 #define ETH_VXLAN_DEFAULT_PORT	4789
 #define IP_UDP_PORT_MPLS	6635
+#define UDP_ROCEV2_PORT	4791
 #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
 
 #define STE_NO_VLAN	0x0
@@ -171,7 +172,9 @@  struct mlx5dr_definer_conv_data {
 	X(SET_BE16,	gre_opt_checksum,	v->checksum_rsvd.checksum,	rte_flow_item_gre_opt) \
 	X(SET,		meter_color,		rte_col_2_mlx5_col(v->color),	rte_flow_item_meter_color) \
 	X(SET_BE32,     ipsec_spi,              v->hdr.spi,             rte_flow_item_esp) \
-	X(SET_BE32,     ipsec_sequence_number,  v->hdr.seq,             rte_flow_item_esp)
+	X(SET_BE32,     ipsec_sequence_number,  v->hdr.seq,             rte_flow_item_esp) \
+	X(SET,		ib_l4_udp_port,		UDP_ROCEV2_PORT,	rte_flow_item_ib_bth) \
+	X(SET,		ib_l4_opcode,		v->hdr.opcode,		rte_flow_item_ib_bth)
 
 /* Item set function format */
 #define X(set_type, func_name, value, item_type) \
@@ -583,6 +586,16 @@  mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
 	memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
 }
 
+static void
+mlx5dr_definer_ib_l4_qp_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	const struct rte_flow_item_ib_bth *v = item_spec;
+
+	memcpy(tag + fc->byte_off, &v->hdr.dst_qp, sizeof(v->hdr.dst_qp));
+}
+
 static int
 mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
 			     struct rte_flow_item *item,
@@ -2041,6 +2054,63 @@  mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd,
+			       struct rte_flow_item *item,
+			       int item_idx)
+{
+	const struct rte_flow_item_ib_bth *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+	bool inner = cd->tunnel;
+
+	/* In order to match on RoCEv2(layer4 ib), we must match
+	 * on ip_protocol and l4_dport.
+	 */
+	if (!cd->relaxed) {
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+		if (!fc->tag_set) {
+			fc->item_idx = item_idx;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
+			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
+		}
+
+		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
+		if (!fc->tag_set) {
+			fc->item_idx = item_idx;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			fc->tag_set = &mlx5dr_definer_ib_l4_udp_port_set;
+			DR_CALC_SET(fc, eth_l4, destination_port, inner);
+		}
+	}
+
+	if (!m)
+		return 0;
+
+	if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
+		m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
+		m->hdr.a || m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (m->hdr.opcode) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_OPCODE];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ib_l4_opcode_set;
+		DR_CALC_SET_HDR(fc, ib_l4, opcode);
+	}
+
+	if (!is_mem_zero(m->hdr.dst_qp, 3)) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_QPN];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ib_l4_qp_set;
+		DR_CALC_SET_HDR(fc, ib_l4, qp);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -2182,6 +2252,10 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			item_flags |= MLX5_FLOW_LAYER_MPLS;
 			cd.mpls_idx++;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IB_BTH:
+			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
+			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index 90ec4ce845..6b645f4cf0 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -134,6 +134,8 @@  enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_OKS2_MPLS2_I,
 	MLX5DR_DEFINER_FNAME_OKS2_MPLS3_I,
 	MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I,
+	MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
+	MLX5DR_DEFINER_FNAME_IB_L4_QPN,
 	MLX5DR_DEFINER_FNAME_MAX,
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 853c94af9c..f9e7f844ea 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4969,6 +4969,7 @@  flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 		case RTE_FLOW_ITEM_TYPE_ESP:
 		case RTE_FLOW_ITEM_TYPE_FLEX:
+		case RTE_FLOW_ITEM_TYPE_IB_BTH:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*