[v2,3/3] net/mlx5/hws: add ICMPv6 ID and sequence match support
Checks
Commit Message
This patch adds ICMPv6 ID and sequence match support for HWS.
Since type and code of ICMPv6 echo is already specified by ITEM type:
RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
mlx5 pmd will set appropriate type and code automatically:
Echo request: type(128), code(0)
Echo reply: type(129), code(0)
type and code provided by application will be ignored
This patch also fixes these issues in ICMP definer.
1. Parsing inner ICMP item gets and overwrites the outer IP_PROTOCOL
function, which will remove the outer L4 match incorrectly. Fix this
by getting correct inner function.
2. Member order of mlx5_ifc_header_icmp_bits doesn't follow ICMP format.
Reorder them to make it more consistent.
Signed-off-by: Leo Xu <yongquanx@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 88 +++++++++++++++++++++++++++
1 file changed, 88 insertions(+)
Comments
20/12/2022 08:44, Leo Xu:
> This patch adds ICMPv6 ID and sequence match support for HWS.
> Since type and code of ICMPv6 echo is already specified by ITEM type:
> RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
> RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
> mlx5 pmd will set appropriate type and code automatically:
> Echo request: type(128), code(0)
> Echo reply: type(129), code(0)
> type and code provided by application will be ignored
>
> This patch also fixes these issues in ICMP definer.
> 1. Parsing inner ICMP item gets and overwrites the outer IP_PROTOCOL
> function, which will remove the outer L4 match incorrectly. Fix this
> by getting correct inner function.
> 2. Member order of mlx5_ifc_header_icmp_bits doesn't follow ICMP format.
> Reorder them to make it more consistent.
Please don't fix stuff in the same patch as a new feature.
You should have one patch per fix.
The code for the new feature may be squashed in the other mlx5 patch for the feature.
Hi Thomas,
PSB
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Wednesday, January 18, 2023 4:58 PM
> To: Leo Xu (Networking SW) <yongquanx@nvidia.com>
> Cc: dev@dpdk.org; Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Subject: Re: [PATCH v2 3/3] net/mlx5/hws: add ICMPv6 ID and sequence match
> support
>
> External email: Use caution opening links or attachments
>
>
> 20/12/2022 08:44, Leo Xu:
> > This patch adds ICMPv6 ID and sequence match support for HWS.
> > Since type and code of ICMPv6 echo is already specified by ITEM type:
> > RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
> > RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
> > mlx5 pmd will set appropriate type and code automatically:
> > Echo request: type(128), code(0)
> > Echo reply: type(129), code(0)
> > type and code provided by application will be ignored
> >
> > This patch also fixes these issues in ICMP definer.
> > 1. Parsing inner ICMP item gets and overwrites the outer IP_PROTOCOL
> > function, which will remove the outer L4 match incorrectly. Fix this
> > by getting correct inner function.
> > 2. Member order of mlx5_ifc_header_icmp_bits doesn't follow ICMP format.
> > Reorder them to make it more consistent.
>
> Please don't fix stuff in the same patch as a new feature.
> You should have one patch per fix.
> The code for the new feature may be squashed in the other mlx5 patch for the
> feature.
>
Thanks for the good catch.
Actually, those "fix"-related sentences should be removed from this commit message.
I left them there by accident.
I will remove them in the next version of the patch.
@@ -368,6 +368,47 @@ mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+/* Build the DW1 *mask* for ICMPv6 echo items: fully mask the type and
+ * code fields (0xFF each, checksum left unmasked) and encode it via the
+ * generic ICMPv6 DW1 setter. Used as tag_mask_set so that the fixed
+ * type/code written by the request/reply setters below always match. */
+static void
+mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
+ __rte_unused const void *item_spec,
+ uint8_t *tag)
+{
+ /* Positional init of rte_flow_item_icmp6: type = 0xFF, code = 0xFF,
+ * checksum = 0 (not matched). */
+ const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};
+ mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+/* Set the fixed DW1 value for an ICMPv6 Echo Request item:
+ * type = RTE_ICMP6_ECHO_REQUEST (128), code = 0. The type/code supplied
+ * by the application item is deliberately ignored (item_spec unused);
+ * the item type itself determines the match. */
+static void
+mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
+ __rte_unused const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};
+ mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+/* Set the fixed DW1 value for an ICMPv6 Echo Reply item:
+ * type = RTE_ICMP6_ECHO_REPLY (129), code = 0. As with the request
+ * variant, any type/code in the application item is ignored. */
+static void
+mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
+ __rte_unused const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};
+ mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+/* Encode the ICMPv6 echo identifier and sequence number (second dword
+ * of the ICMP header) into the match tag at the field's byte/bit offset. */
+static void
+mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_icmp6_echo *v = item_spec;
+ rte_be32_t dw2;
+
+ /* Compose ident/seq into one dword using the PRM bit offsets of
+ * mlx5_ifc_header_icmp_bits.
+ * NOTE(review): both halves pass through rte_be_to_cpu_16() before
+ * shifting, so dw2 holds a CPU-order composite despite its rte_be32_t
+ * type — presumably DR_SET expects this layout; confirm against the
+ * existing icmp_dw1 setter, and consider declaring dw2 as uint32_t. */
+ dw2 = (rte_be_to_cpu_16(v->echo.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
+ (rte_be_to_cpu_16(v->echo.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));
+
+ DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1441,6 +1482,48 @@ mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+/* Convert an RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST/REPLY item into
+ * definer field setters.
+ *
+ * In non-relaxed mode this implicitly matches the L4 protocol as ICMPv6
+ * and pins the ICMP type/code (DW1) according to the item type, ignoring
+ * any type/code supplied by the application. If a mask is present, the
+ * echo identifier/sequence are matched through DW2.
+ *
+ * @param cd       Conversion context (field array, relaxed/tunnel flags).
+ * @param item     The flow item being converted.
+ * @param item_idx Index of the item in the pattern, recorded per field.
+ * @return 0 on success (no failure paths in this converter).
+ */
+static int
+mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_icmp6_echo *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+ bool inner = cd->tunnel;
+
+ if (!cd->relaxed) {
+ /* Overwrite match on L4 type ICMP6 */
+ /* Use the inner/outer-aware field name so an inner ICMP item does
+ * not clobber the outer IP_PROTOCOL match. */
+ fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, inner);
+
+ /* Set fixed type and code for icmp6 echo request/reply */
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
+ if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
+ fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
+ else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
+ fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
+ DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
+ }
+
+ /* No mask: nothing beyond the implicit protocol/type/code match. */
+ if (!m)
+ return 0;
+
+ /* Set identifier & sequence into icmp_dw2 */
+ if (m->echo.identifier || m->echo.sequence) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
+ DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -1577,6 +1660,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
item_flags |= MLX5_FLOW_LAYER_ICMP6;
break;
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
+ ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
+ item_flags |= MLX5_FLOW_LAYER_ICMP6;
+ break;
case RTE_FLOW_ITEM_TYPE_METER_COLOR:
ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_METER_COLOR;