From patchwork Wed Oct 16 08:36:09 2019
X-Patchwork-Submitter: Moti Haimovsky
X-Patchwork-Id: 61290
X-Patchwork-Delegate: rasland@nvidia.com
From: Moti Haimovsky
To: viacheslavo@mellanox.com, rasland@mellanox.com
Cc: dev@dpdk.org
Date: Wed, 16 Oct 2019 11:36:09 +0300
Message-Id: <20191016083610.203486-1-motih@mellanox.com>
Subject: [dpdk-dev] [PATCH 1/2] net/mlx5: query HCA for enabled FLEX parser protos

This commit adds querying the HCA for which FLEX protocols are already
enabled.

Signed-off-by: Moti Haimovsky
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.h           |  1 +
 drivers/net/mlx5/mlx5_devx_cmds.c |  2 ++
 drivers/net/mlx5/mlx5_prm.h       | 12 ++++++++++++
 3 files changed, 15 insertions(+)
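
For context (editorial note, not part of the patch): the new capability field
is consumed as a plain bitmask. A minimal sketch of such a check, assuming a
filled-in struct mlx5_hca_attr and the MLX5_HCA_FLEX_* masks added to
mlx5_prm.h below (the helper name is illustrative only; patch 2/2 performs
this same test inline during Geneve item validation):

    /* Sketch: test whether firmware enabled the Geneve flex parser. */
    static inline int
    mlx5_hca_flex_geneve_enabled(const struct mlx5_hca_attr *attr)
    {
            return !!(attr->flex_parser_protocols &
                      MLX5_HCA_FLEX_GENEVE_ENABLED);
    }
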
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index baf945c..024a403 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -184,6 +184,7 @@ struct mlx5_hca_attr {
 	uint32_t tunnel_lro_vxlan:1;
 	uint32_t lro_max_msg_sz_mode:2;
 	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
+	uint32_t flex_parser_protocols;
 };
 
 /* Flow list . */
diff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c
index acfe1de..01e4094 100644
--- a/drivers/net/mlx5/mlx5_devx_cmds.c
+++ b/drivers/net/mlx5/mlx5_devx_cmds.c
@@ -330,6 +330,8 @@ struct mlx5_devx_obj *
 	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
 					  eth_net_offloads);
 	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
+	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
+					       flex_parser_protocols);
 	if (!attr->eth_net_offloads)
 		return 0;
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 3765df0..e429792 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -856,6 +856,18 @@ enum {
 	MLX5_INLINE_MODE_INNER_TCP_UDP,
 };
 
+/* HCA bit masks indicating which Flex parser protocols are already enabled. */
+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_0[0x30];
 	u8 vhca_id[0x10];

From patchwork Wed Oct 16 08:36:10 2019
X-Patchwork-Submitter: Moti Haimovsky
X-Patchwork-Id: 61291
X-Patchwork-Delegate: rasland@nvidia.com
From: Moti Haimovsky
To: viacheslavo@mellanox.com, rasland@mellanox.com
Cc: dev@dpdk.org
Date: Wed, 16 Oct 2019 11:36:10 +0300
Message-Id: <20191016083610.203486-2-motih@mellanox.com>
Subject: [dpdk-dev] [PATCH 2/2] net/mlx5: add flow match on GENEVE item

This commit adds support for matching flows on Geneve headers.

Signed-off-by: Moti Haimovsky
Acked-by: Viacheslav Ovsiienko
---
 doc/guides/nics/mlx5.rst               | 10 +++-
 doc/guides/rel_notes/release_19_11.rst |  1 +
 drivers/net/mlx5/mlx5.h                |  2 +
 drivers/net/mlx5/mlx5_devx_cmds.c      |  6 +++
 drivers/net/mlx5/mlx5_flow.c           | 89 ++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h           | 38 ++++++++++++++-
 drivers/net/mlx5/mlx5_flow_dv.c        | 84 ++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_prm.h            |  9 +++-
 8 files changed, 235 insertions(+), 4 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 414c9c1..08039bc 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -87,7 +87,7 @@ Features
 - RX interrupts.
 - Statistics query including Basic, Extended and per queue.
 - Rx HW timestamp.
-- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP, Geneve.
 - Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
 - NIC HW offloads: encapsulation (vxlan, gre, mplsoudp, mplsogre), NAT, routing, TTL
   increment/decrement, count, drop, mark. For details please see :ref:`mlx5_offloads_support`.
@@ -146,6 +146,14 @@ Limitations
 - L3 VXLAN and VXLAN-GPE tunnels cannot be supported together with MPLSoGRE
   and MPLSoUDP.
 
+- Match on Geneve header supports the following fields only:
+
+  - VNI
+  - OAM
+  - protocol type
+  - options length
+    Currently, the only supported options length value is 0.
+
 - VF: flow rules created on VF devices can only match traffic targeted at the
   configured MAC addresses (see ``rte_eth_dev_mac_addr_add()``).
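
As an illustration of this limitation (editorial sketch, not part of the
patch), a Geneve pattern that stays within the supported fields could look
like the following rte_flow snippet; the VNI and protocol values are
arbitrary examples, and the outer eth/ipv4/udp items are present because the
PMD accepts Geneve only as a fully defined outer tunnel:

    /* Sketch: match Geneve VNI 0xabc carrying Ethernet, options length 0. */
    struct rte_flow_item_geneve geneve_spec = {
            .vni = "\x00\x0a\xbc",
            .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
    };
    struct rte_flow_item_geneve geneve_mask = {
            .vni = "\xff\xff\xff",
            .protocol = RTE_BE16(0xffff),
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GENEVE,
              .spec = &geneve_spec, .mask = &geneve_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
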
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index cd4e350..a903533 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -286,4 +286,5 @@ Tested Platforms
   * Added support for VLAN push flow offload command.
   * Added support for VLAN set PCP offload command.
   * Added support for VLAN set VID offload command.
+  * Added support for matching on packets with Geneve tunnel header.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 024a403..bac1c0a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -179,6 +179,8 @@ struct mlx5_hca_attr {
 	uint32_t wqe_vlan_insert:1;
 	uint32_t wqe_inline_mode:2;
 	uint32_t vport_inline_mode:3;
+	uint32_t tunnel_stateless_geneve_rx:1;
+	uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
 	uint32_t lro_cap:1;
 	uint32_t tunnel_lro_gre:1;
 	uint32_t tunnel_lro_vxlan:1;
diff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c
index 01e4094..51947d3 100644
--- a/drivers/net/mlx5/mlx5_devx_cmds.c
+++ b/drivers/net/mlx5/mlx5_devx_cmds.c
@@ -376,6 +376,12 @@ struct mlx5_devx_obj *
 			MLX5_GET(per_protocol_networking_offload_caps,
 				 hcattr, lro_timer_supported_periods[i]);
 	}
+	attr->tunnel_stateless_geneve_rx =
+			 MLX5_GET(per_protocol_networking_offload_caps,
+				  hcattr, tunnel_stateless_geneve_rx);
+	attr->geneve_max_opt_len =
+			 MLX5_GET(per_protocol_networking_offload_caps,
+				  hcattr, max_geneve_opt_len);
 	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
 					 hcattr, wqe_inline_mode);
 	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
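
The max_geneve_opt_len capability read here is a single bit selecting one of
two fixed limits on the Geneve options length the device can handle
(editorial sketch, not part of the patch; the helper is illustrative and the
two limits correspond to the MLX5_GENEVE_OPT_LEN_0/1 constants added to
mlx5_flow.h below):

    /* Sketch: 0x0 -> up to 14 DW (56 bytes) of options, 0x1 -> up to 63 DW. */
    static inline unsigned int
    mlx5_geneve_max_opt_len_dw(const struct mlx5_hca_attr *attr)
    {
            return attr->geneve_max_opt_len ? 63 : 14;
    }
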
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 578d003..d4d956f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1913,6 +1913,95 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 /**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+			       uint64_t item_flags,
+			       struct rte_eth_dev *dev,
+			       struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_geneve *spec = item->spec;
+	const struct rte_flow_item_geneve *mask = item->mask;
+	int ret;
+	uint16_t gbhdr;
+	uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+			  MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+	const struct rte_flow_item_geneve nic_mask = {
+		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+		.vni = "\xff\xff\xff",
+		.protocol = RTE_BE16(UINT16_MAX),
+	};
+
+	if (!(priv->config.hca_attr.flex_parser_protocols &
+	      MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+	    !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Geneve is not enabled by device"
+					  " parameter and/or not configured in"
+					  " firmware");
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple tunnel layers not"
+					  " supported");
+	/*
+	 * Verify an outer UDP layer is present as required by the Geneve
+	 * specification (draft-ietf-nvo3-geneve).
+	 */
+	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "no outer UDP layer found");
+	if (!mask)
+		mask = &rte_flow_item_geneve_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&nic_mask,
+		 sizeof(struct rte_flow_item_geneve), error);
+	if (ret)
+		return ret;
+	if (spec) {
+		gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+		if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+		    MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+		    MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Geneve protocol unsupported"
+						  " fields are being used");
+		if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+			return rte_flow_error_set
+					(error, ENOTSUP,
+					 RTE_FLOW_ERROR_TYPE_ITEM,
+					 item,
+					 "Unsupported Geneve options length");
+	}
+	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+		return rte_flow_error_set
+			    (error, ENOTSUP,
+			     RTE_FLOW_ERROR_TYPE_ITEM, item,
+			     "Geneve tunnel must be fully defined");
+	return 0;
+}
+
+/**
  * Validate MPLS item.
  *
  * @param[in] dev
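
For instance (editorial sketch with an arbitrary value, not from the patch),
the checks above reject any spec that sets fields other than VNI, OAM,
protocol type and options length:

    /* Sketch: fails validation -- the version field is nonzero. */
    struct rte_flow_item_geneve bad_spec = {
            .ver_opt_len_o_c_rsvd0 = RTE_BE16(1 << 14), /* Ver = 1 */
    };
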
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 235bccd..9658db1 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -63,6 +63,7 @@
 #define MLX5_FLOW_LAYER_IPIP (1u << 21)
 #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
 #define MLX5_FLOW_LAYER_NVGRE (1u << 23)
+#define MLX5_FLOW_LAYER_GENEVE (1u << 24)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -83,7 +84,8 @@
 #define MLX5_FLOW_LAYER_TUNNEL \
 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
-	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
+	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+	 MLX5_FLOW_LAYER_GENEVE)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -188,6 +190,9 @@
 #define MLX5_UDP_PORT_VXLAN 4789
 #define MLX5_UDP_PORT_VXLAN_GPE 4790
 
+/* UDP port number for Geneve. */
+#define MLX5_UDP_PORT_GENEVE 6081
+
 /* Priority reserved for default flows. */
 #define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
 
@@ -220,6 +225,33 @@
 /* IBV hash source bits for IPV6. */
 #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
 
+/* Geneve header first 16 bits. */
+#define MLX5_GENEVE_VER_MASK 0x3
+#define MLX5_GENEVE_VER_SHIFT 14
+#define MLX5_GENEVE_VER_VAL(a) \
+	(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
+#define MLX5_GENEVE_OPTLEN_MASK 0x3F
+#define MLX5_GENEVE_OPTLEN_SHIFT 8
+#define MLX5_GENEVE_OPTLEN_VAL(a) \
+	(((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
+#define MLX5_GENEVE_OAMF_MASK 0x1
+#define MLX5_GENEVE_OAMF_SHIFT 7
+#define MLX5_GENEVE_OAMF_VAL(a) \
+	(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
+#define MLX5_GENEVE_CRITO_MASK 0x1
+#define MLX5_GENEVE_CRITO_SHIFT 6
+#define MLX5_GENEVE_CRITO_VAL(a) \
+	(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
+#define MLX5_GENEVE_RSVD_MASK 0x3F
+#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
+/*
+ * The length of the Geneve options fields, expressed in four byte multiples,
+ * not including the eight-byte fixed tunnel header.
+ */
+#define MLX5_GENEVE_OPT_LEN_0 14
+#define MLX5_GENEVE_OPT_LEN_1 63
+
 enum mlx5_flow_drv_type {
 	MLX5_FLOW_TYPE_MIN,
 	MLX5_FLOW_TYPE_DV,
@@ -556,4 +588,8 @@ int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
 				  uint64_t item_flags,
 				  uint8_t target_protocol,
 				  struct rte_flow_error *error);
+int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+				   uint64_t item_flags,
+				   struct rte_eth_dev *dev,
+				   struct rte_flow_error *error);
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
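
A worked example (editorial, not part of the patch) of how these accessors
decompose the first 16-bit word of the Geneve header, using an arbitrary
sample with options length 2 and the OAM flag set:

    /* Sketch: 0x0280 = Ver 0, Opt Len 2 (8 bytes of options), O = 1, C = 0. */
    uint16_t gbhdr = 0x0280; /* host order, i.e. after rte_be_to_cpu_16() */
    unsigned int ver = MLX5_GENEVE_VER_VAL(gbhdr);       /* 0 */
    unsigned int optlen = MLX5_GENEVE_OPTLEN_VAL(gbhdr); /* 2, in 4B units */
    unsigned int oam = MLX5_GENEVE_OAMF_VAL(gbhdr);      /* 1 */
    unsigned int crit = MLX5_GENEVE_CRITO_VAL(gbhdr);    /* 0 */
    unsigned int rsvd = MLX5_GENEVE_RSVD_VAL(gbhdr);     /* 0 */
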
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f0422dc..b1aa427 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3434,6 +3434,14 @@ struct field_modify_info modify_tcp[] = {
 				return ret;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
+		case RTE_FLOW_ITEM_TYPE_GENEVE:
+			ret = mlx5_flow_validate_item_geneve(items,
+							     item_flags, dev,
+							     error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_GENEVE;
+			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			ret = mlx5_flow_validate_item_mpls(dev, items,
 							   item_flags,
@@ -4492,6 +4500,77 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Add Geneve item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+
+static void
+flow_dv_translate_item_geneve(void *matcher, void *key,
+			      const struct rte_flow_item *item, int inner)
+{
+	const struct rte_flow_item_geneve *geneve_m = item->mask;
+	const struct rte_flow_item_geneve *geneve_v = item->spec;
+	void *headers_m;
+	void *headers_v;
+	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+	uint16_t dport;
+	uint16_t gbhdr_m;
+	uint16_t gbhdr_v;
+	char *vni_m;
+	char *vni_v;
+	size_t size, i;
+
+	if (inner) {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+	} else {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	}
+	dport = MLX5_UDP_PORT_GENEVE;
+	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+	}
+	if (!geneve_v)
+		return;
+	if (!geneve_m)
+		geneve_m = &rte_flow_item_geneve_mask;
+	size = sizeof(geneve_m->vni);
+	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+	memcpy(vni_m, geneve_m->vni, size);
+	for (i = 0; i < size; ++i)
+		vni_v[i] = vni_m[i] & geneve_v->vni[i];
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
+		 rte_be_to_cpu_16(geneve_m->protocol));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
+	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
+	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
+		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
+		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+}
+
+/**
  * Add MPLS item to matcher and to the value.
  *
  * @param[in, out] matcher
@@ -5692,6 +5771,11 @@ struct field_modify_info modify_tcp[] = {
 							 items, tunnel);
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
+		case RTE_FLOW_ITEM_TYPE_GENEVE:
+			flow_dv_translate_item_geneve(match_mask, match_value,
+						      items, tunnel);
+			last_item = MLX5_FLOW_LAYER_GENEVE;
+			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_dv_translate_item_mpls(match_mask, match_value,
 						    items, last_item, tunnel);
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index e429792..c86f8b8 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -553,12 +553,17 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 	u8 gre_key_l[0x8];
 	u8 vxlan_vni[0x18];
 	u8 reserved_at_b8[0x8];
-	u8 reserved_at_c0[0x20];
+	u8 geneve_vni[0x18];
+	u8 reserved_at_d8[0x7];
+	u8 geneve_oam[0x1];
 	u8 reserved_at_e0[0xc];
 	u8 outer_ipv6_flow_label[0x14];
 	u8 reserved_at_100[0xc];
 	u8 inner_ipv6_flow_label[0x14];
-	u8 reserved_at_120[0xe0];
+	u8 reserved_at_120[0xa];
+	u8 geneve_opt_len[0x6];
+	u8 geneve_protocol_type[0x10];
+	u8 reserved_at_140[0xc0];
 };
 
 struct mlx5_ifc_ipv4_layout_bits {
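
As background on the matcher/key convention used by the translate routine
above (editorial sketch, not part of the patch): DV matching programs a mask
and a value for every field, and the device reports a hit when the masked
packet bits equal the value; this is why the routine ANDs each spec field
with its mask before writing the value side:

    /* Sketch: per-field semantics of the DV matcher programmed above. */
    static inline int
    dv_field_hits(uint32_t packet, uint32_t mask, uint32_t value)
    {
            /* The value side is stored pre-masked: value = spec & mask. */
            return (packet & mask) == value;
    }
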