[03/28] net/mlx5: support LRO caps query using devx API
Commit Message
From: Dekel Peled <dekelp@mellanox.com>
Update function mlx5_devx_cmd_query_hca_attr() to query HCA
capabilities related to LRO.
Add relevant structs in drivers/net/mlx5/mlx5_prm.h.
Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
drivers/net/mlx5/mlx5.h | 8 ++++++
drivers/net/mlx5/mlx5_devx_cmds.c | 14 ++++++++++
drivers/net/mlx5/mlx5_prm.h | 58 ++++++++++++++++++++-------------------
3 files changed, 52 insertions(+), 28 deletions(-)
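For context, a minimal usage sketch (not part of the patch) showing how a caller could consume the newly queried attributes. It assumes the mlx5_devx_cmd_query_hca_attr() prototype from mlx5.h (ibv_context pointer plus output struct, returning 0 on success); the helper name mlx5_lro_supported() is hypothetical.

/* Illustrative only: gate LRO on the capabilities queried by this patch. */
static int
mlx5_lro_supported(struct ibv_context *ctx)
{
	struct mlx5_hca_attr attr = { 0 };

	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0)
		return 0; /* Query failed: treat LRO as unsupported. */
	if (!attr.lro_cap)
		return 0; /* HCA does not report basic LRO support. */
	/* LRO over GRE/VXLAN tunnels is reported by separate bits. */
	return 1;
}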
Comments
> -----Original Message-----
> From: Matan Azrad <matan@mellanox.com>
> Sent: Monday, July 22, 2019 12:13
> To: Shahaf Shuler <shahafs@mellanox.com>; Yongseok Koh
> <yskoh@mellanox.com>; Slava Ovsiienko <viacheslavo@mellanox.com>
> Cc: dev@dpdk.org; Dekel Peled <dekelp@mellanox.com>
> Subject: [PATCH 03/28] net/mlx5: support LRO caps query using devx API
>
> From: Dekel Peled <dekelp@mellanox.com>
>
> Update function mlx5_devx_cmd_query_hca_attr() to query HCA capabilities
> related to LRO.
>
> Add relevant structs in drivers/net/mlx5/mlx5_prm.h.
>
> Signed-off-by: Dekel Peled <dekelp@mellanox.com>
> Acked-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
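The mlx5_ifc_*_bits structures below follow the PRM layout convention: each u8 member's array length is the field width in bits, and the MLX5_GET() macro used in mlx5_devx_cmds.c derives the bit offset from that layout to pull the value out of the big-endian command output. A simplified, hypothetical helper illustrating that extraction (prm_get_bits() is not a real driver function; the actual macros live in mlx5_prm.h):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

/* Read a field of bit_sz bits (narrower than 32) starting at bit_off
 * from a firmware capability buffer laid out as big-endian dwords.
 */
static inline uint32_t
prm_get_bits(const uint8_t *buf, unsigned int bit_off, unsigned int bit_sz)
{
	uint32_t dw;

	/* PRM fields never straddle a 32-bit word boundary. */
	memcpy(&dw, buf + (bit_off / 32) * 4, sizeof(dw));
	dw = rte_be_to_cpu_32(dw);
	return (dw >> (32 - bit_off % 32 - bit_sz)) & ((1u << bit_sz) - 1);
}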
@@ -165,6 +165,9 @@ struct mlx5_devx_mkey_attr {
uint32_t pd;
};
+/* HCA supports this number of time periods for LRO. */
+#define MLX5_LRO_NUM_SUPP_PERIODS 4
+
/* HCA attributes. */
struct mlx5_hca_attr {
uint32_t eswitch_manager:1;
@@ -175,6 +178,11 @@ struct mlx5_hca_attr {
uint32_t wqe_vlan_insert:1;
uint32_t wqe_inline_mode:2;
uint32_t vport_inline_mode:3;
+ uint32_t lro_cap:1;
+ uint32_t tunnel_lro_gre:1;
+ uint32_t tunnel_lro_vxlan:1;
+ uint32_t lro_max_msg_sz_mode:2;
+ uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
};
/* Flow list . */
@@ -360,6 +360,20 @@ struct mlx5_devx_obj *
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
hcattr, wqe_vlan_insert);
+ attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+ lro_cap);
+ attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_lro_gre);
+ attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, tunnel_lro_vxlan);
+ attr->lro_max_msg_sz_mode = MLX5_GET
+ (per_protocol_networking_offload_caps,
+ hcattr, lro_max_msg_sz_mode);
+ for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
+ attr->lro_timer_supported_periods[i] =
+ MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+ lro_timer_supported_periods[i]);
+ }
attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
hcattr, wqe_inline_mode);
if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
@@ -1084,16 +1084,42 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_61f[0x1e1];
};
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 esw_scheduling[0x1];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 flow_meter_srtcm[0x1];
+ u8 reserved_at_8[0x8];
+ u8 log_max_flow_meter[0x8];
+ u8 flow_meter_reg_id[0x8];
+ u8 reserved_at_25[0x20];
+ u8 packet_pacing_max_rate[0x20];
+ u8 packet_pacing_min_rate[0x20];
+ u8 reserved_at_80[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+ u8 max_tsar_bw_share[0x20];
+ u8 reserved_at_100[0x6e8];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
- u8 reserved_at_5[0x2];
+ u8 lro_max_msg_sz_mode[0x2];
u8 wqe_vlan_insert[0x1];
u8 self_lb_en_modifiable[0x1];
- u8 reserved_at_9[0x2];
+ u8 self_lb_mc[0x1];
+ u8 self_lb_uc[0x1];
u8 max_lso_cap[0x5];
u8 multi_pkt_send_wqe[0x2];
u8 wqe_inline_mode[0x2];
@@ -1102,7 +1128,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 swp[0x1];
@@ -1120,31 +1147,6 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
-struct mlx5_ifc_qos_cap_bits {
- u8 packet_pacing[0x1];
- u8 esw_scheduling[0x1];
- u8 esw_bw_share[0x1];
- u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1];
- u8 packet_pacing_burst_bound[0x1];
- u8 packet_pacing_typical_size[0x1];
- u8 flow_meter_srtcm[0x1];
- u8 reserved_at_8[0x8];
- u8 log_max_flow_meter[0x8];
- u8 flow_meter_reg_id[0x8];
- u8 reserved_at_25[0x20];
- u8 packet_pacing_max_rate[0x20];
- u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
- u8 packet_pacing_rate_table_size[0x10];
- u8 esw_element_type[0x10];
- u8 esw_tsar_type[0x10];
- u8 reserved_at_c0[0x10];
- u8 max_qos_para_vport[0x10];
- u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x6e8];
-};
-
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_per_protocol_networking_offload_caps_bits