[v7,2/3] net/mlx5: use API to set max LRO packet size

Message ID: c54b7fb34a4086463f45b47881458c149a677a51.1573494112.git.dekelp@mellanox.com (mailing list archive)
State: Accepted, archived
Delegated to: Ferruh Yigit
Series: support API to set max LRO packet size

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Dekel Peled Nov. 11, 2019, 5:47 p.m. UTC
This patch implements use of the API for the maximum LRO aggregated
packet size.
Rx queue creation is updated to use the relevant configuration.
Documentation is updated accordingly.

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 doc/guides/nics/mlx5.rst       | 2 ++
 drivers/net/mlx5/mlx5.h        | 3 +++
 drivers/net/mlx5/mlx5_ethdev.c | 1 +
 drivers/net/mlx5/mlx5_rxq.c    | 5 +++--
 4 files changed, 9 insertions(+), 2 deletions(-)
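
For context, patch 1/3 of this series adds the max_lro_pkt_size field
to struct rte_eth_rxmode and struct rte_eth_dev_info. The sketch below
shows minimal application-side use of that API; the helper name is
chosen for illustration and error handling is omitted:

    #include <rte_ethdev.h>

    /* Hypothetical helper: enable LRO on a port and cap the aggregated
     * packet size to what the PMD reports.  Error handling omitted. */
    static int
    setup_lro_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf = { 0 };

        rte_eth_dev_info_get(port_id, &dev_info);
        conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
        /* New in this series: the aggregated-packet size limit. */
        conf.rxmode.max_lro_pkt_size = dev_info.max_lro_pkt_size;
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }

Capping the request at the value reported in dev_info keeps the
configuration within what this patch advertises for mlx5
(MLX5_MAX_LRO_SIZE, set in the mlx5_ethdev.c hunk below).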
  

Patch

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 5fd313c..fd5a326 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -207,6 +207,8 @@  Limitations
   - KEEP_CRC offload cannot be supported with LRO.
   - The first mbuf length, without head-room,  must be big enough to include the
     TCP header (122B).
+  - Rx queue with LRO offload enabled, receiving a non-LRO packet, can forward
+    it with size limited to max LRO size, not to max RX packet length.
 
 Statistics
 ----------
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 511463a..0c3a90e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -218,6 +218,9 @@  struct mlx5_hca_attr {
 #define MLX5_LRO_SUPPORTED(dev) \
 	(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)
 
+/* Maximal size of aggregated LRO packet. */
+#define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
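
The moved macro evaluates to UINT8_MAX * 256 = 65280 bytes. A minimal
sketch of the arithmetic, on the assumption (suggested by the 256-byte
granularity) that the device encodes the LRO session limit as an 8-bit
count of 256-byte units:

    #include <assert.h>
    #include <stdint.h>

    /* Assumption: the device takes the LRO session limit as an 8-bit
     * count of 256-byte units, hence the UINT8_MAX * 256 ceiling. */
    #define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)

    static_assert(MLX5_MAX_LRO_SIZE == 65280u,
                  "255 units of 256 bytes = 65280 bytes");

Promoting the definition from mlx5_rxq.c to mlx5.h lets mlx5_ethdev.c
report the same ceiling through dev_info, as the next hunk shows.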
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 2b7c867..3adc824 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -606,6 +606,7 @@  struct ethtool_link_settings {
 	/* FIXME: we should ask the device for these values. */
 	info->min_rx_bufsize = 32;
 	info->max_rx_pktlen = 65536;
+	info->max_lro_pkt_size = MLX5_MAX_LRO_SIZE;
 	/*
 	 * Since we need one CQ per QP, the limit is the minimum number
 	 * between the two values.
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 24d0eaa..c725e14 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1701,7 +1701,6 @@  struct mlx5_rxq_obj *
 	return 0;
 }
 
-#define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
 					sizeof(struct rte_vlan_hdr) * 2 + \
 					sizeof(struct rte_ipv6_hdr)))
@@ -1773,7 +1772,9 @@  struct mlx5_rxq_ctrl *
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
-	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	unsigned int max_rx_pkt_len = lro_on_queue ?
+			dev->data->dev_conf.rxmode.max_lro_pkt_size :
+			dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
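
The mlx5_rxq.c change above is the functional core of the patch: when
LRO is enabled, Rx buffer and scatter sizing is driven by the
configured max_lro_pkt_size rather than max_rx_pkt_len. A condensed,
self-contained sketch of that selection (simplified: in the driver,
offloads merges per-queue and per-port offloads; the helper name is
hypothetical):

    #include <rte_ethdev.h>

    /* Hypothetical condensation of the selection above: pick the size
     * budget used for Rx buffer and scatter calculations. */
    static unsigned int
    rxq_pkt_len_budget(struct rte_eth_dev *dev, uint64_t offloads)
    {
        unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);

        /* With LRO, the aggregated-size cap applies; otherwise the
         * classic per-packet maximum does. */
        return lro_on_queue ?
               dev->data->dev_conf.rxmode.max_lro_pkt_size :
               dev->data->dev_conf.rxmode.max_rx_pkt_len;
    }

The selected value then feeds non_scatter_min_mbuf_size and the
max_lro_size computation visible in the surrounding context of the
queue-creation path.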