@@ -1856,7 +1856,7 @@ struct mlx5_dev_spawn_data {
if (priv->counter_fallback)
DRV_LOG(INFO, "Use fall-back DV counter management\n");
/* Check for LRO support. */
- if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+ if (config.dest_tir && config.hca_attr.lro_cap) {
/* TBD check tunnel lro caps. */
config.lro.supported = config.hca_attr.lro_cap;
DRV_LOG(DEBUG, "Device supports LRO");
@@ -1869,8 +1869,6 @@ struct mlx5_dev_spawn_data {
config.hca_attr.lro_timer_supported_periods[0];
DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
config.lro.timeout);
- config.mprq.enabled = 1;
- DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
}
}
if (config.mprq.enabled && mprq) {
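These first two hunks decouple LRO from MPRQ at device spawn: LRO support now depends only on DV flow steering (config.dest_tir) and the HCA capability bit, and requesting LRO no longer switches config.mprq.enabled on behind the user's back. A condensed before/after sketch of the decision, with field names taken from the patch and the surrounding spawn code elided:

/* Before: LRO required MPRQ and silently enabled it. */
if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
	config.lro.supported = config.hca_attr.lro_cap;
	config.mprq.enabled = 1;
}

/* After: LRO stands alone; MPRQ remains an independent user choice. */
if (config.dest_tir && config.hca_attr.lro_cap)
	config.lro.supported = config.hca_attr.lro_cap;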
@@ -433,12 +433,6 @@ struct ethtool_link_settings {
dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
/*
- * WHen using LRO, MPRQ is implicitly enabled.
- * Adjust threshold value to ensure MPRQ can be enabled.
- */
- if (lro_on && priv->config.mprq.min_rxqs_num > priv->rxqs_n)
- priv->config.mprq.min_rxqs_num = priv->rxqs_n;
- /*
* If the requested number of RX queues is not a power of two,
* use the maximum indirection table size for better balancing.
* The result is always rounded to the next power of two.
@@ -237,6 +237,9 @@
/* Amount of data bytes after eth data segment. */
#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
+
/* Completion mode. */
enum mlx5_completion_mode {
MLX5_COMP_ONLY_ERR = 0x0,
@@ -93,7 +93,6 @@
/**
* Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
*
* @param dev
* Pointer to Ethernet device.
@@ -1607,6 +1606,7 @@ struct mlx5_rxq_ctrl *
unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
RTE_PKTMBUF_HEADROOM;
+ unsigned int max_lro_size = 0;
if (non_scatter_min_mbuf_size > mb_len && !(offloads &
DEV_RX_OFFLOAD_SCATTER)) {
@@ -1672,8 +1672,9 @@ struct mlx5_rxq_ctrl *
tmpl->rxq.strd_headroom_en = strd_headroom_en;
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
- mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
- (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
+ max_lro_size = RTE_MIN(max_rx_pkt_len,
+ (1u << tmpl->rxq.strd_num_n) *
+ (1u << tmpl->rxq.strd_sz_n));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
@@ -1681,6 +1682,7 @@ struct mlx5_rxq_ctrl *
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
+ max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
@@ -1690,20 +1692,18 @@ struct mlx5_rxq_ctrl *
* and round it to the next power of two.
*/
sges_n = log2above((size / mb_len) + !!(size % mb_len));
- tmpl->rxq.sges_n = sges_n;
- /* Make sure rxq.sges_n did not overflow. */
- size = mb_len * (1 << tmpl->rxq.sges_n);
- size -= RTE_PKTMBUF_HEADROOM;
- if (size < max_rx_pkt_len) {
+ if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- dev->data->port_id,
- 1 << sges_n,
- max_rx_pkt_len);
- rte_errno = EOVERFLOW;
+ " requested maximum packet size %u, the maximum"
+ " supported is %u", dev->data->port_id,
+ 1 << sges_n, max_rx_pkt_len,
+ 1u << MLX5_MAX_LOG_RQ_SEGS);
+ rte_errno = ENOTSUP;
goto error;
}
+ tmpl->rxq.sges_n = sges_n;
+ max_lro_size = max_rx_pkt_len;
}
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
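The scatter branch also changes how overflow is caught: the old code stored sges_n into the 2-bit field first and then checked whether the truncated value still covered max_rx_pkt_len (failing with EOVERFLOW), while the new code compares the computed log against MLX5_MAX_LOG_RQ_SEGS before storing it and fails with ENOTSUP, now also reporting the supported maximum. A worked example of the arithmetic, using log2above() from drivers/net/mlx5/mlx5_utils.h (reimplemented below so the sketch compiles on its own; the mbuf sizes are illustrative assumptions):

#include <stdio.h>

#define MLX5_MAX_LOG_RQ_SEGS 5u	/* added by this patch */

/* ceil(log2(v)) as computed by the mlx5 log2above() helper. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	unsigned int mb_len = 2048;          /* assumed mbuf data room */
	unsigned int headroom = 128;         /* typical RTE_PKTMBUF_HEADROOM */
	unsigned int max_rx_pkt_len = 9000;  /* jumbo-frame example */
	unsigned int size = max_rx_pkt_len + headroom;
	unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));

	/* 9128 bytes over 2048-byte mbufs -> 5 buffers, rounded up to 8. */
	printf("sges_n = %u (%u SGEs): %s\n", sges_n, 1u << sges_n,
	       sges_n > MLX5_MAX_LOG_RQ_SEGS ? "rejected" : "accepted");
	return 0;
}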
@@ -1725,6 +1725,7 @@ struct mlx5_rxq_ctrl *
rte_errno = EINVAL;
goto error;
}
+ mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
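With max_lro_size now filled in by whichever of the three Rx buffer layouts the queue ends up using, a single mlx5_max_lro_msg_size_adjust() call after validation replaces the MPRQ-only call removed above. The resulting control flow, condensed (names from the patch; error handling and unrelated setup elided):

unsigned int max_lro_size = 0;

if (mprq_en) {
	/* MPRQ: an LRO message is bounded by one WQE's strides, e.g.
	 * 64 strides (strd_num_n = 6) of 2 KB (strd_sz_n = 11) bound
	 * it to RTE_MIN(max_rx_pkt_len, 131072). */
	max_lro_size = RTE_MIN(max_rx_pkt_len,
			       (1u << strd_num_n) * (1u << strd_sz_n));
} else if (max_rx_pkt_len <= mb_len - RTE_PKTMBUF_HEADROOM) {
	/* Single-mbuf Rx: the whole packet fits in one buffer. */
	max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
	/* Scattered Rx: accepted once sges_n passes the cap above. */
	max_lro_size = max_rx_pkt_len;
}
/* One device-level adjustment, regardless of the chosen layout. */
mlx5_max_lro_msg_size_adjust(dev, max_lro_size);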
@@ -105,7 +105,7 @@ struct mlx5_rxq_data {
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
- unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+ unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
unsigned int cqe_n:4; /* Log 2 of CQ elements. */
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
@@ -115,7 +115,7 @@ struct mlx5_rxq_data {
unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
- unsigned int :3; /* Remaining bits. */
+ unsigned int :2; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
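The two mlx5_rxq_data bitfield hunks are the bookkeeping for the new cap: sges_n grows from 2 to 3 bits so it can hold logs up to 7, comfortably above MLX5_MAX_LOG_RQ_SEGS, while the anonymous padding shrinks from 3 to 2 bits so the overall layout is unchanged. A compile-time guard along these lines would pin the relationship down (hypothetical check, not part of the patch; assumes C11):

#include <assert.h>

#define MLX5_MAX_LOG_RQ_SEGS 5u	/* added by this patch */
#define MLX5_SGES_N_BITS 3	/* hypothetical name for the field width */

/* The largest allowed log of SGEs per packet must fit rxq.sges_n. */
static_assert(MLX5_MAX_LOG_RQ_SEGS < (1u << MLX5_SGES_N_BITS),
	      "MLX5_MAX_LOG_RQ_SEGS overflows the sges_n bitfield");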
@@ -151,6 +151,8 @@ int __attribute__((cold))
return -ENOTSUP;
if (mlx5_mprq_enabled(dev))
return -ENOTSUP;
+ if (mlx5_lro_on(dev))
+ return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
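Finally, vectorized Rx is ruled out when LRO is configured, mirroring the MPRQ exclusion directly above it; mlx5_lro_on() is not part of this excerpt and is assumed here to report whether the LRO offload was requested on the device. The gate, condensed into a standalone predicate:

/* Sketch of the Rx vector eligibility check after this patch; the
 * per-queue loop from the original function is elided. */
static int
rx_vec_eligible(struct rte_eth_dev *dev)
{
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;	/* MPRQ layout: not vectorized. */
	if (mlx5_lro_on(dev))
		return -ENOTSUP;	/* LRO: likewise excluded by this patch. */
	return 1;
}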