@@ -1898,10 +1898,6 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
return;
port->dev_conf.rxmode.max_rx_pkt_len = res->value;
- if (res->value > RTE_ETHER_MAX_LEN)
- rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
port->dev_conf.rxmode.offloads = rx_offloads;
} else {
printf("Unknown parameter\n");
@@ -1517,7 +1517,7 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
* device supports jumbo frame.
*/
eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
- if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
+ if (mtu > RTE_ETHER_MTU) {
rte_port->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rte_port->dev_conf.rxmode.max_rx_pkt_len =
@@ -929,12 +929,9 @@ launch_args_parse(int argc, char** argv)
}
if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
n = atoi(optarg);
- if (n >= RTE_ETHER_MIN_LEN) {
+ if (n >= RTE_ETHER_MIN_LEN)
rx_mode.max_rx_pkt_len = (uint32_t) n;
- if (n > RTE_ETHER_MAX_LEN)
- rx_offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- } else
+ else
rte_exit(EXIT_FAILURE,
"Invalid max-pkt-len=%d - should be > %d\n",
n, RTE_ETHER_MIN_LEN);
@@ -1421,6 +1421,7 @@ init_config(void)
struct rte_gro_param gro_param;
uint32_t gso_types;
uint16_t data_size;
+ uint16_t overhead_len;
bool warning = 0;
int k;
int ret;
@@ -1457,6 +1458,25 @@ init_config(void)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_info_get() failed\n");
+ /* Update max_rx_pkt_len so that the resulting MTU is at least RTE_ETHER_MTU */
+ if (port->dev_info.max_mtu &&
+ port->dev_info.max_mtu != UINT16_MAX &&
+ port->dev_info.max_rx_pktlen &&
+ port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
+ overhead_len = port->dev_info.max_rx_pktlen -
+ port->dev_info.max_mtu;
+ else
+ overhead_len =
+ RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ if (port->dev_conf.rxmode.max_rx_pkt_len <=
+ (uint32_t)(RTE_ETHER_MTU + overhead_len))
+ port->dev_conf.rxmode.max_rx_pkt_len =
+ RTE_ETHER_MTU + overhead_len;
+ else
+ port->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
@@ -1439,7 +1439,7 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->port_id);
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
val = 1;
@@ -300,7 +300,7 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return -EINVAL;
/* set to jumbo mode if needed */
- if (new_mtu > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -184,7 +184,7 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -1420,7 +1420,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
return -EINVAL;
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -98,6 +98,8 @@
#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
VLAN_TAG_SIZE)
+#define E1000_ETH_MAX_LEN \
+ (RTE_ETHER_MTU + E1000_ETH_OVERHEAD)
/*
* Maximum number of Ring Descriptors.
*
@@ -817,7 +817,7 @@ em_hardware_init(struct e1000_hw *hw)
/*
* These parameters control the automatic generation (Tx) and
* response (Rx) to Ethernet PAUSE frames.
- * - High water mark should allow for at least two standard size (1518)
+ * - High water mark should allow for at least two standard size (1522)
* frames to be received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
@@ -831,7 +831,7 @@ em_hardware_init(struct e1000_hw *hw)
rx_buf_size = em_get_rx_buffer_size(hw);
hw->fc.high_water = rx_buf_size -
- PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
+ PMD_ROUNDUP(E1000_ETH_MAX_LEN * 2, 1024);
hw->fc.low_water = hw->fc.high_water - 1500;
if (hw->mac.type == e1000_80003es2lan)
@@ -1049,7 +1049,7 @@ em_get_max_pktlen(struct rte_eth_dev *dev)
return 0x1000;
/* Adapters that do not support jumbo frames */
case e1000_ich8lan:
- return RTE_ETHER_MAX_LEN;
+ return E1000_ETH_MAX_LEN;
default:
return MAX_JUMBO_FRAME_SIZE;
}
@@ -1719,7 +1719,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - E1000_ETH_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -1799,8 +1799,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (ret != 0)
return ret;
- frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
- VLAN_TAG_SIZE;
+ frame_size = mtu + E1000_ETH_OVERHEAD;
/* check that mtu is within the allowed range */
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
@@ -1816,7 +1815,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
@@ -1374,7 +1374,7 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
- if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
+ if (max_rx_pktlen > E1000_ETH_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
@@ -1840,7 +1840,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* one buffer.
*/
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
- rctl_bsize < RTE_ETHER_MAX_LEN) {
+ rctl_bsize < E1000_ETH_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst =
@@ -1660,7 +1660,7 @@ igb_hardware_init(struct e1000_hw *hw)
*/
rx_buf_size = igb_get_rx_buffer_size(hw);
- hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
+ hw->fc.high_water = rx_buf_size - (E1000_ETH_MAX_LEN * 2);
hw->fc.low_water = hw->fc.high_water - 1500;
hw->fc.pause_time = IGB_FC_PAUSE_TIME;
hw->fc.send_xon = 1;
@@ -3072,7 +3072,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - E1000_ETH_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@@ -4369,7 +4369,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
@@ -677,7 +677,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -1565,7 +1565,7 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* update max frame size */
frame_size = HINIC_MTU_TO_PKTLEN(mtu);
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -2458,7 +2458,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
rte_spinlock_lock(&hw->lock);
- is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false;
+ is_jumbo_frame = mtu > RTE_ETHER_MTU ? true : false;
frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
/*
@@ -928,7 +928,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rte_spinlock_unlock(&hw->lock);
return ret;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -11753,7 +11753,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -282,6 +282,9 @@ struct rte_flow {
#define I40E_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+#define I40E_ETH_MAX_LEN \
+ (RTE_ETHER_MTU + I40E_ETH_OVERHEAD)
+
#define I40E_RXTX_BYTES_H_16_BIT(bytes) ((bytes) & ~I40E_48_BIT_MASK)
#define I40E_RXTX_BYTES_L_48_BIT(bytes) ((bytes) & I40E_48_BIT_MASK)
@@ -1889,22 +1889,22 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
* Check if the jumbo frame and maximum packet length are set correctly
*/
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
- "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
+ "frame is enabled", (uint32_t)I40E_ETH_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
+ rxq->max_pkt_len > I40E_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, as jumbo "
"frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)I40E_ETH_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
@@ -2825,7 +2825,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -116,7 +116,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
#endif
rx_ctx.dtype = i40e_header_split_none;
rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
- rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+ rx_ctx.rxmax = I40E_ETH_MAX_LEN;
rx_ctx.tphrdesc_ena = 1;
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
@@ -2797,23 +2797,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
+ (uint32_t)I40E_ETH_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
+ rxq->max_pkt_len > I40E_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)I40E_ETH_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
@@ -67,6 +67,9 @@
#define IAVF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
+#define IAVF_ETH_MAX_LEN \
+ (RTE_ETHER_MTU + IAVF_ETH_OVERHEAD)
+
#define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)
#define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
#define IAVF_48_BIT_MASK RTE_LEN2MASK(IAVF_48_BIT_WIDTH, uint64_t)
@@ -418,23 +418,23 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
* correctly.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
+ (uint32_t)IAVF_ETH_MAX_LEN,
(uint32_t)IAVF_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
if (max_pkt_len < RTE_ETHER_MIN_LEN ||
- max_pkt_len > RTE_ETHER_MAX_LEN) {
+ max_pkt_len > IAVF_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)IAVF_ETH_MAX_LEN);
return -EINVAL;
}
}
@@ -1167,7 +1167,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -60,23 +60,23 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
* correctly.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (max_pkt_len <= ICE_ETH_MAX_LEN ||
max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
+ (uint32_t)ICE_ETH_MAX_LEN,
(uint32_t)ICE_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
if (max_pkt_len < RTE_ETHER_MIN_LEN ||
- max_pkt_len > RTE_ETHER_MAX_LEN) {
+ max_pkt_len > ICE_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)ICE_ETH_MAX_LEN);
return -EINVAL;
}
}
@@ -3904,7 +3904,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -136,6 +136,9 @@
#define ICE_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
+#define ICE_ETH_MAX_LEN \
+ (RTE_ETHER_MTU + ICE_ETH_OVERHEAD)
+
#define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
#define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
@@ -246,23 +246,23 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
dev->data->dev_conf.rxmode.max_rx_pkt_len);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)RTE_ETHER_MAX_LEN,
+ (uint32_t)ICE_ETH_MAX_LEN,
(uint32_t)ICE_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
- rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
+ rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
(uint32_t)RTE_ETHER_MIN_LEN,
- (uint32_t)RTE_ETHER_MAX_LEN);
+ (uint32_t)ICE_ETH_MAX_LEN);
return -EINVAL;
}
}
@@ -701,7 +701,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
rx_ctx.dtype = 0; /* No Header Split mode */
rx_ctx.dsize = 1; /* 32B descriptors */
- rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+ rx_ctx.rxmax = ICE_ETH_MAX_LEN;
/* TPH: Transaction Layer Packet (TLP) processing hints */
rx_ctx.tphrdesc_ena = 1;
rx_ctx.tphwdesc_ena = 1;
@@ -26,6 +26,7 @@
*/
#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
+#define IGC_ETH_MAX_LEN (RTE_ETHER_MTU + IGC_ETH_OVERHEAD)
#define IGC_FC_PAUSE_TIME 0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
@@ -877,7 +878,7 @@ igc_hardware_init(struct igc_hw *hw)
/*
* Hardware flow control
- * - High water mark should allow for at least two standard size (1518)
+ * - High water mark should allow for at least two standard size (1522)
* frames to be received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
@@ -888,7 +889,7 @@ igc_hardware_init(struct igc_hw *hw)
* by 1500.
*/
rx_buf_size = igc_get_rx_buffer_size(hw);
- hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
+ hw->fc.high_water = rx_buf_size - (IGC_ETH_MAX_LEN * 2);
hw->fc.low_water = hw->fc.high_water - 1500;
hw->fc.pause_time = IGC_FC_PAUSE_TIME;
hw->fc.send_xon = 1;
@@ -2192,7 +2193,7 @@ eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
+ max_high_water = rx_buf_size - IGC_ETH_MAX_LEN;
if (fc_conf->high_water > max_high_water ||
fc_conf->high_water < fc_conf->low_water) {
PMD_DRV_LOG(ERR,
@@ -2801,7 +2801,7 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev_data->dev_conf.rxmode.offloads |=
(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
else
@@ -5173,7 +5173,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* switch to jumbo mode if needed */
- if (frame_size > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -481,7 +481,7 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return -1;
}
- if (frame_len > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@@ -1508,7 +1508,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
/* switch to jumbo mode if needed */
- if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
+ if ((uint32_t)mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -552,7 +552,7 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
if (rc)
return rc;
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -58,7 +58,7 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
if (rc)
return rc;
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -2367,7 +2367,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
fp->rxq->rx_buf_size = rc;
}
}
- if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1017,7 +1017,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
- if (mtu > RTE_ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
@@ -176,7 +176,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
return -EINVAL;
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1292,6 +1292,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_conf orig_conf;
+ uint16_t overhead_len;
+ uint32_t *max_rx_pktlen;
int diag;
int ret;
@@ -1323,6 +1325,16 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
if (ret != 0)
goto rollback;
+ /* Get the real Ethernet overhead length */
+ max_rx_pktlen = &dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ if (dev_info.max_mtu &&
+ dev_info.max_mtu != UINT16_MAX &&
+ dev_info.max_rx_pktlen &&
+ dev_info.max_rx_pktlen > dev_info.max_mtu)
+ overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
/* If number of queues specified by application for both Rx and Tx is
* zero, use driver preferred values. This cannot be done individually
* as it is valid for either Tx or Rx (but not both) to be zero.
@@ -1410,13 +1422,16 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
goto rollback;
}
} else {
- if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
- dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
+ uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
+ if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
+ pktlen > RTE_ETHER_MTU + overhead_len)
/* Use default value */
- dev->data->dev_conf.rxmode.max_rx_pkt_len =
- RTE_ETHER_MAX_LEN;
+ (*max_rx_pktlen) = RTE_ETHER_MTU + overhead_len;
}
+ /* Derive the MTU from the final max_rx_pkt_len */
+ dev->data->mtu = (*max_rx_pktlen) - overhead_len;
+
/*
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
The jumbo frame logic used 'RTE_ETHER_MAX_LEN' as its boundary
condition; this fix changes the boundary condition to 'RTE_ETHER_MTU'.

When an MTU of 1500 is set, the Rx frame type can differ between PMDs
that use different Ethernet overhead lengths. This inconsistency can
cause normal-sized packets to be classified as jumbo frames and
dropped. Using the fixed value 'RTE_ETHER_MTU' as the boundary avoids
the issue.

The following scopes are changed:
- 'rte_ethdev';
- apps, e.g. 'test-pmd';
- net PMDs that include VLAN tag(s) in their overhead, e.g. i40e.

Signed-off-by: Steve Yang <stevex.yang@intel.com>
---
 app/test-pmd/cmdline.c                  |  4 ----
 app/test-pmd/config.c                   |  2 +-
 app/test-pmd/parameters.c               |  7 ++-----
 app/test-pmd/testpmd.c                  | 20 ++++++++++++++++++++
 drivers/net/axgbe/axgbe_ethdev.c        |  2 +-
 drivers/net/cxgbe/cxgbe_ethdev.c        |  2 +-
 drivers/net/dpaa/dpaa_ethdev.c          |  2 +-
 drivers/net/dpaa2/dpaa2_ethdev.c        |  2 +-
 drivers/net/e1000/e1000_ethdev.h        |  2 ++
 drivers/net/e1000/em_ethdev.c           | 13 ++++++-------
 drivers/net/e1000/em_rxtx.c             |  4 ++--
 drivers/net/e1000/igb_ethdev.c          |  6 +++---
 drivers/net/enetc/enetc_ethdev.c        |  2 +-
 drivers/net/hinic/hinic_pmd_ethdev.c    |  2 +-
 drivers/net/hns3/hns3_ethdev.c          |  2 +-
 drivers/net/hns3/hns3_ethdev_vf.c       |  2 +-
 drivers/net/i40e/i40e_ethdev.c          |  2 +-
 drivers/net/i40e/i40e_ethdev.h          |  3 +++
 drivers/net/i40e/i40e_ethdev_vf.c       | 10 +++++-----
 drivers/net/i40e/i40e_fdir.c            |  2 +-
 drivers/net/i40e/i40e_rxtx.c            |  8 ++++----
 drivers/net/iavf/iavf.h                 |  3 +++
 drivers/net/iavf/iavf_ethdev.c          | 10 +++++-----
 drivers/net/ice/ice_dcf_ethdev.c        |  8 ++++----
 drivers/net/ice/ice_ethdev.c            |  2 +-
 drivers/net/ice/ice_ethdev.h            |  3 +++
 drivers/net/ice/ice_rxtx.c              | 10 +++++-----
 drivers/net/igc/igc_ethdev.c            |  7 ++++---
 drivers/net/ipn3ke/ipn3ke_representor.c |  2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c        |  2 +-
 drivers/net/liquidio/lio_ethdev.c       |  2 +-
 drivers/net/nfp/nfp_net.c               |  2 +-
 drivers/net/octeontx/octeontx_ethdev.c  |  2 +-
 drivers/net/octeontx2/otx2_ethdev_ops.c |  2 +-
 drivers/net/qede/qede_ethdev.c          |  2 +-
 drivers/net/sfc/sfc_ethdev.c            |  2 +-
 drivers/net/thunderx/nicvf_ethdev.c     |  2 +-
 lib/librte_ethdev/rte_ethdev.c          | 23 +++++++++++++++++++----
 38 files changed, 111 insertions(+), 72 deletions(-)
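Note for reviewers: the inconsistency described above can be reproduced
with the standalone sketch below. It is not part of the patch; the
constants only mirror the DPDK definitions, and the two overhead values
stand in for a PMD with plain Ethernet overhead and one that reserves
two VLAN tags (e.g. i40e).

/* Reviewer sketch, not part of the patch: shows how the old
 * frame-size boundary classifies the same 1500-byte MTU differently
 * depending on the PMD's Ethernet overhead, while the new MTU-based
 * boundary is consistent. Constants mirror the DPDK definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4
#define ETHER_MTU     1500
#define ETHER_MAX_LEN 1518	/* ETHER_MTU + header + CRC */
#define VLAN_TAG_SIZE 4

/* Old rule: jumbo when the full frame exceeds 1518 bytes. */
static bool old_is_jumbo(uint16_t mtu, uint16_t overhead)
{
	return (uint32_t)mtu + overhead > ETHER_MAX_LEN;
}

/* New rule: jumbo when the MTU itself exceeds 1500 bytes. */
static bool new_is_jumbo(uint16_t mtu)
{
	return mtu > ETHER_MTU;
}

int main(void)
{
	uint16_t plain = ETHER_HDR_LEN + ETHER_CRC_LEN;		/* 18 */
	uint16_t dual_vlan = plain + 2 * VLAN_TAG_SIZE;		/* 26 */

	/* Old rule: 1500 + 18 = 1518 -> normal, but 1500 + 26 = 1526
	 * -> jumbo. The same MTU is classified differently per PMD. */
	printf("old, 18B overhead: %s\n",
	       old_is_jumbo(ETHER_MTU, plain) ? "jumbo" : "normal");
	printf("old, 26B overhead: %s\n",
	       old_is_jumbo(ETHER_MTU, dual_vlan) ? "jumbo" : "normal");

	/* New rule depends only on the MTU, so it is consistent. */
	printf("new: %s\n", new_is_jumbo(ETHER_MTU) ? "jumbo" : "normal");
	return 0;
}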
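Similarly, the overhead derivation added to init_config() and
rte_eth_dev_configure() can be exercised in isolation. The sketch below
follows the same fallback logic; the device numbers are hypothetical
examples (an i40e-like device reporting max_rx_pktlen 9728 and max_mtu
9702, and a driver reporting no usable max_mtu).

/* Reviewer sketch, not part of the patch: mirrors the new hunks above.
 * Device values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4

/* Prefer the device-reported overhead (max_rx_pktlen - max_mtu); fall
 * back to plain header + CRC when the driver does not report a usable
 * max_mtu/max_rx_pktlen pair. */
static uint16_t get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	if (max_mtu != 0 && max_mtu != UINT16_MAX &&
	    max_rx_pktlen != 0 && max_rx_pktlen > max_mtu)
		return max_rx_pktlen - max_mtu;
	return ETHER_HDR_LEN + ETHER_CRC_LEN;
}

int main(void)
{
	/* i40e-like device reserving two VLAN tags: 9728 - 9702 = 26 */
	printf("reported: %u\n", get_overhead_len(9728, 9702));
	/* driver without max_mtu support: falls back to 14 + 4 = 18 */
	printf("fallback: %u\n", get_overhead_len(9728, UINT16_MAX));
	return 0;
}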