Hi,
> -----Original Message-----
> From: Yigit, Ferruh <ferruh.yigit@intel.com>
> Sent: Saturday, July 10, 2021 1:29
> To: Jerin Jacob <jerinj@marvell.com>; Li, Xiaoyun <xiaoyun.li@intel.com>;
> Ajit Khaparde <ajit.khaparde@broadcom.com>; Somnath Kotur
> <somnath.kotur@broadcom.com>; Igor Russkikh
> <igor.russkikh@aquantia.com>; Pavel Belous <pavel.belous@aquantia.com>;
> Somalapuram Amaranath <asomalap@amd.com>; Rasesh Mody
> <rmody@marvell.com>; Shahed Shaikh <shshaikh@marvell.com>; Chas
> Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>;
> Nithin Dabilpuram <ndabilpuram@marvell.com>; Kiran Kumar K
> <kirankumark@marvell.com>; Sunil Kumar Kori <skori@marvell.com>; Satha
> Rao <skoteshwar@marvell.com>; Rahul Lakkireddy
> <rahul.lakkireddy@chelsio.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>; Sachin Saxena <sachin.saxena@oss.nxp.com>;
> Wang, Haiyue <haiyue.wang@intel.com>; Marcin Wojtas
> <mw@semihalf.com>; Michal Krawczyk <mk@semihalf.com>; Guy Tzalik
> <gtzalik@amazon.com>; Evgeny Schemeilin <evgenys@amazon.com>; Igor
> Chauskin <igorch@amazon.com>; Gagandeep Singh <g.singh@nxp.com>;
> Daley, John <johndale@cisco.com>; Hyong Youb Kim <hyonkim@cisco.com>;
> Gaetan Rivet <grive@u256.net>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang,
> Xiao W <xiao.w.wang@intel.com>; Ziyang Xuan <xuanziyang2@huawei.com>;
> Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>; Guoyang Zhou
> <zhouguoyang@huawei.com>; Yisen Zhuang <yisen.zhuang@huawei.com>;
> Lijun Ou <oulijun@huawei.com>; Xing, Beilei <beilei.xing@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Yang, Qiming <qiming.yang@intel.com>;
> Andrew Boyer <aboyer@pensando.io>; Xu, Rosen <rosen.xu@intel.com>;
> Matan Azrad <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Zyta Szpak
> <zr@semihalf.com>; Liron Himi <lironh@marvell.com>; Heinrich Kuhn
> <heinrich.kuhn@netronome.com>; Harman Kalra <hkalra@marvell.com>;
> Nalla Pradeep <pnalla@marvell.com>; Radha Mohan Chintakuntla
> <radhac@marvell.com>; Veerasenareddy Burru <vburru@marvell.com>;
> Devendra Singh Rawat <dsinghrawat@marvell.com>; Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru>; Maciej Czekaj <mczekaj@marvell.com>;
> Jiawen Wu <jiawenwu@trustnetic.com>; Jian Wang
> <jianwang@trustnetic.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>;
> Yong Wang <yongwang@vmware.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>;
> Akhil Goyal <gakhil@marvell.com>; Hunt, David <david.hunt@intel.com>;
> Mcnamara, John <john.mcnamara@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: [PATCH 4/4] ethdev: remove jumbo offload flag
>
> Removing 'DEV_RX_OFFLOAD_JUMBO_FRAME' offload flag.
>
> Instead of drivers announcing this capability, the application can deduce
> the capability by checking the reported 'dev_info.max_mtu' or
> 'dev_info.max_rx_pktlen'.
>
> And instead of the application explicitly setting this flag to enable
> jumbo frames, this can be deduced by the driver by comparing the
> requested 'mtu' to 'RTE_ETHER_MTU'.
>
> Removing this additional configuration for simplification.
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
> app/test-eventdev/test_pipeline_common.c | 2 -
> app/test-pmd/cmdline.c | 2 +-
> app/test-pmd/config.c | 24 +---------
> app/test-pmd/testpmd.c | 46 +------------------
> app/test-pmd/testpmd.h | 2 +-
> doc/guides/howto/debug_troubleshoot.rst | 2 -
> doc/guides/nics/bnxt.rst | 1 -
> doc/guides/nics/features.rst | 3 +-
> drivers/net/atlantic/atl_ethdev.c | 1 -
> drivers/net/axgbe/axgbe_ethdev.c | 1 -
> drivers/net/bnx2x/bnx2x_ethdev.c | 1 -
> drivers/net/bnxt/bnxt.h | 1 -
> drivers/net/bnxt/bnxt_ethdev.c | 10 +---
> drivers/net/bonding/rte_eth_bond_pmd.c | 8 ----
> drivers/net/cnxk/cnxk_ethdev.h | 5 +-
> drivers/net/cnxk/cnxk_ethdev_ops.c | 1 -
> drivers/net/cxgbe/cxgbe.h | 1 -
> drivers/net/cxgbe/cxgbe_ethdev.c | 8 ----
> drivers/net/cxgbe/sge.c | 5 +-
> drivers/net/dpaa/dpaa_ethdev.c | 2 -
> drivers/net/dpaa2/dpaa2_ethdev.c | 2 -
> drivers/net/e1000/e1000_ethdev.h | 4 +-
> drivers/net/e1000/em_ethdev.c | 4 +-
> drivers/net/e1000/em_rxtx.c | 19 +++-----
> drivers/net/e1000/igb_rxtx.c | 3 +-
> drivers/net/ena/ena_ethdev.c | 2 -
> drivers/net/enetc/enetc_ethdev.c | 3 +-
> drivers/net/enic/enic_res.c | 1 -
> drivers/net/failsafe/failsafe_ops.c | 2 -
> drivers/net/fm10k/fm10k_ethdev.c | 1 -
> drivers/net/hinic/hinic_pmd_ethdev.c | 1 -
> drivers/net/hns3/hns3_ethdev.c | 1 -
> drivers/net/hns3/hns3_ethdev_vf.c | 1 -
> drivers/net/i40e/i40e_ethdev.c | 1 -
> drivers/net/i40e/i40e_ethdev_vf.c | 3 +-
> drivers/net/i40e/i40e_rxtx.c | 2 +-
> drivers/net/iavf/iavf_ethdev.c | 3 +-
> drivers/net/ice/ice_dcf_ethdev.c | 3 +-
> drivers/net/ice/ice_dcf_vf_representor.c | 1 -
> drivers/net/ice/ice_ethdev.c | 1 -
> drivers/net/ice/ice_rxtx.c | 3 +-
> drivers/net/igc/igc_ethdev.h | 1 -
> drivers/net/igc/igc_txrx.c | 2 +-
> drivers/net/ionic/ionic_ethdev.c | 1 -
> drivers/net/ipn3ke/ipn3ke_representor.c | 3 +-
> drivers/net/ixgbe/ixgbe_ethdev.c | 5 +-
> drivers/net/ixgbe/ixgbe_pf.c | 9 +---
> drivers/net/ixgbe/ixgbe_rxtx.c | 3 +-
> drivers/net/mlx4/mlx4_rxq.c | 1 -
> drivers/net/mlx5/mlx5_rxq.c | 1 -
> drivers/net/mvneta/mvneta_ethdev.h | 3 +-
> drivers/net/mvpp2/mrvl_ethdev.c | 1 -
> drivers/net/nfp/nfp_net.c | 6 +--
> drivers/net/octeontx/octeontx_ethdev.h | 1 -
> drivers/net/octeontx2/otx2_ethdev.h | 1 -
> drivers/net/octeontx_ep/otx_ep_ethdev.c | 3 +-
> drivers/net/octeontx_ep/otx_ep_rxtx.c | 6 ---
> drivers/net/qede/qede_ethdev.c | 1 -
> drivers/net/sfc/sfc_rx.c | 2 -
> drivers/net/thunderx/nicvf_ethdev.h | 1 -
> drivers/net/txgbe/txgbe_rxtx.c | 1 -
> drivers/net/virtio/virtio_ethdev.c | 1 -
> drivers/net/vmxnet3/vmxnet3_ethdev.c | 1 -
> examples/ip_fragmentation/main.c | 3 +-
> examples/ip_reassembly/main.c | 3 +-
> examples/ipsec-secgw/ipsec-secgw.c | 2 -
> examples/ipv4_multicast/main.c | 1 -
> examples/kni/main.c | 5 --
> examples/l3fwd-acl/main.c | 2 -
> examples/l3fwd-graph/main.c | 1 -
> examples/l3fwd-power/main.c | 2 -
> examples/l3fwd/main.c | 1 -
> .../performance-thread/l3fwd-thread/main.c | 2 -
> examples/vhost/main.c | 2 -
> lib/ethdev/rte_ethdev.c | 26 +----------
> lib/ethdev/rte_ethdev.h | 1 -
> 76 files changed, 42 insertions(+), 250 deletions(-)
>
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-
> eventdev/test_pipeline_common.c
> index 5fcea74b4d43..2775e72c580d 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -199,8 +199,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct
> evt_options *opt)
>
> port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
> RTE_ETHER_CRC_LEN;
> - if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
> - port_conf.rxmode.offloads |=
> DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> t->internal_port = 1;
> RTE_ETH_FOREACH_DEV(i) {
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index 8bdc042f6e8e..c0b6132d64e8 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -1921,7 +1921,7 @@ cmd_config_max_pkt_len_parsed(void
> *parsed_result,
> return;
> }
>
> - update_jumbo_frame_offload(port_id, res->value);
> + update_mtu_from_frame_size(port_id, res->value);
> }
>
> init_port_config();
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index a87265d7638b..23a48557b676 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -1136,39 +1136,19 @@ port_reg_set(portid_t port_id, uint32_t reg_off,
> uint32_t reg_v)
> void
> port_mtu_set(portid_t port_id, uint16_t mtu)
> {
> + struct rte_port *port = &ports[port_id];
> int diag;
> - struct rte_port *rte_port = &ports[port_id];
> - struct rte_eth_dev_info dev_info;
> - int ret;
>
> if (port_id_is_invalid(port_id, ENABLED_WARN))
> return;
>
> - ret = eth_dev_info_get_print_err(port_id, &dev_info);
> - if (ret != 0)
> - return;
> -
> - if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
> - printf("Set MTU failed. MTU:%u is not in valid range, min:%u
> - max:%u\n",
> - mtu, dev_info.min_mtu, dev_info.max_mtu);
> - return;
> - }
> diag = rte_eth_dev_set_mtu(port_id, mtu);
> if (diag) {
> printf("Set MTU failed. diag=%d\n", diag);
> return;
> }
>
> - rte_port->dev_conf.rxmode.mtu = mtu;
> -
> - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
> {
> - if (mtu > RTE_ETHER_MTU) {
> - rte_port->dev_conf.rxmode.offloads |=
> -
> DEV_RX_OFFLOAD_JUMBO_FRAME;
> - } else
> - rte_port->dev_conf.rxmode.offloads &=
> -
> ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> - }
> + port->dev_conf.rxmode.mtu = mtu;
> }
>
> /* Generic flow management functions. */
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 2c79cae05664..92feadefab59 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -1473,11 +1473,6 @@ init_config(void)
> rte_exit(EXIT_FAILURE,
> "rte_eth_dev_info_get() failed\n");
>
> - ret = update_jumbo_frame_offload(pid, 0);
> - if (ret != 0)
> - printf("Updating jumbo frame offload failed for
> port %u\n",
> - pid);
> -
> if (!(port->dev_info.tx_offload_capa &
> DEV_TX_OFFLOAD_MBUF_FAST_FREE))
> port->dev_conf.txmode.offloads &=
> @@ -3364,24 +3359,18 @@ rxtx_port_config(struct rte_port *port)
> }
>
> /*
> - * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME
> offload,
> - * MTU is also aligned.
> + * Helper function to set MTU from frame size
> *
> * port->dev_info should be set before calling this function.
> *
> - * if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
> - * ETH_OVERHEAD". This is useful to update flags but not MTU value.
> - *
> * return 0 on success, negative on error
> */
> int
> -update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
> +update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
> {
> struct rte_port *port = &ports[portid];
> uint32_t eth_overhead;
> - uint64_t rx_offloads;
> uint16_t mtu, new_mtu;
> - bool on;
>
> eth_overhead = get_eth_overhead(&port->dev_info);
>
> @@ -3390,39 +3379,8 @@ update_jumbo_frame_offload(portid_t portid,
> uint32_t max_rx_pktlen)
> return -1;
> }
>
> - if (max_rx_pktlen == 0)
> - max_rx_pktlen = mtu + eth_overhead;
> -
> - rx_offloads = port->dev_conf.rxmode.offloads;
> new_mtu = max_rx_pktlen - eth_overhead;
>
> - if (new_mtu <= RTE_ETHER_MTU) {
> - rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> - on = false;
> - } else {
> - if ((port->dev_info.rx_offload_capa &
> DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
> - printf("Frame size (%u) is not supported by
> port %u\n",
> - max_rx_pktlen, portid);
> - return -1;
> - }
> - rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
> - on = true;
> - }
> -
> - if (rx_offloads != port->dev_conf.rxmode.offloads) {
> - uint16_t qid;
> -
> - port->dev_conf.rxmode.offloads = rx_offloads;
> -
> - /* Apply JUMBO_FRAME offload configuration to Rx queue(s)
> */
> - for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
> - if (on)
> - port->rx_conf[qid].offloads |=
> DEV_RX_OFFLOAD_JUMBO_FRAME;
> - else
> - port->rx_conf[qid].offloads &=
> ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> - }
> - }
> -
> if (mtu == new_mtu)
> return 0;
>
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index 42143f85924f..b94bf668dc4d 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -1012,7 +1012,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id,
> __rte_unused uint16_t queue,
> __rte_unused void *user_param);
> void add_tx_dynf_callback(portid_t portid);
> void remove_tx_dynf_callback(portid_t portid);
> -int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
> +int update_mtu_from_frame_size(portid_t portid, uint32_t
> max_rx_pktlen);
>
> /*
> * Work-around of a compilation error with ICC on invocations of the
> diff --git a/doc/guides/howto/debug_troubleshoot.rst
> b/doc/guides/howto/debug_troubleshoot.rst
> index 457ac441429a..df69fa8bcc24 100644
> --- a/doc/guides/howto/debug_troubleshoot.rst
> +++ b/doc/guides/howto/debug_troubleshoot.rst
> @@ -71,8 +71,6 @@ RX Port and associated core :numref:`dtg_rx_rate`.
> * Identify if port Speed and Duplex is matching to desired values with
> ``rte_eth_link_get``.
>
> - * Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with
> ``rte_eth_dev_info_get``.
> -
> * Check promiscuous mode if the drops do not occur for unique MAC
> address
> with ``rte_eth_promiscuous_get``.
>
> diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
> index feb0c6a7657a..e6f1628402fc 100644
> --- a/doc/guides/nics/bnxt.rst
> +++ b/doc/guides/nics/bnxt.rst
> @@ -886,7 +886,6 @@ processing. This improved performance is derived
> from a number of optimizations:
>
> DEV_RX_OFFLOAD_VLAN_STRIP
> DEV_RX_OFFLOAD_KEEP_CRC
> - DEV_RX_OFFLOAD_JUMBO_FRAME
> DEV_RX_OFFLOAD_IPV4_CKSUM
> DEV_RX_OFFLOAD_UDP_CKSUM
> DEV_RX_OFFLOAD_TCP_CKSUM
> diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
> index c98242f3b72f..a077c30644d2 100644
> --- a/doc/guides/nics/features.rst
> +++ b/doc/guides/nics/features.rst
> @@ -165,8 +165,7 @@ Jumbo frame
>
> Supports Rx jumbo frames.
>
> -* **[uses] rte_eth_rxconf,rte_eth_rxmode**:
> ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
> - ``dev_conf.rxmode.mtu``.
> +* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``dev_conf.rxmode.mtu``.
> * **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
> * **[related] API**: ``rte_eth_dev_set_mtu()``.
>
> diff --git a/drivers/net/atlantic/atl_ethdev.c
> b/drivers/net/atlantic/atl_ethdev.c
> index 3f654c071566..5a198f53fce7 100644
> --- a/drivers/net/atlantic/atl_ethdev.c
> +++ b/drivers/net/atlantic/atl_ethdev.c
> @@ -158,7 +158,6 @@ static struct rte_pci_driver rte_atl_pmd = {
> | DEV_RX_OFFLOAD_IPV4_CKSUM \
> | DEV_RX_OFFLOAD_UDP_CKSUM \
> | DEV_RX_OFFLOAD_TCP_CKSUM \
> - | DEV_RX_OFFLOAD_JUMBO_FRAME \
> | DEV_RX_OFFLOAD_MACSEC_STRIP \
> | DEV_RX_OFFLOAD_VLAN_FILTER)
>
> diff --git a/drivers/net/axgbe/axgbe_ethdev.c
> b/drivers/net/axgbe/axgbe_ethdev.c
> index c36cd7b1d2f0..0bc9e5eeeb10 100644
> --- a/drivers/net/axgbe/axgbe_ethdev.c
> +++ b/drivers/net/axgbe/axgbe_ethdev.c
> @@ -1217,7 +1217,6 @@ axgbe_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c
> b/drivers/net/bnx2x/bnx2x_ethdev.c
> index 009a94e9a8fa..50ff04bb2241 100644
> --- a/drivers/net/bnx2x/bnx2x_ethdev.c
> +++ b/drivers/net/bnx2x/bnx2x_ethdev.c
> @@ -535,7 +535,6 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
> dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
> dev_info->speed_capa = ETH_LINK_SPEED_10G |
> ETH_LINK_SPEED_20G;
> - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
> dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
> diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
> index e93a7eb933b4..9ad7821b4736 100644
> --- a/drivers/net/bnxt/bnxt.h
> +++ b/drivers/net/bnxt/bnxt.h
> @@ -591,7 +591,6 @@ struct bnxt_rep_info {
> DEV_RX_OFFLOAD_TCP_CKSUM | \
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
> | \
> DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
> | \
> - DEV_RX_OFFLOAD_JUMBO_FRAME | \
> DEV_RX_OFFLOAD_KEEP_CRC | \
> DEV_RX_OFFLOAD_VLAN_EXTEND | \
> DEV_RX_OFFLOAD_TCP_LRO | \
> diff --git a/drivers/net/bnxt/bnxt_ethdev.c
> b/drivers/net/bnxt/bnxt_ethdev.c
> index 1e7da8ba61a6..c4fd27bd92de 100644
> --- a/drivers/net/bnxt/bnxt_ethdev.c
> +++ b/drivers/net/bnxt/bnxt_ethdev.c
> @@ -728,15 +728,10 @@ static int bnxt_start_nic(struct bnxt *bp)
> unsigned int i, j;
> int rc;
>
> - if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
> - bp->eth_dev->data->dev_conf.rxmode.offloads |=
> - DEV_RX_OFFLOAD_JUMBO_FRAME;
> + if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
> bp->flags |= BNXT_FLAG_JUMBO;
> - } else {
> - bp->eth_dev->data->dev_conf.rxmode.offloads &=
> - ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> + else
> bp->flags &= ~BNXT_FLAG_JUMBO;
> - }
>
> /* THOR does not support ring groups.
> * But we will use the array to save RSS context IDs.
> @@ -1221,7 +1216,6 @@ bnxt_receive_function(struct rte_eth_dev
> *eth_dev)
> if (eth_dev->data->dev_conf.rxmode.offloads &
> ~(DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_KEEP_CRC |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c
> b/drivers/net/bonding/rte_eth_bond_pmd.c
> index b2a1833e3f91..844ac1581a61 100644
> --- a/drivers/net/bonding/rte_eth_bond_pmd.c
> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c
> @@ -1731,14 +1731,6 @@ slave_configure(struct rte_eth_dev
> *bonded_eth_dev,
> slave_eth_dev->data->dev_conf.rxmode.mtu =
> bonded_eth_dev->data->dev_conf.rxmode.mtu;
>
> - if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
> - DEV_RX_OFFLOAD_JUMBO_FRAME)
> - slave_eth_dev->data->dev_conf.rxmode.offloads |=
> - DEV_RX_OFFLOAD_JUMBO_FRAME;
> - else
> - slave_eth_dev->data->dev_conf.rxmode.offloads &=
> - ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> -
> nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
> nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
>
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h
> b/drivers/net/cnxk/cnxk_ethdev.h
> index 4eead0390532..aa147eee45c9 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -75,9 +75,8 @@
> #define CNXK_NIX_RX_OFFLOAD_CAPA \
> (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM
> | \
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_SCATTER | \
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
> - DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP |
> \
> - DEV_RX_OFFLOAD_VLAN_STRIP)
> + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
> DEV_RX_OFFLOAD_RSS_HASH | \
> + DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP)
>
> #define RSS_IPV4_ENABLE \
> (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
> ETH_RSS_NONFRAG_IPV4_UDP | \
> diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c
> b/drivers/net/cnxk/cnxk_ethdev_ops.c
> index 349896f6a1bf..d0924df76152 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_ops.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
> @@ -92,7 +92,6 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev
> *eth_dev, uint16_t queue_id,
> {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
> {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
> {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
> - {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
> {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
> {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
> {DEV_RX_OFFLOAD_SECURITY, " Security,"},
> diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
> index 7c89a028bf16..37625c5bfb69 100644
> --- a/drivers/net/cxgbe/cxgbe.h
> +++ b/drivers/net/cxgbe/cxgbe.h
> @@ -51,7 +51,6 @@
> DEV_RX_OFFLOAD_IPV4_CKSUM | \
> DEV_RX_OFFLOAD_UDP_CKSUM | \
> DEV_RX_OFFLOAD_TCP_CKSUM | \
> - DEV_RX_OFFLOAD_JUMBO_FRAME | \
> DEV_RX_OFFLOAD_SCATTER | \
> DEV_RX_OFFLOAD_RSS_HASH)
>
> diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c
> b/drivers/net/cxgbe/cxgbe_ethdev.c
> index 70b879fed100..1374f32b6826 100644
> --- a/drivers/net/cxgbe/cxgbe_ethdev.c
> +++ b/drivers/net/cxgbe/cxgbe_ethdev.c
> @@ -661,14 +661,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev
> *eth_dev,
> if ((&rxq->fl) != NULL)
> rxq->fl.size = temp_nb_desc;
>
> - /* Set to jumbo mode if necessary */
> - if (eth_dev->data->mtu > RTE_ETHER_MTU)
> - eth_dev->data->dev_conf.rxmode.offloads |=
> - DEV_RX_OFFLOAD_JUMBO_FRAME;
> - else
> - eth_dev->data->dev_conf.rxmode.offloads &=
> - ~DEV_RX_OFFLOAD_JUMBO_FRAME;
> -
> err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
> &rxq->fl, NULL,
> is_pf4(adapter) ?
> diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
> index 830f5192474d..21b8fe61c9a7 100644
> --- a/drivers/net/cxgbe/sge.c
> +++ b/drivers/net/cxgbe/sge.c
> @@ -365,13 +365,10 @@ static unsigned int refill_fl_usembufs(struct
> adapter *adap, struct sge_fl *q,
> struct rte_mbuf *buf_bulk[n];
> int ret, i;
> struct rte_pktmbuf_pool_private *mbp_priv;
> - u8 jumbo_en = rxq->rspq.eth_dev->data-
> >dev_conf.rxmode.offloads &
> - DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data.
> */
> mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
> - if (jumbo_en &&
> - ((mbp_priv->mbuf_data_room_size -
> RTE_PKTMBUF_HEADROOM) >= 9000))
> + if ((mbp_priv->mbuf_data_room_size -
> RTE_PKTMBUF_HEADROOM) >= 9000)
> buf_size_idx = RX_LARGE_MTU_BUF;
>
> ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk,
> n);
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c
> b/drivers/net/dpaa/dpaa_ethdev.c
> index 60dd4f67fc26..9cc808b767ea 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -54,7 +54,6 @@
>
> /* Supported Rx offloads */
> static uint64_t dev_rx_offloads_sup =
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_SCATTER;
>
> /* Rx offloads which cannot be disabled */
> @@ -592,7 +591,6 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev
> *dev,
> uint64_t flags;
> const char *output;
> } rx_offload_map[] = {
> - {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo
> frame,"},
> {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
> {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
> {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
> diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c
> b/drivers/net/dpaa2/dpaa2_ethdev.c
> index 6b44b0557e6a..53508972a4c2 100644
> --- a/drivers/net/dpaa2/dpaa2_ethdev.c
> +++ b/drivers/net/dpaa2/dpaa2_ethdev.c
> @@ -44,7 +44,6 @@ static uint64_t dev_rx_offloads_sup =
> DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_TIMESTAMP;
>
> /* Rx offloads which cannot be disabled */
> @@ -298,7 +297,6 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev
> *dev,
> {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer
> UDP csum,"},
> {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
> {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
> - {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo
> frame,"},
> {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
> {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
> {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
> diff --git a/drivers/net/e1000/e1000_ethdev.h
> b/drivers/net/e1000/e1000_ethdev.h
> index 3b4d9c3ee6f4..1ae78fe71f02 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -468,8 +468,8 @@ void eth_em_rx_queue_release(void *rxq);
> void em_dev_clear_queues(struct rte_eth_dev *dev);
> void em_dev_free_queues(struct rte_eth_dev *dev);
>
> -uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
> -uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
> +uint64_t em_get_rx_port_offloads_capa(void);
> +uint64_t em_get_rx_queue_offloads_capa(void);
>
> int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t
> rx_queue_id,
> uint16_t nb_rx_desc, unsigned int socket_id,
> diff --git a/drivers/net/e1000/em_ethdev.c
> b/drivers/net/e1000/em_ethdev.c
> index 6ebef55588bc..8a752eef52cf 100644
> --- a/drivers/net/e1000/em_ethdev.c
> +++ b/drivers/net/e1000/em_ethdev.c
> @@ -1083,8 +1083,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
> dev_info->max_rx_queues = 1;
> dev_info->max_tx_queues = 1;
>
> - dev_info->rx_queue_offload_capa =
> em_get_rx_queue_offloads_capa(dev);
> - dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
> + dev_info->rx_queue_offload_capa =
> em_get_rx_queue_offloads_capa();
> + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
> dev_info->rx_queue_offload_capa;
> dev_info->tx_queue_offload_capa =
> em_get_tx_queue_offloads_capa(dev);
> dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
> diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
> index dfd8f2fd0074..e061f80a906a 100644
> --- a/drivers/net/e1000/em_rxtx.c
> +++ b/drivers/net/e1000/em_rxtx.c
> @@ -1359,12 +1359,9 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
> }
>
> uint64_t
> -em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
> +em_get_rx_port_offloads_capa(void)
> {
> uint64_t rx_offload_capa;
> - uint32_t max_rx_pktlen;
> -
> - max_rx_pktlen = em_get_max_pktlen(dev);
>
> rx_offload_capa =
> DEV_RX_OFFLOAD_VLAN_STRIP |
> @@ -1374,14 +1371,12 @@ em_get_rx_port_offloads_capa(struct
> rte_eth_dev *dev)
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_KEEP_CRC |
> DEV_RX_OFFLOAD_SCATTER;
> - if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
> - rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
>
> return rx_offload_capa;
> }
>
> uint64_t
> -em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
> +em_get_rx_queue_offloads_capa(void)
> {
> uint64_t rx_queue_offload_capa;
>
> @@ -1390,7 +1385,7 @@ em_get_rx_queue_offloads_capa(struct
> rte_eth_dev *dev)
> * capability be same to per port queue offloading capability
> * for better convenience.
> */
> - rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
> + rx_queue_offload_capa = em_get_rx_port_offloads_capa();
>
> return rx_queue_offload_capa;
> }
> @@ -1839,7 +1834,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
> * to avoid splitting packets that don't fit into
> * one buffer.
> */
> - if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME
> ||
> + if (dev->data->mtu > RTE_ETHER_MTU ||
> rctl_bsize < RTE_ETHER_MAX_LEN) {
> if (!dev->data->scattered_rx)
> PMD_INIT_LOG(DEBUG, "forcing scatter
> mode");
> @@ -1874,14 +1869,14 @@ eth_em_rx_init(struct rte_eth_dev *dev)
> if ((hw->mac.type == e1000_ich9lan ||
> hw->mac.type == e1000_pch2lan ||
> hw->mac.type == e1000_ich10lan) &&
> - rxmode->offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + dev->data->mtu > RTE_ETHER_MTU) {
> u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
> E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
> E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
> }
>
> if (hw->mac.type == e1000_pch2lan) {
> - if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> + if (dev->data->mtu > RTE_ETHER_MTU)
> e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
> else
> e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
> @@ -1908,7 +1903,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
> /*
> * Configure support of jumbo frames, if any.
> */
> - if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> + if (dev->data->mtu > RTE_ETHER_MTU)
> rctl |= E1000_RCTL_LPE;
> else
> rctl &= ~E1000_RCTL_LPE;
> diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
> index de12997b4bdd..9998d4ea4179 100644
> --- a/drivers/net/e1000/igb_rxtx.c
> +++ b/drivers/net/e1000/igb_rxtx.c
> @@ -1640,7 +1640,6 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev
> *dev)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_KEEP_CRC |
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_RSS_HASH;
> @@ -2344,7 +2343,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
> * Configure support of jumbo frames, if any.
> */
> max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
> - if (dev->data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (dev->data->mtu > RTE_ETHER_MTU) {
> rctl |= E1000_RCTL_LPE;
>
> /*
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index e9b718786a39..4322dce260f5 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -2042,8 +2042,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM;
>
> - rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
> -
> /* Inform framework about available features */
> dev_info->rx_offload_capa = rx_feat;
> dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
> diff --git a/drivers/net/enetc/enetc_ethdev.c
> b/drivers/net/enetc/enetc_ethdev.c
> index a7372c1787c7..6457677d300a 100644
> --- a/drivers/net/enetc/enetc_ethdev.c
> +++ b/drivers/net/enetc/enetc_ethdev.c
> @@ -210,8 +210,7 @@ enetc_dev_infos_get(struct rte_eth_dev *dev
> __rte_unused,
> (DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_KEEP_CRC |
> - DEV_RX_OFFLOAD_JUMBO_FRAME);
> + DEV_RX_OFFLOAD_KEEP_CRC);
>
> return 0;
> }
> diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
> index a8f5332a407f..6a4758ea8e8a 100644
> --- a/drivers/net/enic/enic_res.c
> +++ b/drivers/net/enic/enic_res.c
> @@ -209,7 +209,6 @@ int enic_get_vnic_config(struct enic *enic)
> DEV_TX_OFFLOAD_TCP_TSO;
> enic->rx_offload_capa =
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> diff --git a/drivers/net/failsafe/failsafe_ops.c
> b/drivers/net/failsafe/failsafe_ops.c
> index 5ff33e03e034..47c5efe9ea77 100644
> --- a/drivers/net/failsafe/failsafe_ops.c
> +++ b/drivers/net/failsafe/failsafe_ops.c
> @@ -1193,7 +1193,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
> DEV_RX_OFFLOAD_HEADER_SPLIT |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_VLAN_EXTEND |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_TIMESTAMP |
> DEV_RX_OFFLOAD_SECURITY |
> @@ -1211,7 +1210,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
> DEV_RX_OFFLOAD_HEADER_SPLIT |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_VLAN_EXTEND |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_TIMESTAMP |
> DEV_RX_OFFLOAD_SECURITY |
> diff --git a/drivers/net/fm10k/fm10k_ethdev.c
> b/drivers/net/fm10k/fm10k_ethdev.c
> index 5e4b361ca6c0..093021246286 100644
> --- a/drivers/net/fm10k/fm10k_ethdev.c
> +++ b/drivers/net/fm10k/fm10k_ethdev.c
> @@ -1779,7 +1779,6 @@ static uint64_t
> fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_HEADER_SPLIT |
> DEV_RX_OFFLOAD_RSS_HASH);
> }
> diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c
> b/drivers/net/hinic/hinic_pmd_ethdev.c
> index ce0b52c718ab..b1563350ec0e 100644
> --- a/drivers/net/hinic/hinic_pmd_ethdev.c
> +++ b/drivers/net/hinic/hinic_pmd_ethdev.c
> @@ -747,7 +747,6 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *info)
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_TCP_LRO |
> DEV_RX_OFFLOAD_RSS_HASH;
>
> diff --git a/drivers/net/hns3/hns3_ethdev.c
> b/drivers/net/hns3/hns3_ethdev.c
> index 868d381a4772..0c58c55844b0 100644
> --- a/drivers/net/hns3/hns3_ethdev.c
> +++ b/drivers/net/hns3/hns3_ethdev.c
> @@ -2717,7 +2717,6 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
> struct rte_eth_dev_info *info)
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_RSS_HASH |
> DEV_RX_OFFLOAD_TCP_LRO);
> info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> diff --git a/drivers/net/hns3/hns3_ethdev_vf.c
> b/drivers/net/hns3/hns3_ethdev_vf.c
> index ff28cad53a03..c488e03f23a4 100644
> --- a/drivers/net/hns3/hns3_ethdev_vf.c
> +++ b/drivers/net/hns3/hns3_ethdev_vf.c
> @@ -956,7 +956,6 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev,
> struct rte_eth_dev_info *info)
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_RSS_HASH |
> DEV_RX_OFFLOAD_TCP_LRO);
> info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index dad151eac5f1..ad7802f63031 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -3758,7 +3758,6 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_VLAN_EXTEND |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_RSS_HASH;
>
> dev_info->tx_queue_offload_capa =
> DEV_TX_OFFLOAD_MBUF_FAST_FREE;
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c
> b/drivers/net/i40e/i40e_ethdev_vf.c
> index f7f9d44ef181..1c314e2ffdd0 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -1932,7 +1932,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct
> i40e_rx_queue *rxq)
> /**
> * Check if the jumbo frame and maximum packet length are set
> correctly
> */
> - if (dev_data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (dev_data->mtu > RTE_ETHER_MTU) {
> if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
> rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must
> be "
> @@ -2378,7 +2378,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_FILTER;
>
> dev_info->tx_queue_offload_capa = 0;
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index aa43796ef1af..a421acf8f6b6 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -2906,7 +2906,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
> rxq->max_pkt_len =
> RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq-
> >rx_buf_len,
> data->mtu + I40E_ETH_OVERHEAD);
> - if (data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (data->mtu > RTE_ETHER_MTU) {
> if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
> rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must
> "
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 049671ef3da9..f156add80e0d 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -574,7 +574,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct
> iavf_rx_queue *rxq)
> /* Check if the jumbo frame and maximum packet length are set
> * correctly.
> */
> - if (dev->data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (dev->data->mtu > RTE_ETHER_MTU) {
> if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
> max_pkt_len > IAVF_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must
> be "
> @@ -939,7 +939,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_RSS_HASH;
>
> diff --git a/drivers/net/ice/ice_dcf_ethdev.c
> b/drivers/net/ice/ice_dcf_ethdev.c
> index 34b6c9b2a7ed..72fdcc29c28a 100644
> --- a/drivers/net/ice/ice_dcf_ethdev.c
> +++ b/drivers/net/ice/ice_dcf_ethdev.c
> @@ -65,7 +65,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct
> ice_rx_queue *rxq)
> /* Check if the jumbo frame and maximum packet length are set
> * correctly.
> */
> - if (dev->data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (dev_data->mtu > RTE_ETHER_MTU) {
> if (max_pkt_len <= ICE_ETH_MAX_LEN ||
> max_pkt_len > ICE_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must
> be "
> @@ -664,7 +664,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_RSS_HASH;
> dev_info->tx_offload_capa =
> diff --git a/drivers/net/ice/ice_dcf_vf_representor.c
> b/drivers/net/ice/ice_dcf_vf_representor.c
> index 970461f3e90a..07843c6dbc92 100644
> --- a/drivers/net/ice/ice_dcf_vf_representor.c
> +++ b/drivers/net/ice/ice_dcf_vf_representor.c
> @@ -141,7 +141,6 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev
> *dev,
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_SCATTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_VLAN_EXTEND |
> DEV_RX_OFFLOAD_RSS_HASH;
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> index c1a96d3de183..a17c11e95e0b 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -3491,7 +3491,6 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>
> dev_info->rx_offload_capa =
> DEV_RX_OFFLOAD_VLAN_STRIP |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_KEEP_CRC |
> DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_VLAN_FILTER;
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index a3de4172e2bc..a7b0915dabfc 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -259,7 +259,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue
> *rxq)
> struct ice_rlan_ctx rx_ctx;
> enum ice_status err;
> uint16_t buf_size;
> - struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
> uint32_t rxdid = ICE_RXDID_COMMS_OVS;
> uint32_t regval;
> uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
> @@ -273,7 +272,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue
> *rxq)
> RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq-
> >rx_buf_len,
> frame_size);
>
> - if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
> + if (dev_data->mtu > RTE_ETHER_MTU) {
> if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
> rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
> PMD_DRV_LOG(ERR, "maximum packet length must
> "
> diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
> index b3473b5b1646..5e6c2ff30157 100644
> --- a/drivers/net/igc/igc_ethdev.h
> +++ b/drivers/net/igc/igc_ethdev.h
> @@ -73,7 +73,6 @@ extern "C" {
> DEV_RX_OFFLOAD_UDP_CKSUM | \
> DEV_RX_OFFLOAD_TCP_CKSUM | \
> DEV_RX_OFFLOAD_SCTP_CKSUM | \
> - DEV_RX_OFFLOAD_JUMBO_FRAME | \
> DEV_RX_OFFLOAD_KEEP_CRC | \
> DEV_RX_OFFLOAD_SCATTER | \
> DEV_RX_OFFLOAD_RSS_HASH)
> diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
> index d80808a002f5..30940857eac0 100644
> --- a/drivers/net/igc/igc_txrx.c
> +++ b/drivers/net/igc/igc_txrx.c
> @@ -1099,7 +1099,7 @@ igc_rx_init(struct rte_eth_dev *dev)
> IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
>
> /* Configure support of jumbo frames, if any. */
> - if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
> + if (dev->data->mtu > RTE_ETHER_MTU)
> rctl |= IGC_RCTL_LPE;
> else
> rctl &= ~IGC_RCTL_LPE;
> diff --git a/drivers/net/ionic/ionic_ethdev.c
> b/drivers/net/ionic/ionic_ethdev.c
> index 97447a10e46a..795980cb1ca5 100644
> --- a/drivers/net/ionic/ionic_ethdev.c
> +++ b/drivers/net/ionic/ionic_ethdev.c
> @@ -414,7 +414,6 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> - DEV_RX_OFFLOAD_JUMBO_FRAME |
> DEV_RX_OFFLOAD_VLAN_FILTER |
> DEV_RX_OFFLOAD_VLAN_STRIP |
> DEV_RX_OFFLOAD_SCATTER |
> diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c
> b/drivers/net/ipn3ke/ipn3ke_representor.c
> index 377b96c0236a..4e5d234e8c7d 100644
> --- a/drivers/net/ipn3ke/ipn3ke_representor.c
> +++ b/drivers/net/ipn3ke/ipn3ke_representor.c
> @@ -74,8 +74,7 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev
> *ethdev,
> DEV_RX_OFFLOAD_TCP_CKSUM |
> DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> DEV_RX_OFFLOAD_VLAN_EXTEND |
> - DEV_RX_OFFLOAD_VLAN_FILTER |
> - DEV_RX_OFFLOAD_JUMBO_FRAME;
> + DEV_RX_OFFLOAD_VLAN_FILTER;
>
> dev_info->tx_queue_offload_capa =
> DEV_TX_OFFLOAD_MBUF_FAST_FREE;
> dev_info->tx_offload_capa =
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
@@ -199,8 +199,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN;
- if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
t->internal_port = 1;
RTE_ETH_FOREACH_DEV(i) {
@@ -1921,7 +1921,7 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
return;
}
- update_jumbo_frame_offload(port_id, res->value);
+ update_mtu_from_frame_size(port_id, res->value);
}
init_port_config();
@@ -1136,39 +1136,19 @@ port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
+ struct rte_port *port = &ports[port_id];
int diag;
- struct rte_port *rte_port = &ports[port_id];
- struct rte_eth_dev_info dev_info;
- int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- ret = eth_dev_info_get_print_err(port_id, &dev_info);
- if (ret != 0)
- return;
-
- if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
- printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
- mtu, dev_info.min_mtu, dev_info.max_mtu);
- return;
- }
diag = rte_eth_dev_set_mtu(port_id, mtu);
if (diag) {
printf("Set MTU failed. diag=%d\n", diag);
return;
}
- rte_port->dev_conf.rxmode.mtu = mtu;
-
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (mtu > RTE_ETHER_MTU) {
- rte_port->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- } else
- rte_port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
+ port->dev_conf.rxmode.mtu = mtu;
}
/* Generic flow management functions. */
@@ -1473,11 +1473,6 @@ init_config(void)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_info_get() failed\n");
- ret = update_jumbo_frame_offload(pid, 0);
- if (ret != 0)
- printf("Updating jumbo frame offload failed for port %u\n",
- pid);
-
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
@@ -3364,24 +3359,18 @@ rxtx_port_config(struct rte_port *port)
}
/*
- * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
- * MTU is also aligned.
+ * Helper function to set MTU from frame size
*
* port->dev_info should be set before calling this function.
*
- * if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
- * ETH_OVERHEAD". This is useful to update flags but not MTU value.
- *
* return 0 on success, negative on error
*/
int
-update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
+update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
- uint64_t rx_offloads;
uint16_t mtu, new_mtu;
- bool on;
eth_overhead = get_eth_overhead(&port->dev_info);
@@ -3390,39 +3379,8 @@ update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
return -1;
}
- if (max_rx_pktlen == 0)
- max_rx_pktlen = mtu + eth_overhead;
-
- rx_offloads = port->dev_conf.rxmode.offloads;
new_mtu = max_rx_pktlen - eth_overhead;
- if (new_mtu <= RTE_ETHER_MTU) {
- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = false;
- } else {
- if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- printf("Frame size (%u) is not supported by port %u\n",
- max_rx_pktlen, portid);
- return -1;
- }
- rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = true;
- }
-
- if (rx_offloads != port->dev_conf.rxmode.offloads) {
- uint16_t qid;
-
- port->dev_conf.rxmode.offloads = rx_offloads;
-
- /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
- for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
- if (on)
- port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
- }
-
if (mtu == new_mtu)
return 0;
@@ -1012,7 +1012,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
-int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
+int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen);
/*
* Work-around of a compilation error with ICC on invocations of the
@@ -71,8 +71,6 @@ RX Port and associated core :numref:`dtg_rx_rate`.
* Identify if port Speed and Duplex is matching to desired values with
``rte_eth_link_get``.
- * Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
-
* Check promiscuous mode if the drops do not occur for unique MAC address
with ``rte_eth_promiscuous_get``.
@@ -886,7 +886,6 @@ processing. This improved performance is derived from a number of optimizations:
DEV_RX_OFFLOAD_VLAN_STRIP
DEV_RX_OFFLOAD_KEEP_CRC
- DEV_RX_OFFLOAD_JUMBO_FRAME
DEV_RX_OFFLOAD_IPV4_CKSUM
DEV_RX_OFFLOAD_UDP_CKSUM
DEV_RX_OFFLOAD_TCP_CKSUM
@@ -165,8 +165,7 @@ Jumbo frame
Supports Rx jumbo frames.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
- ``dev_conf.rxmode.mtu``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``dev_conf.rxmode.mtu``.
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
* **[related] API**: ``rte_eth_dev_set_mtu()``.
@@ -158,7 +158,6 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_JUMBO_FRAME \
| DEV_RX_OFFLOAD_MACSEC_STRIP \
| DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -1217,7 +1217,6 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
@@ -535,7 +535,6 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@@ -591,7 +591,6 @@ struct bnxt_rep_info {
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
@@ -728,15 +728,10 @@ static int bnxt_start_nic(struct bnxt *bp)
unsigned int i, j;
int rc;
- if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
- bp->eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
- } else {
- bp->eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
bp->flags &= ~BNXT_FLAG_JUMBO;
- }
/* THOR does not support ring groups.
* But we will use the array to save RSS context IDs.
@@ -1221,7 +1216,6 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
if (eth_dev->data->dev_conf.rxmode.offloads &
~(DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -1731,14 +1731,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
- if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME)
- slave_eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- slave_eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -75,9 +75,8 @@
#define CNXK_NIX_RX_OFFLOAD_CAPA \
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP | \
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
+ DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
@@ -92,7 +92,6 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_SECURITY, " Security,"},
@@ -51,7 +51,6 @@
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
@@ -661,14 +661,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
if ((&rxq->fl) != NULL)
rxq->fl.size = temp_nb_desc;
- /* Set to jumbo mode if necessary */
- if (eth_dev->data->mtu > RTE_ETHER_MTU)
- eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, NULL,
is_pf4(adapter) ?
@@ -365,13 +365,10 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
- if (jumbo_en &&
- ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
buf_size_idx = RX_LARGE_MTU_BUF;
ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
@@ -54,7 +54,6 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
@@ -592,7 +591,6 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
@@ -44,7 +44,6 @@ static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
@@ -298,7 +297,6 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
@@ -468,8 +468,8 @@ void eth_em_rx_queue_release(void *rxq);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
-uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
-uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_port_offloads_capa(void);
+uint64_t em_get_rx_queue_offloads_capa(void);
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1083,8 +1083,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
- dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
- dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa();
+ dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
dev_info->rx_queue_offload_capa;
dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
@@ -1359,12 +1359,9 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
}
uint64_t
-em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+em_get_rx_port_offloads_capa(void)
{
uint64_t rx_offload_capa;
- uint32_t max_rx_pktlen;
-
- max_rx_pktlen = em_get_max_pktlen(dev);
rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -1374,14 +1371,12 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
- if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
- rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
}
uint64_t
-em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+em_get_rx_queue_offloads_capa(void)
{
uint64_t rx_queue_offload_capa;
@@ -1390,7 +1385,7 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
* capability be same to per port queue offloading capability
* for better convenience.
*/
- rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa();
return rx_queue_offload_capa;
}
@@ -1839,7 +1834,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
+ if (dev->data->mtu > RTE_ETHER_MTU ||
rctl_bsize < RTE_ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1874,14 +1869,14 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
- rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ dev->data->mtu > RTE_ETHER_MTU) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (dev->data->mtu > RTE_ETHER_MTU)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
@@ -1908,7 +1903,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
@@ -1640,7 +1640,6 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
@@ -2344,7 +2343,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Configure support of jumbo frames, if any.
*/
max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev->data->mtu > RTE_ETHER_MTU) {
rctl |= E1000_RCTL_LPE;
/*
@@ -2042,8 +2042,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
/* Inform framework about available features */
dev_info->rx_offload_capa = rx_feat;
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
@@ -210,8 +210,7 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME);
+ DEV_RX_OFFLOAD_KEEP_CRC);
return 0;
}
@@ -209,7 +209,6 @@ int enic_get_vnic_config(struct enic *enic)
DEV_TX_OFFLOAD_TCP_TSO;
enic->rx_offload_capa =
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -1193,7 +1193,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |
@@ -1211,7 +1210,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |
@@ -1779,7 +1779,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_RSS_HASH);
}
@@ -747,7 +747,6 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_RSS_HASH;
@@ -2717,7 +2717,6 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
@@ -956,7 +956,6 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
@@ -3758,7 +3758,6 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
@@ -1932,7 +1932,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -2378,7 +2378,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_queue_offload_capa = 0;
@@ -2906,7 +2906,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
rxq->max_pkt_len =
RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
data->mtu + I40E_ETH_OVERHEAD);
- if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -574,7 +574,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev->data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -939,7 +939,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;
@@ -65,7 +65,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= ICE_ETH_MAX_LEN ||
max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -664,7 +664,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
@@ -141,7 +141,6 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_RSS_HASH;
@@ -3491,7 +3491,6 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -259,7 +259,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size;
- struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
@@ -273,7 +272,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
frame_size);
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -73,7 +73,6 @@ extern "C" {
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
@@ -1099,7 +1099,7 @@ igc_rx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* Configure support of jumbo frames, if any. */
- if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= IGC_RCTL_LPE;
else
rctl &= ~IGC_RCTL_LPE;
@@ -414,7 +414,6 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_SCATTER |
@@ -74,8 +74,7 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
@@ -6229,7 +6229,6 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -6251,14 +6250,12 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
- rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
- (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE)
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
else
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);
@@ -600,15 +600,10 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < max_frame) {
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (max_frame > IXGBE_ETH_MAX_LEN) {
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (max_frame > IXGBE_ETH_MAX_LEN)
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
- } else {
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
- }
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
@@ -3021,7 +3021,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
@@ -5083,7 +5082,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev->data->mtu > RTE_ETHER_MTU) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -684,7 +684,6 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)
@@ -335,7 +335,6 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
if (!config->mprq.enabled)
@@ -54,8 +54,7 @@
#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
/** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
/** Tx offloads capabilities */
#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
@@ -59,7 +59,6 @@
/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_CHECKSUM)
/** Port Tx offloads capabilities */
@@ -643,8 +643,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- hw->mtu = dev->data->mtu;
+ hw->mtu = dev->data->mtu;
if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
@@ -1307,9 +1306,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
};
- /* All NFP devices support jumbo frames */
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
@@ -60,7 +60,6 @@
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_VLAN_FILTER)
#define OCTEONTX_TX_OFFLOADS ( \
@@ -147,7 +147,6 @@
DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_VLAN_FILTER | \
@@ -39,8 +39,7 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
- devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
- devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+ devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
@@ -953,12 +953,6 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
- if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
- !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
- rte_pktmbuf_free(droq_pkt);
- goto oq_read_fail;
- }
-
if (droq_pkt->nb_segs > 1 &&
!(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
rte_pktmbuf_free(droq_pkt);
@@ -1392,7 +1392,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_RSS_HASH);
@@ -915,8 +915,6 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
- caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
return caps & sfc_rx_get_offload_mask(sa);
}
@@ -40,7 +40,6 @@
#define NICVF_RX_OFFLOAD_CAPA ( \
DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
@@ -1953,7 +1953,6 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
@@ -2442,7 +2442,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
host_features = VIRTIO_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -56,7 +56,6 @@
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;
@@ -149,8 +149,7 @@ static struct rte_eth_conf port_conf = {
.mtu = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME),
+ DEV_RX_OFFLOAD_SCATTER),
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -164,8 +164,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.mtu = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
@@ -2207,8 +2207,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- if (mtu_size > RTE_ETHER_MTU)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
@@ -109,7 +109,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.mtu = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -790,11 +790,6 @@ kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
}
memcpy(&conf, &port_conf, sizeof(conf));
- /* Set new MTU */
- if (new_mtu > RTE_ETHER_MTU)
- conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
conf.rxmode.mtu = new_mtu;
ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
@@ -1813,8 +1813,6 @@ parse_args(int argc, char **argv)
};
printf("jumbo frame is enabled\n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MULTI_SEGS;
@@ -493,7 +493,6 @@ parse_args(int argc, char **argv)
const struct option lenopts = {"max-pkt-len",
required_argument, 0, 0};
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
/*
@@ -1952,8 +1952,6 @@ parse_args(int argc, char **argv)
0, 0};
printf("jumbo frame is enabled \n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MULTI_SEGS;
@@ -702,7 +702,6 @@ parse_args(int argc, char **argv)
"max-pkt-len", required_argument, 0, 0
};
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
/*
@@ -2986,8 +2986,6 @@ parse_args(int argc, char **argv)
required_argument, 0, 0};
printf("jumbo frame is enabled - disabling simple TX path\n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MULTI_SEGS;
@@ -637,8 +637,6 @@ us_vhost_parse_args(int argc, char **argv)
}
mergeable = !!ret;
if (ret) {
- vmdq_conf_default.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
vmdq_conf_default.rxmode.mtu =
JUMBO_FRAME_MAX_SIZE;
}
@@ -118,7 +118,6 @@ static const struct {
RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
- RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
@@ -1479,13 +1478,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
goto rollback;
}
- if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
- dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
- /* Use default value */
- dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
- }
-
dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
/*
@@ -3625,7 +3617,6 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int ret;
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
- int is_jumbo_frame_capable = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -3653,27 +3644,12 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
frame_size = mtu + overhead_len;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
-
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
- is_jumbo_frame_capable = 1;
}
- if (mtu > RTE_ETHER_MTU && is_jumbo_frame_capable == 0)
- return -EINVAL;
-
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
- if (!ret) {
+ if (!ret)
dev->data->mtu = mtu;
- /* switch to jumbo mode if needed */
- if (mtu > RTE_ETHER_MTU)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
-
return eth_err(port_id, ret);
}
@@ -1359,7 +1359,6 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
-#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
#define DEV_RX_OFFLOAD_SCATTER 0x00002000
/**
* Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME